Compare commits: mongo-plug...1.6.0-rc4

123 Commits

| SHA1 |
|---|
| 71948a8f84 |
| b5babc3f00 |
| 5f0fbc7d30 |
| cde9c37e26 |
| 83c560af22 |
| 38075d8ff8 |
| 3c039921e1 |
| 0ffa24c4b3 |
| 1734e97d58 |
| 6332ede542 |
| 2a9198cea6 |
| 1e95f9785c |
| 7b23287e20 |
| f4c0aac898 |
| bcaaeda49c |
| 9d2f3fcbb9 |
| 0aad487cab |
| 19c102cf4b |
| 109c1a4344 |
| 82448a9dd1 |
| 64b239663c |
| 7e3ec16e15 |
| a971ffb880 |
| 461c0dccd8 |
| 971debb582 |
| 6d585beedf |
| 38ec968b0b |
| 0c1293ad5e |
| b99cd14129 |
| c2108fcf09 |
| 04b9afff68 |
| a320f91516 |
| ef112e6ee7 |
| 5be1198274 |
| 8a73dc05c0 |
| 43bd23e555 |
| b0b18df0bf |
| cc97b48ca8 |
| 36b8220181 |
| 1c0f63a90d |
| 503881d4d7 |
| 63de4ffc51 |
| 4cefe3eadd |
| b63073deb2 |
| e60abdf8ea |
| e5e75a62cc |
| a4870e6a6d |
| 3469e74dd9 |
| def76ace3b |
| 05393da939 |
| e8fc3ca70c |
| 729388f4dd |
| be9d4f4be0 |
| 3658ac8f53 |
| d7f279e3d3 |
| e28f422d21 |
| cd919066d5 |
| 6200683c29 |
| 76ce71f7fa |
| 2160779126 |
| 6e5e2f713d |
| 8e515688eb |
| 6d6631382c |
| f1b681cbdc |
| 4118ec7629 |
| f114f6a124 |
| 8cfd001441 |
| 9ce70aad77 |
| 07dbbb27dc |
| 0e14e31b0a |
| 8b3767fd6e |
| 81a93fcddf |
| 8005883de8 |
| f7207f514e |
| f1c8abd68c |
| e4ce057885 |
| a6d366fb84 |
| de22480e7d |
| 2b65915b96 |
| 9d8b1b1e87 |
| b9ddbbd5ed |
| c377c8fb7c |
| 45c22e42da |
| ad5e954047 |
| 93b2870b28 |
| 3501b65f7c |
| 35378ae9cc |
| 1212b2ddc5 |
| 0a37386c5e |
| 00a52a67b9 |
| dc96c34e2c |
| 5928219454 |
| 8c932abff6 |
| fcd6d4eb09 |
| b355536b20 |
| e988c83068 |
| 80d9417315 |
| f4fa05530a |
| 18aef35c58 |
| 8147d60973 |
| df80fa6099 |
| 53221d87eb |
| ddde8809f4 |
| 0ca3900abe |
| a777ce9293 |
| 3242f97deb |
| 6e35071c89 |
| cd620ac144 |
| 6406abbc89 |
| 9aabf56795 |
| 4ac78d5c6d |
| 3fe3d75bb3 |
| a55456b56c |
| 6c656d92a0 |
| 2ee270f274 |
| 5b37fd3ae9 |
| f82f03b92c |
| 42ccc9f324 |
| a00d5b48f8 |
| f5ea13a9ab |
| 32dd1b3725 |
| 1b0e87a8b0 |
| efa9095829 |
@@ -1,7 +1,7 @@
 ---
 defaults: &defaults
   docker:
-    - image: 'circleci/golang:1.9.2'
+    - image: 'circleci/golang:1.9.4'
   working_directory: '/go/src/github.com/influxdata/telegraf'
 version: 2
 jobs:
@@ -9,7 +9,8 @@ jobs:
     <<: *defaults
     steps:
      - checkout
-     - run: 'make ci-test'
+     - run: 'make deps'
+     - run: 'make test-ci'
   release:
     <<: *defaults
     steps:
@@ -42,7 +43,7 @@ workflows:
           - 'build'
       triggers:
         - schedule:
-            cron: "0 0 * * *"
+            cron: "0 18 * * *"
            filters:
              branches:
                only:
.gitignore (vendored): 4 changes

@@ -1,5 +1,3 @@
-build
+/build
 /telegraf
 /telegraf.gz
-*~
-*#
CHANGELOG.md: 74 changes

@@ -2,15 +2,16 @@

 ### Release Notes

-- The `mysql` input plugin has been updated to convert values to the
-  correct data type. This may cause a `field type error` when inserting into
-  InfluxDB due to the change of types. It is recommended to drop the `mysql`,
-  `mysql_variables`, and `mysql_innodb`:
-  ```
-  DROP MEASUREMENT mysql
-  DROP MEASUREMENT mysql_variables
-  DROP MEASUREMENT mysql_innodb
-  ```
+- The `mysql` input plugin has been updated to fix a number of type conversion
+  issues. This may cause a `field type error` when inserting into InfluxDB due
+  to the change of types.
+
+  To address this we have introduced a new `metric_version` option to control
+  enabling the new format. For in-depth recommendations on upgrading please
+  reference the [mysql plugin documentation](./plugins/inputs/mysql/README.md#metric-version).
+
+  It is encouraged to migrate to the new model when possible as the old version
+  is deprecated and will be removed in a future version.
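For reference, opting in to the new format is a single option on the plugin. A minimal sketch, using the `servers` and `metric_version` settings that appear in the sample configuration later in this diff (the address is a placeholder):

```toml
[[inputs.mysql]]
  ## Placeholder address; adjust for your server.
  servers = ["tcp(127.0.0.1:3306)/"]
  ## Opt in to the new, type-corrected output format (Telegraf >= 1.6).
  metric_version = 2
```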
+- The `postgresql` plugins now default to using a persistent connection to the database.
+  In environments where TCP connections are terminated the `max_lifetime`
@@ -26,12 +27,24 @@
   is set. It is encouraged to enable this option when possible as the old
   ordering is deprecated.
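A hedged sketch of what bounding connection age could look like, assuming `max_lifetime` (named above) takes a Telegraf duration string and that the connection string matches the plugin's documented form; both values are illustrative:

```toml
[[inputs.postgresql]]
  ## Placeholder connection string.
  address = "host=localhost user=postgres sslmode=disable"
  ## Close and replace connections older than this, so that proxies or
  ## firewalls that silently drop long-lived TCP connections do not cause
  ## collection errors. Illustrative value; see the plugin docs.
  max_lifetime = "1m"
```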
+- The new `http` input configured with `data_format = "json"` can perform the
+  same task as the, now deprecated, `httpjson` input.
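As an illustration, a minimal `http` input standing in for a `httpjson` configuration, built from the `urls` and `data_format` options shown in the sample configuration later in this diff (the endpoint is a placeholder):

```toml
[[inputs.http]]
  ## Placeholder endpoint returning JSON.
  urls = ["http://localhost/metrics"]
  ## Parse the response body with the JSON data format.
  data_format = "json"
```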
-### New Plugins
+### New Inputs

 - [http](./plugins/inputs/http/README.md) - Thanks to @grange74
 - [ipset](./plugins/inputs/ipset/README.md) - Thanks to @sajoupa
 - [nats](./plugins/inputs/nats/README.md) - Thanks to @mjs & @levex

 ### New Processors

 - [override](./plugins/processors/override/README.md) - Thanks to @KarstenSchnitter

 ### New Parsers

 - [dropwizard](./docs/DATA_FORMATS_INPUT.md#dropwizard) - Thanks to @atzoum

 ### Features

 - [#3551](https://github.com/influxdata/telegraf/pull/3551): Add health status mapping from string to int in elasticsearch input.
@@ -56,17 +69,56 @@
 - [#3618](https://github.com/influxdata/telegraf/pull/3618): Add new sqlserver output data model.
 - [#3559](https://github.com/influxdata/telegraf/pull/3559): Add native Go method for finding pids to procstat.
 - [#3722](https://github.com/influxdata/telegraf/pull/3722): Add additional metrics and reverse metric names option to openldap.
 - [#3769](https://github.com/influxdata/telegraf/pull/3769): Add TLS support to the mesos input plugin.
 - [#3546](https://github.com/influxdata/telegraf/pull/3546): Add http input plugin.
 - [#3781](https://github.com/influxdata/telegraf/pull/3781): Add keep alive support to the TCP mode of statsd.
 - [#3783](https://github.com/influxdata/telegraf/pull/3783): Support deadline in ping plugin.
 - [#3765](https://github.com/influxdata/telegraf/pull/3765): Add option to disable labels in prometheus output for string fields.
 - [#3808](https://github.com/influxdata/telegraf/pull/3808): Add shard server stats to the mongodb input plugin.
 - [#3713](https://github.com/influxdata/telegraf/pull/3713): Add server option to unbound plugin.
 - [#3804](https://github.com/influxdata/telegraf/pull/3804): Convert boolean metric values to float in datadog output.
 - [#3799](https://github.com/influxdata/telegraf/pull/3799): Add Solr 3 compatibility.
 - [#3797](https://github.com/influxdata/telegraf/pull/3797): Add sum stat to basicstats aggregator.
 - [#3626](https://github.com/influxdata/telegraf/pull/3626): Add ability to override proxy from environment in http response.
 - [#3853](https://github.com/influxdata/telegraf/pull/3853): Add host to ping timeout log message.
 - [#3773](https://github.com/influxdata/telegraf/pull/3773): Add override processor.
 - [#3814](https://github.com/influxdata/telegraf/pull/3814): Add status_code and result tags and result_type field to http_response input.
 - [#3880](https://github.com/influxdata/telegraf/pull/3880): Added config flag to skip collection of network protocol metrics.
 - [#3927](https://github.com/influxdata/telegraf/pull/3927): Add TLS support to kapacitor input.
 - [#3496](https://github.com/influxdata/telegraf/pull/3496): Add HTTP basic auth support to the http_listener input.
 - [#3452](https://github.com/influxdata/telegraf/issues/3452): Tags in output InfluxDB Line Protocol are now sorted.
 - [#3631](https://github.com/influxdata/telegraf/issues/3631): InfluxDB Line Protocol parser now accepts DOS line endings.
 - [#2496](https://github.com/influxdata/telegraf/issues/2496): An option has been added to skip database creation in the InfluxDB output.
 - [#3366](https://github.com/influxdata/telegraf/issues/3366): Add support for connecting to InfluxDB over a unix domain socket.
 - [#3946](https://github.com/influxdata/telegraf/pull/3946): Add optional unsigned integer support to the influx data format.
 - [#3811](https://github.com/influxdata/telegraf/pull/3811): Add TLS support to zookeeper input.
 - [#2737](https://github.com/influxdata/telegraf/issues/2737): Add filters for container state to docker input.

 ### Bugfixes

 - [#1896](https://github.com/influxdata/telegraf/issues/1896): Fix various mysql data type conversions.
 - [#3810](https://github.com/influxdata/telegraf/issues/3810): Fix metric buffer limit in internal plugin after reload.
 - [#3801](https://github.com/influxdata/telegraf/issues/3801): Fix panic in http_response on invalid regex.
 - [#3873](https://github.com/influxdata/telegraf/issues/3873): Fix socket_listener setting ReadBufferSize on tcp sockets.
 - [#1575](https://github.com/influxdata/telegraf/issues/1575): Add tag for target url to phpfpm input.
 - [#3868](https://github.com/influxdata/telegraf/issues/3868): Fix cannot unmarshal object error in DC/OS input.
 - [#3648](https://github.com/influxdata/telegraf/issues/3648): Fix InfluxDB output not able to reconnect when server address changes.
 - [#3957](https://github.com/influxdata/telegraf/issues/3957): Fix parsing of dos line endings in the smart input.
 - [#3754](https://github.com/influxdata/telegraf/issues/3754): Fix precision truncation when no timestamp included.
 - [#3655](https://github.com/influxdata/telegraf/issues/3655): Fix SNMPv3 connection with Cisco ASA 5515 in snmp input.
 - [#3981](https://github.com/influxdata/telegraf/pull/3981): Export all vars defined in /etc/default/telegraf.

-## v1.5.3 [unreleased]
+## v1.5.3 [2018-03-14]

 ### Bugfixes

 - [#3729](https://github.com/influxdata/telegraf/issues/3729): Set path to / if HOST_MOUNT_PREFIX matches full path.
 - [#3739](https://github.com/influxdata/telegraf/issues/3739): Remove userinfo from url tag in prometheus input.
 - [#3778](https://github.com/influxdata/telegraf/issues/3778): Fix ping plugin not reporting zero durations.
 - [#3697](https://github.com/influxdata/telegraf/issues/3697): Disable keepalive in mqtt output to prevent deadlock.
 - [#3786](https://github.com/influxdata/telegraf/pull/3786): Fix collation difference in sqlserver input.
 - [#3871](https://github.com/influxdata/telegraf/pull/3871): Fix uptime metric in passenger input plugin.
 - [#3851](https://github.com/influxdata/telegraf/issues/3851): Add output of stderr in case of error to exec log message.

 ## v1.5.2 [2018-01-30]
@@ -170,7 +170,7 @@ and `Stop()` methods.
 ### Service Plugin Guidelines

 * Same as the `Plugin` guidelines, except that they must conform to the
-  `inputs.ServiceInput` interface.
+  [`telegraf.ServiceInput`](https://godoc.org/github.com/influxdata/telegraf#ServiceInput) interface.

 ## Output Plugins
Godeps: 12 changes

@@ -31,9 +31,10 @@ github.com/go-redis/redis 73b70592cdaa9e6abdfcfbf97b4a90d80728c836
 github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
 github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
 github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
-github.com/influxdata/tail a395bf99fe07c233f41fba0735fa2b13b58588ea
+github.com/influxdata/tail c43482518d410361b6c383d7aebce33d0471d7bc
 github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
 github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
+github.com/fsnotify/fsnotify c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9
 github.com/jackc/pgx 63f58fd32edb5684b9e9f4cfaac847c6b42b3917
 github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
 github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
@@ -65,15 +66,15 @@ github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
 github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
 github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
 github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
-github.com/shirou/gopsutil 384a55110aa5ae052eb93ea94940548c1e305a99
+github.com/shirou/gopsutil fc04d2dd9a512906a2604242b35275179e250eda
 github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
 github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
 github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
-github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
+github.com/soniah/gosnmp f15472a4cd6f6ea7929e4c7d9f163c49f059924f
 github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
 github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
-github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
-github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
+github.com/stretchr/objx facf9a85c22f48d2f52f2380e4efce1768749a89
+github.com/stretchr/testify 12b6f73e6084dad08a7c6e575284b177ecafbc71
 github.com/tidwall/gjson 0623bd8fbdbf97cc62b98d15108832851a658e59
 github.com/tidwall/match 173748da739a410c5b0b813b956f89ff94730b4c
 github.com/vjeantet/grok d73e972b60935c7fec0b4ffbc904ed39ecaf7efe
@@ -87,7 +88,6 @@ golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
 golang.org/x/text 506f9d5c962f284575e88337e7d9296d27e729d3
 gopkg.in/asn1-ber.v1 4e86f4367175e39f69d9358a5f17b4dda270378d
 gopkg.in/fatih/pool.v2 6e328e67893eb46323ad06f0e92cb9536babbabc
-gopkg.in/fsnotify.v1 a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb
 gopkg.in/gorethink/gorethink.v3 7ab832f7b65573104a555d84a27992ae9ea1f659
 gopkg.in/ldap.v2 8168ee085ee43257585e50c6441aadf54ecb2c9f
 gopkg.in/mgo.v2 3f83fa5005286a7fe593b055f0d7771a7dce4655
Makefile: 29 changes

@@ -3,7 +3,8 @@ VERSION := $(shell git describe --exact-match --tags 2>/dev/null)
 BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
 COMMIT := $(shell git rev-parse --short HEAD)
 GOFILES ?= $(shell git ls-files '*.go')
-GOFMT ?= $(shell gofmt -l $(GOFILES))
+GOFMT ?= $(shell gofmt -l $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)))
 BUILDFLAGS ?=

 ifdef GOBIN
 PATH := $(GOBIN):$(PATH)
@@ -19,16 +20,9 @@ ifdef VERSION
 endif

 all:
-	$(MAKE) fmtcheck
 	$(MAKE) deps
 	$(MAKE) telegraf

-ci-test:
-	$(MAKE) deps
-	$(MAKE) fmtcheck
-	$(MAKE) vet
-	$(MAKE) test
-
 deps:
 	go get -u github.com/golang/lint/golint
 	go get github.com/sparrc/gdm
@@ -48,7 +42,7 @@ test:
 	go test -short ./...

 fmt:
-	@gofmt -w $(GOFILES)
+	@gofmt -w $(filter-out plugins/parsers/influx/machine.go, $(GOFILES))

 fmtcheck:
 	@echo '[INFO] running gofmt to identify incorrectly formatted code...'
@@ -61,9 +55,6 @@ fmtcheck:
 	fi
 	@echo '[INFO] done.'

 lint:
 	golint ./...

 test-windows:
 	go test ./plugins/inputs/ping/...
 	go test ./plugins/inputs/win_perf_counters/...
@@ -73,15 +64,18 @@ test-windows:

 # vet runs the Go source code static analysis tool `vet` to find
 # any common errors.
 vet:
-	@echo 'go vet $$(go list ./...)'
-	@go vet $$(go list ./...) ; if [ $$? -eq 1 ]; then \
+	@echo 'go vet $$(go list ./... | grep -v ./plugins/parsers/influx)'
+	@go vet $$(go list ./... | grep -v ./plugins/parsers/influx) ; if [ $$? -eq 1 ]; then \
 		echo ""; \
 		echo "go vet has found suspicious constructs. Please remediate any reported errors"; \
 		echo "to fix them before submitting code for review."; \
 		exit 1; \
 	fi

-test-all: vet
+test-ci: fmtcheck vet
+	go test -short ./...
+
+test-all: fmtcheck vet
 	go test ./...

 package:
@@ -96,4 +90,7 @@ docker-image:
 	cp build/telegraf*$(COMMIT)*.deb .
 	docker build -f scripts/dev.docker --build-arg "package=telegraf*$(COMMIT)*.deb" -t "telegraf-dev:$(COMMIT)" .

-.PHONY: deps telegraf install test test-windows lint vet test-all package clean docker-image fmtcheck
+plugins/parsers/influx/machine.go: plugins/parsers/influx/machine.go.rl
+	ragel -Z -G2 $^ -o $@
+
+.PHONY: deps telegraf install test test-windows lint vet test-all package clean docker-image fmtcheck uint64
@@ -152,6 +152,7 @@ configuration options.
 * [graylog](./plugins/inputs/graylog)
 * [haproxy](./plugins/inputs/haproxy)
 * [hddtemp](./plugins/inputs/hddtemp)
+* [http](./plugins/inputs/http) (generic HTTP plugin, supports using input data formats)
 * [http_response](./plugins/inputs/http_response)
 * [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
 * [internal](./plugins/inputs/internal)
@@ -207,7 +208,7 @@ configuration options.
 * [teamspeak](./plugins/inputs/teamspeak)
 * [tomcat](./plugins/inputs/tomcat)
 * [twemproxy](./plugins/inputs/twemproxy)
-* [unbound](./plugins/input/unbound)
+* [unbound](./plugins/inputs/unbound)
 * [varnish](./plugins/inputs/varnish)
 * [zfs](./plugins/inputs/zfs)
 * [zookeeper](./plugins/inputs/zookeeper)
@@ -263,6 +264,7 @@ formats may be used with input plugins supporting the `data_format` option:
 ## Processor Plugins

 * [printer](./plugins/processors/printer)
+* [override](./plugins/processors/override)

 ## Aggregator Plugins
@@ -15,63 +15,36 @@ import (
 	"github.com/stretchr/testify/require"
 )

 func TestAdd(t *testing.T) {
 	now := time.Now()
 	metrics := make(chan telegraf.Metric, 10)
 	defer close(metrics)
 	a := NewAccumulator(&TestMetricMaker{}, metrics)

 	a.AddFields("acctest",
 		map[string]interface{}{"value": float64(101)},
 		map[string]string{})
 	a.AddFields("acctest",
 		map[string]interface{}{"value": float64(101)},
 		map[string]string{"acc": "test"})
 	a.AddFields("acctest",
 		map[string]interface{}{"value": float64(101)},
 		map[string]string{"acc": "test"}, now)

 	testm := <-metrics
 	actual := testm.String()
 	assert.Contains(t, actual, "acctest value=101")

 	testm = <-metrics
 	actual = testm.String()
 	assert.Contains(t, actual, "acctest,acc=test value=101")

 	testm = <-metrics
 	actual = testm.String()
 	assert.Equal(t,
 		fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
 		actual)
 }

 func TestAddFields(t *testing.T) {
 	now := time.Now()
 	metrics := make(chan telegraf.Metric, 10)
 	defer close(metrics)
 	a := NewAccumulator(&TestMetricMaker{}, metrics)

 	tags := map[string]string{"foo": "bar"}
 	fields := map[string]interface{}{
 		"usage": float64(99),
 	}
 	a.AddFields("acctest", fields, map[string]string{})
 	a.AddGauge("acctest", fields, map[string]string{"acc": "test"})
 	a.AddCounter("acctest", fields, map[string]string{"acc": "test"}, now)
 	now := time.Now()
 	a.AddCounter("acctest", fields, tags, now)

 	testm := <-metrics
 	actual := testm.String()
 	assert.Contains(t, actual, "acctest usage=99")

 	testm = <-metrics
 	actual = testm.String()
 	assert.Contains(t, actual, "acctest,acc=test usage=99")
 	require.Equal(t, "acctest", testm.Name())
 	actual, ok := testm.GetField("usage")

 	testm = <-metrics
 	actual = testm.String()
 	assert.Equal(t,
 		fmt.Sprintf("acctest,acc=test usage=99 %d\n", now.UnixNano()),
 		actual)
 	require.True(t, ok)
 	require.Equal(t, float64(99), actual)

 	actual, ok = testm.GetTag("foo")
 	require.True(t, ok)
 	require.Equal(t, "bar", actual)

 	tm := testm.Time()
 	// okay if monotonic clock differs
 	require.True(t, now.Equal(tm))

 	tp := testm.Type()
 	require.Equal(t, telegraf.Counter, tp)
 }

 func TestAccAddError(t *testing.T) {
@@ -98,215 +71,61 @@ func TestAccAddError(t *testing.T) {
 	assert.Contains(t, string(errs[2]), "baz")
 }

 func TestAddNoIntervalWithPrecision(t *testing.T) {
 	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
 	metrics := make(chan telegraf.Metric, 10)
 	defer close(metrics)
 	a := NewAccumulator(&TestMetricMaker{}, metrics)
 	a.SetPrecision(0, time.Second)
 func TestSetPrecision(t *testing.T) {
 	tests := []struct {
 		name      string
 		unset     bool
 		precision time.Duration
 		interval  time.Duration
 		timestamp time.Time
 		expected  time.Time
 	}{
 		{
 			name:      "default precision is nanosecond",
 			unset:     true,
 			timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
 			expected:  time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
 		},
 		{
 			name:      "second interval",
 			interval:  time.Second,
 			timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
 			expected:  time.Date(2006, time.February, 10, 12, 0, 0, 0, time.UTC),
 		},
 		{
 			name:      "microsecond interval",
 			interval:  time.Microsecond,
 			timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
 			expected:  time.Date(2006, time.February, 10, 12, 0, 0, 82913000, time.UTC),
 		},
 		{
 			name:      "2 second precision",
 			precision: 2 * time.Second,
 			timestamp: time.Date(2006, time.February, 10, 12, 0, 2, 4, time.UTC),
 			expected:  time.Date(2006, time.February, 10, 12, 0, 2, 0, time.UTC),
 		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			metrics := make(chan telegraf.Metric, 10)

 			a.AddFields("acctest",
 				map[string]interface{}{"value": float64(101)},
 				map[string]string{})
 			a.AddFields("acctest",
 				map[string]interface{}{"value": float64(101)},
 				map[string]string{"acc": "test"})
 			a.AddFields("acctest",
 				map[string]interface{}{"value": float64(101)},
 				map[string]string{"acc": "test"}, now)
 			a := NewAccumulator(&TestMetricMaker{}, metrics)
 			if !tt.unset {
 				a.SetPrecision(tt.precision, tt.interval)
 			}

 			testm := <-metrics
 			actual := testm.String()
 			assert.Contains(t, actual, "acctest value=101")
 			a.AddFields("acctest",
 				map[string]interface{}{"value": float64(101)},
 				map[string]string{},
 				tt.timestamp,
 			)

 			testm = <-metrics
 			actual = testm.String()
 			assert.Contains(t, actual, "acctest,acc=test value=101")
 			testm := <-metrics
 			require.Equal(t, tt.expected, testm.Time())

 			testm = <-metrics
 			actual = testm.String()
 			assert.Equal(t,
 				fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
 				actual)
 }

 func TestAddDisablePrecision(t *testing.T) {
 	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
 	metrics := make(chan telegraf.Metric, 10)
 	defer close(metrics)
 	a := NewAccumulator(&TestMetricMaker{}, metrics)

 	a.SetPrecision(time.Nanosecond, 0)
 	a.AddFields("acctest",
 		map[string]interface{}{"value": float64(101)},
 		map[string]string{})
 	a.AddFields("acctest",
 		map[string]interface{}{"value": float64(101)},
 		map[string]string{"acc": "test"})
 	a.AddFields("acctest",
 		map[string]interface{}{"value": float64(101)},
 		map[string]string{"acc": "test"}, now)

 	testm := <-metrics
 	actual := testm.String()
 	assert.Contains(t, actual, "acctest value=101")

 	testm = <-metrics
 	actual = testm.String()
 	assert.Contains(t, actual, "acctest,acc=test value=101")

 	testm = <-metrics
 	actual = testm.String()
 	assert.Equal(t,
 		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
 		actual)
 }

 func TestAddNoPrecisionWithInterval(t *testing.T) {
 	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
 	metrics := make(chan telegraf.Metric, 10)
 	defer close(metrics)
 	a := NewAccumulator(&TestMetricMaker{}, metrics)

 	a.SetPrecision(0, time.Second)
 	a.AddFields("acctest",
 		map[string]interface{}{"value": float64(101)},
 		map[string]string{})
 	a.AddFields("acctest",
 		map[string]interface{}{"value": float64(101)},
 		map[string]string{"acc": "test"})
 	a.AddFields("acctest",
 		map[string]interface{}{"value": float64(101)},
 		map[string]string{"acc": "test"}, now)

 	testm := <-metrics
 	actual := testm.String()
 	assert.Contains(t, actual, "acctest value=101")

 	testm = <-metrics
 	actual = testm.String()
 	assert.Contains(t, actual, "acctest,acc=test value=101")

 	testm = <-metrics
 	actual = testm.String()
 	assert.Equal(t,
 		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
 		actual)
 }

 func TestDifferentPrecisions(t *testing.T) {
 	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
 	metrics := make(chan telegraf.Metric, 10)
 	defer close(metrics)
 	a := NewAccumulator(&TestMetricMaker{}, metrics)

 	a.SetPrecision(0, time.Second)
 	a.AddFields("acctest",
 		map[string]interface{}{"value": float64(101)},
 		map[string]string{"acc": "test"}, now)
 	testm := <-metrics
 	actual := testm.String()
 	assert.Equal(t,
 		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
 		actual)

 	a.SetPrecision(0, time.Millisecond)
 	a.AddFields("acctest",
 		map[string]interface{}{"value": float64(101)},
 		map[string]string{"acc": "test"}, now)
 	testm = <-metrics
 	actual = testm.String()
 	assert.Equal(t,
 		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800083000000)),
 		actual)

 	a.SetPrecision(0, time.Microsecond)
 	a.AddFields("acctest",
 		map[string]interface{}{"value": float64(101)},
 		map[string]string{"acc": "test"}, now)
 	testm = <-metrics
 	actual = testm.String()
 	assert.Equal(t,
 		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082913000)),
 		actual)

 	a.SetPrecision(0, time.Nanosecond)
 	a.AddFields("acctest",
 		map[string]interface{}{"value": float64(101)},
 		map[string]string{"acc": "test"}, now)
 	testm = <-metrics
 	actual = testm.String()
 	assert.Equal(t,
 		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
 		actual)
 }

 func TestAddGauge(t *testing.T) {
 	now := time.Now()
 	metrics := make(chan telegraf.Metric, 10)
 	defer close(metrics)
 	a := NewAccumulator(&TestMetricMaker{}, metrics)

 	a.AddGauge("acctest",
 		map[string]interface{}{"value": float64(101)},
 		map[string]string{})
 	a.AddGauge("acctest",
 		map[string]interface{}{"value": float64(101)},
 		map[string]string{"acc": "test"})
 	a.AddGauge("acctest",
 		map[string]interface{}{"value": float64(101)},
 		map[string]string{"acc": "test"}, now)

 	testm := <-metrics
 	actual := testm.String()
 	assert.Contains(t, actual, "acctest value=101")
 	assert.Equal(t, testm.Type(), telegraf.Gauge)

 	testm = <-metrics
 	actual = testm.String()
 	assert.Contains(t, actual, "acctest,acc=test value=101")
 	assert.Equal(t, testm.Type(), telegraf.Gauge)

 	testm = <-metrics
 	actual = testm.String()
 	assert.Equal(t,
 		fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
 		actual)
 	assert.Equal(t, testm.Type(), telegraf.Gauge)
 }

 func TestAddCounter(t *testing.T) {
 	now := time.Now()
 	metrics := make(chan telegraf.Metric, 10)
 	defer close(metrics)
 	a := NewAccumulator(&TestMetricMaker{}, metrics)

 	a.AddCounter("acctest",
 		map[string]interface{}{"value": float64(101)},
 		map[string]string{})
 	a.AddCounter("acctest",
 		map[string]interface{}{"value": float64(101)},
 		map[string]string{"acc": "test"})
 	a.AddCounter("acctest",
 		map[string]interface{}{"value": float64(101)},
 		map[string]string{"acc": "test"}, now)

 	testm := <-metrics
 	actual := testm.String()
 	assert.Contains(t, actual, "acctest value=101")
 	assert.Equal(t, testm.Type(), telegraf.Counter)

 	testm = <-metrics
 	actual = testm.String()
 	assert.Contains(t, actual, "acctest,acc=test value=101")
 	assert.Equal(t, testm.Type(), telegraf.Counter)

 	testm = <-metrics
 	actual = testm.String()
 	assert.Equal(t,
 		fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
 		actual)
 	assert.Equal(t, testm.Type(), telegraf.Counter)
 			close(metrics)
 		})
 	}
 }

 type TestMetricMaker struct {
@@ -271,11 +271,9 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, ag
 			// if dropOriginal is set to true, then we will only send this
 			// metric to the aggregators, not the outputs.
 			var dropOriginal bool
-			if !m.IsAggregate() {
-				for _, agg := range a.Config.Aggregators {
-					if ok := agg.Add(m.Copy()); ok {
-						dropOriginal = true
-					}
+			for _, agg := range a.Config.Aggregators {
+				if ok := agg.Add(m.Copy()); ok {
+					dropOriginal = true
 				}
 			}
 			if !dropOriginal {
@@ -1,3 +1,4 @@
+image: Previous Visual Studio 2015
 version: "{build}"

 cache:
@@ -12,11 +13,11 @@ platform: x64

 install:
   - IF NOT EXIST "C:\Cache" mkdir C:\Cache
-  - IF NOT EXIST "C:\Cache\go1.9.2.msi" curl -o "C:\Cache\go1.9.2.msi" https://storage.googleapis.com/golang/go1.9.2.windows-amd64.msi
+  - IF NOT EXIST "C:\Cache\go1.9.4.msi" curl -o "C:\Cache\go1.9.4.msi" https://storage.googleapis.com/golang/go1.9.4.windows-amd64.msi
   - IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip
   - IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip
   - IF EXIST "C:\Go" rmdir /S /Q C:\Go
-  - msiexec.exe /i "C:\Cache\go1.9.2.msi" /quiet
+  - msiexec.exe /i "C:\Cache\go1.9.4.msi" /quiet
   - 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y
   - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
   - go version
@@ -2,12 +2,12 @@

 Telegraf is able to serialize metrics into the following output data formats:

-1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#influx)
-1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#json)
-1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite)
+1. [InfluxDB Line Protocol](#influx)
+1. [JSON](#json)
+1. [Graphite](#graphite)

 Telegraf metrics, like InfluxDB
-[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
+[points](https://docs.influxdata.com/influxdb/latest/concepts/glossary/#point),
 are a combination of four basic parts:

 1. Measurement Name
@@ -49,8 +49,10 @@ I'll go over below.

 # Influx:

-There are no additional configuration options for InfluxDB line-protocol. The
-metrics are serialized directly into InfluxDB line-protocol.
+The `influx` format outputs data as
+[InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/latest/write_protocols/line_protocol_tutorial/).
+This is the recommended format to use unless another format is required for
+interoperability.

 ### Influx Configuration:

@@ -64,6 +66,20 @@ metrics are serialized directly into InfluxDB line-protocol.
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
   data_format = "influx"
+
+  ## Maximum line length in bytes. Useful only for debugging.
+  # influx_max_line_bytes = 0
+
+  ## When true, fields will be output in ascending lexical order. Enabling
+  ## this option will result in decreased performance and is only recommended
+  ## when you need predictable ordering while debugging.
+  # influx_sort_fields = false
+
+  ## When true, Telegraf will output unsigned integers as unsigned values,
+  ## i.e.: `42u`. You will need a version of InfluxDB supporting unsigned
+  ## integer values. Enabling this option will result in field type errors if
+  ## existing data has been written.
+  # influx_uint_support = false
 ```
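For orientation, a hypothetical point serialized in this format reads measurement, comma-separated tags, fields, then a nanosecond timestamp; the names and values below are illustrative only. With `influx_uint_support` enabled, an unsigned integer field would carry the `u` suffix mentioned above (e.g. `42u`):

```
weather,location=us-midwest temperature=82 1465839830100400200
```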
 # Graphite:
@@ -24,6 +24,7 @@ following works:
 - github.com/eapache/go-xerial-snappy [MIT](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE)
 - github.com/eapache/queue [MIT](https://github.com/eapache/queue/blob/master/LICENSE)
 - github.com/eclipse/paho.mqtt.golang [ECLIPSE](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE)
+- github.com/fsnotify/fsnotify [BSD](https://github.com/fsnotify/fsnotify/blob/master/LICENSE)
 - github.com/fsouza/go-dockerclient [BSD](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE)
 - github.com/gobwas/glob [MIT](https://github.com/gobwas/glob/blob/master/LICENSE)
 - github.com/google/go-cmp [BSD](https://github.com/google/go-cmp/blob/master/LICENSE)
@@ -99,7 +100,6 @@ following works:
 - gopkg.in/asn1-ber.v1 [MIT](https://github.com/go-asn1-ber/asn1-ber/blob/v1.2/LICENSE)
 - gopkg.in/dancannon/gorethink.v1 [APACHE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
 - gopkg.in/fatih/pool.v2 [MIT](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
-- gopkg.in/fsnotify.v1 [BSD](https://github.com/fsnotify/fsnotify/blob/v1.4.2/LICENSE)
 - gopkg.in/ldap.v2 [MIT](https://github.com/go-ldap/ldap/blob/v2.5.0/LICENSE)
 - gopkg.in/mgo.v2 [BSD](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
 - gopkg.in/olivere/elastic.v5 [MIT](https://github.com/olivere/elastic/blob/v5.0.38/LICENSE)
@@ -82,31 +82,42 @@
 #                            OUTPUT PLUGINS                                   #
 ###############################################################################

-# Configuration for influxdb server to send metrics to
+# Configuration for sending metrics to InfluxDB
 [[outputs.influxdb]]
-  ## The full HTTP or UDP URL for your InfluxDB instance.
-  ##
-  ## Multiple urls can be specified as part of the same cluster,
-  ## this means that only ONE of the urls will be written to each interval.
-  # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
-  urls = ["http://127.0.0.1:8086"] # required
-  ## The target database for metrics (telegraf will create it if not exists).
-  database = "telegraf" # required
+  ## Multiple URLs can be specified for a single cluster, only ONE of the
+  ## urls will be written to each interval.
+  # urls = ["unix:///var/run/influxdb.sock"]
+  # urls = ["udp://127.0.0.1:8089"]
+  # urls = ["http://127.0.0.1:8086"]
+
+  ## The target database for metrics; will be created as needed.
+  # database = "telegraf"
+
+  ## If true, no CREATE DATABASE queries will be sent. Set to true when using
+  ## Telegraf with a user without permissions to create databases or when the
+  ## database already exists.
+  # skip_database_creation = false
+
+  ## Name of existing retention policy to write to. Empty string writes to
+  ## the default retention policy.
-  retention_policy = ""
-  ## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
-  write_consistency = "any"
+  # retention_policy = ""

-  ## Write timeout (for the InfluxDB client), formatted as a string.
-  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
-  timeout = "5s"
+  ## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
+  # write_consistency = "any"
+
+  ## Timeout for HTTP messages.
+  # timeout = "5s"

   ## HTTP Basic Auth
   # username = "telegraf"
   # password = "metricsmetricsmetricsmetrics"
-  ## Set the user agent for HTTP POSTs (can be useful for log differentiation)
+
+  ## HTTP User-Agent
   # user_agent = "telegraf"
-  ## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
+
+  ## UDP payload size is the maximum packet size to send.
   # udp_payload = 512

   ## Optional SSL Config
@@ -116,14 +127,22 @@
   ## Use SSL but skip chain & host verification
   # insecure_skip_verify = false

-  ## HTTP Proxy Config
+  ## HTTP Proxy override, if unset values the standard proxy environment
+  ## variables are consulted to determine which proxy, if any, should be used.
   # http_proxy = "http://corporate.proxy:3128"

-  ## Optional HTTP headers
+  ## Additional HTTP headers
   # http_headers = {"X-Special-Header" = "Special-Value"}

-  ## Compress each HTTP request payload using GZIP.
-  # content_encoding = "gzip"
+  ## HTTP Content-Encoding for write request body, can be set to "gzip" to
+  ## compress body or "identity" to apply no encoding.
+  # content_encoding = "identity"
+
+  ## When true, Telegraf will output unsigned integers as unsigned values,
+  ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned
+  ## integer values. Enabling this option will result in field type errors if
+  ## existing data has been written.
+  # influx_uint_support = false


 # # Configuration for Amon Server to send metrics to.
@@ -510,6 +529,9 @@
 #   # username = "telegraf"
 #   # password = "metricsmetricsmetricsmetrics"
 #
+#   ## Timeout for write operations. default: 5s
+#   # timeout = "5s"
+#
 #   ## client ID, if not set a random ID is generated
 #   # client_id = ""
 #
@@ -594,12 +616,24 @@
 #   ## Address to listen on
 #   # listen = ":9273"
 #
+#   ## Use TLS
+#   #tls_cert = "/etc/ssl/telegraf.crt"
+#   #tls_key = "/etc/ssl/telegraf.key"
+#
+#   ## Use http basic authentication
+#   #basic_username = "Foo"
+#   #basic_password = "Bar"
+#
 #   ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration
 #   # expiration_interval = "60s"
 #
 #   ## Collectors to enable, valid entries are "gocollector" and "process".
 #   ## If unset, both are enabled.
 #   collectors_exclude = ["gocollector", "process"]
+#
+#   # Send string metrics as Prometheus labels.
+#   # Unless set to false all string metrics will be sent as labels.
+#   string_as_label = true


 # # Configuration for the Riemann server to send metrics to
@@ -699,7 +733,7 @@
 #   #use_regex = false
 #
 #   ## point tags to use as the source name for Wavefront (if none found, host will be used)
-#   #source_override = ["hostname", "snmp_host", "node_host"]
+#   #source_override = ["hostname", "agent_host", "node_host"]
 #
 #   ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default true
 #   #convert_bool = true
@@ -718,6 +752,18 @@
 #                            PROCESSOR PLUGINS                                #
 ###############################################################################

+# # Apply metric modifications using override semantics.
+# [[processors.override]]
+#   ## All modifications on inputs and aggregators can be overridden:
+#   # name_override = "new_name"
+#   # name_prefix = "new_name_prefix"
+#   # name_suffix = "new_name_suffix"
+#
+#   ## Tags to be added (all values must be strings)
+#   # [processors.override.tags]
+#   #   additional_tag = "tag_value"


 # # Print all metrics that pass through this filter.
 # [[processors.printer]]
@@ -792,12 +838,11 @@

 # Read metrics about disk usage by mount point
 [[inputs.disk]]
-  ## By default, telegraf gather stats for all mountpoints.
-  ## Setting mountpoints will restrict the stats to the specified mountpoints.
+  ## By default stats will be gathered for all mount points.
+  ## Set mount_points will restrict the stats to only the specified mount points.
   # mount_points = ["/"]

-  ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
-  ## present on /run, /var/run, /dev/shm or /dev).
+  ## Ignore mount points by filesystem type.
   ignore_fs = ["tmpfs", "devtmpfs", "devfs"]


@@ -806,7 +851,7 @@
   ## By default, telegraf will gather stats for all devices including
   ## disk partitions.
   ## Setting devices will restrict the stats to the specified devices.
-  # devices = ["sda", "sdb"]
+  # devices = ["sda", "sdb", "vd*"]
   ## Uncomment the following line if you need disk serial numbers.
   # skip_serial_number = false
   #
@@ -1061,19 +1106,28 @@

 # # Gather health check statuses from services registered in Consul
 # [[inputs.consul]]
-#   ## Most of these values defaults to the one configured on a Consul's agent level.
-#   ## Optional Consul server address (default: "localhost")
+#   ## Consul server address
 #   # address = "localhost"
-#   ## Optional URI scheme for the Consul server (default: "http")
+#
+#   ## URI scheme for the Consul server, one of "http", "https"
 #   # scheme = "http"
-#   ## Optional ACL token used in every request (default: "")
+#
+#   ## ACL token used in every request
 #   # token = ""
-#   ## Optional username used for request HTTP Basic Authentication (default: "")
+#
+#   ## HTTP Basic Authentication username and password.
 #   # username = ""
-#   ## Optional password used for HTTP Basic Authentication (default: "")
 #   # password = ""
-#   ## Optional data centre to query the health checks from (default: "")
+#
+#   ## Data centre to query the health checks from
 #   # datacentre = ""
+#
+#   ## SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## If false, skip chain & host verification
+#   # insecure_skip_verify = true


 # # Read metrics from one or many couchbase clusters
@@ -1196,6 +1250,11 @@
 #   container_name_include = []
 #   container_name_exclude = []
 #
+#   ## Container states to include and exclude. Globs accepted.
+#   ## When empty only containers in the "running" state will be captured.
+#   # container_state_include = []
+#   # container_state_exclude = []
+#
 #   ## Timeout for docker list, info, and stats commands
 #   timeout = "5s"
 #
@@ -1265,7 +1324,7 @@
 #
 #   ## node_stats is a list of sub-stats that you want to have gathered. Valid options
 #   ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
-#   ## "breakers". Per default, all stats are gathered.
+#   ## "breaker". Per default, all stats are gathered.
 #   # node_stats = ["jvm", "http"]
 #
 #   ## Optional SSL Config
@@ -1414,11 +1473,51 @@
 #   # devices = ["sda", "*"]
+# # Read formatted metrics from one or more HTTP endpoints
+# [[inputs.http]]
+#   ## One or more URLs from which to read formatted metrics
+#   urls = [
+#     "http://localhost/metrics"
+#   ]
+#
+#   ## HTTP method
+#   # method = "GET"
+#
+#   ## Optional HTTP headers
+#   # headers = {"X-Special-Header" = "Special-Value"}
+#
+#   ## Optional HTTP Basic Auth Credentials
+#   # username = "username"
+#   # password = "pa$$word"
+#
+#   ## Tag all metrics with the url
+#   # tag_url = true
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+#
+#   ## Amount of time allowed to complete the HTTP request
+#   # timeout = "5s"
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   # data_format = "influx"
 # # HTTP/HTTPS request given an address a method and a timeout
 # [[inputs.http_response]]
 #   ## Server address (default http://localhost)
 #   # address = "http://localhost"
 #
 #   ## Set http_proxy (telegraf uses the system wide proxy settings if it is not set)
 #   # http_proxy = "http://localhost:8888"
 #
 #   ## Set response_timeout (default 5 seconds)
 #   # response_timeout = "5s"
 #
@@ -1478,6 +1577,13 @@
 #   #     "my_tag_2"
 #   #   ]
 #
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+#
 #   ## HTTP parameters (all values must be strings). For "GET" requests, data
 #   ## will be included in the query. For "POST" requests, data will be included
 #   ## in the request body as "x-www-form-urlencoded".
@@ -1489,13 +1595,6 @@
 #   # [inputs.httpjson.headers]
 #   #   X-Auth-Token = "my-xauth-token"
 #   #   apiVersion = "v1"
-#
-#   ## Optional SSL Config
-#   # ssl_ca = "/etc/telegraf/ca.pem"
-#   # ssl_cert = "/etc/telegraf/cert.pem"
-#   # ssl_key = "/etc/telegraf/key.pem"
-#   ## Use SSL but skip chain & host verification
-#   # insecure_skip_verify = false


 # # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
@@ -1538,7 +1637,10 @@
 # [[inputs.ipmi_sensor]]
 #   ## optionally specify the path to the ipmitool executable
 #   # path = "/usr/bin/ipmitool"
-#   #
+#   ##
+#   ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR
+#   # privilege = "ADMINISTRATOR"
+#   ##
 #   ## optionally specify one or more servers via a url matching
 #   ##  [username[:password]@][protocol[(address)]]
 #   ##  e.g.
@@ -1556,6 +1658,17 @@
 #   timeout = "20s"


+# # Gather packets and bytes counters from Linux ipsets
+# [[inputs.ipset]]
+#   ## By default, we only show sets which have already matched at least 1 packet.
+#   ## set include_unmatched_sets = true to gather them all.
+#   include_unmatched_sets = false
+#   ## Adjust your sudo settings appropriately if using this option ("sudo ipset save")
+#   use_sudo = false
+#   ## The default timeout of 1s for ipset execution can be overridden here:
+#   # timeout = "1s"


 # # Gather packets and bytes throughput from iptables
 # [[inputs.iptables]]
 #   ## iptables require root access on most systems.
@@ -1710,6 +1823,13 @@
 #
 #   ## Time limit for http requests
 #   timeout = "5s"
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
 # # Get kernel statistics from /proc/vmstat
@@ -1789,7 +1909,7 @@
 #   ## Timeout, in ms.
 #   timeout = 100
 #   ## A list of Mesos masters.
-#   masters = ["localhost:5050"]
+#   masters = ["http://localhost:5050"]
 #   ## Master metrics groups to be collected, by default, all enabled.
 #   master_collections = [
 #     "resources",
@@ -1813,6 +1933,13 @@
 #   #   "tasks",
 #   #   "messages",
 #   # ]
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false


 # # Collects scores from a minecraft server's scoreboard using the RCON protocol
@@ -1854,6 +1981,20 @@
 #   #
 #   ## If no servers are specified, then localhost is used as the host.
 #   servers = ["tcp(127.0.0.1:3306)/"]
+#
+#   ## Selects the metric output format.
+#   ##
+#   ## This option exists to maintain backwards compatibility, if you have
+#   ## existing metrics do not set or change this value until you are ready to
+#   ## migrate to the new format.
+#   ##
+#   ## If you do not have existing metrics from this plugin set to the latest
+#   ## version.
+#   ##
+#   ## Telegraf >=1.6: metric_version = 2
+#   ##           <1.6: metric_version = 1 (or unset)
+#   metric_version = 2
+#
 #   ## the limits for metrics from perf_events_statements
 #   perf_events_statements_digest_text_limit = 120
 #   perf_events_statements_limit = 250
@@ -1910,6 +2051,15 @@
 #   ssl_key = "/etc/telegraf/key.pem"


+# # Provides metrics about the state of a NATS server
+# [[inputs.nats]]
+#   ## The address of the monitoring endpoint of the NATS server
+#   server = "http://localhost:8222"
+#
+#   ## Maximum time to receive response
+#   # response_timeout = "5s"


 # # Read metrics about network interface usage
 # [[inputs.net]]
 #   ## By default, telegraf gathers stats from any up interface (excluding loopback)
@@ -1917,6 +2067,12 @@
 #   ## regardless of status.
 #   ##
 #   # interfaces = ["eth0"]
+#   ##
+#   ## On linux systems telegraf also collects protocol stats.
+#   ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
+#   ##
+#   # ignore_protocol_stats = false
+#   ##


 # # TCP or UDP 'ping' given url and collect response time in seconds
@@ -2014,6 +2170,10 @@
 #   #   dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
 #   bind_dn = ""
 #   bind_password = ""
+#
+#   # Reverse metric names so they sort more naturally. Recommended.
+#   # This defaults to false if unset, but is set to true when generating a new config
+#   reverse_metric_names = true


 # # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver
@@ -2087,7 +2247,10 @@
 #   # ping_interval = 1.0
 #   ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
 #   # timeout = 1.0
-#   ## interface to send ping from (ping -I <INTERFACE>)
+#   ## total-ping deadline, in s. 0 == no deadline (ping -w <DEADLINE>)
+#   # deadline = 10
+#   ## interface or source address to send ping from (ping -I <INTERFACE/SRC_ADDR>)
+#   ## on Darwin and Freebsd only source address possible: (ping -S <SRC_ADDR>)
 #   # interface = ""


@@ -2098,90 +2261,6 @@
 #   # queue_directory = "/var/spool/postfix"
# # Read metrics from one or many postgresql servers
|
||||
# [[inputs.postgresql]]
|
||||
# ## specify address via a url matching:
|
||||
# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
|
||||
# ## ?sslmode=[disable|verify-ca|verify-full]
|
||||
# ## or a simple string:
|
||||
# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
|
||||
# ##
|
||||
# ## All connection parameters are optional.
|
||||
# ##
|
||||
# ## Without the dbname parameter, the driver will default to a database
|
||||
# ## with the same name as the user. This dbname is just for instantiating a
|
||||
# ## connection with the server and doesn't restrict the databases we are trying
|
||||
# ## to grab metrics for.
|
||||
# ##
|
||||
# address = "host=localhost user=postgres sslmode=disable"
|
||||
#
|
||||
# ## A list of databases to explicitly ignore. If not specified, metrics for all
|
||||
# ## databases are gathered. Do NOT use with the 'databases' option.
|
||||
# # ignored_databases = ["postgres", "template0", "template1"]
|
||||
#
|
||||
# ## A list of databases to pull metrics about. If not specified, metrics for all
|
||||
# ## databases are gathered. Do NOT use with the 'ignored_databases' option.
|
||||
# # databases = ["app_production", "testing"]
|
||||
|
||||
|
||||
# # Read metrics from one or many postgresql servers
# [[inputs.postgresql_extensible]]
# ## specify address via a url matching:
# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
# ## ?sslmode=[disable|verify-ca|verify-full]
# ## or a simple string:
# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
# #
# ## All connection parameters are optional.
# ## Without the dbname parameter, the driver will default to a database
# ## with the same name as the user. This dbname is just for instantiating a
# ## connection with the server and doesn't restrict the databases we are trying
# ## to grab metrics for.
# #
# address = "host=localhost user=postgres sslmode=disable"
# ## A list of databases to pull metrics about. If not specified, metrics for all
# ## databases are gathered.
# ## databases = ["app_production", "testing"]
# #
# # outputaddress = "db01"
# ## A custom name for the database that will be used as the "server" tag in the
# ## measurement output. If not specified, a default one generated from
# ## the connection address is used.
# #
# ## Define the toml config where the sql queries are stored.
# ## New queries can be added. If withdbname is set to true and no databases
# ## are defined in the 'databases' field, the sql query is ended by an
# ## 'is not null' clause in order to make the query succeed.
# ## Example:
# ## The sqlquery "SELECT * FROM pg_stat_database where datname" becomes
# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
# ## because the databases variable was set to ['postgres', 'pgbench'] and
# ## withdbname was true. Note that if withdbname is set to false you must not
# ## define the where clause (i.e. with the dbname); the tagvalue field is used
# ## to define custom tags (separated by commas).
# ## The optional "measurement" value can be used to override the default
# ## output measurement name ("postgresql").
# #
# ## Structure:
# ## [[inputs.postgresql_extensible.query]]
# ##   sqlquery string
# ##   version string
# ##   withdbname boolean
# ##   tagvalue string (comma separated)
# ##   measurement string
# [[inputs.postgresql_extensible.query]]
#   sqlquery="SELECT * FROM pg_stat_database"
#   version=901
#   withdbname=false
#   tagvalue=""
#   measurement=""
# [[inputs.postgresql_extensible.query]]
#   sqlquery="SELECT * FROM pg_stat_bgwriter"
#   version=901
#   withdbname=false
#   tagvalue="postgresql.stats"

# # Read metrics from one or many PowerDNS servers
# [[inputs.powerdns]]
# ## An array of sockets to gather stats about.
@@ -2191,7 +2270,6 @@

# # Monitor process cpu and memory usage
# [[inputs.procstat]]
# ## Must specify one of: pid_file, exe, or pattern
# ## PID file to monitor process
# pid_file = "/var/run/nginx.pid"
# ## executable name (ie, pgrep <exe>)
@@ -2208,12 +2286,20 @@
# ## override for process_name
# ## This is optional; default is sourced from /proc/<pid>/status
# # process_name = "bar"
#
# ## Field name prefix
# prefix = ""
# ## comment this out if you want raw cpu_time stats
# fielddrop = ["cpu_time_*"]
# ## This is optional; moves pid into a tag instead of a field
# pid_tag = false
# # prefix = ""
#
# ## Add PID as a tag instead of a field; useful to differentiate between
# ## processes whose tags are otherwise the same. Can create a large number
# ## of series, use judiciously.
# # pid_tag = false
#
# ## Method to use when finding process IDs. Can be one of 'pgrep', or
# ## 'native'. The pgrep finder calls the pgrep executable in the PATH while
# ## the native finder performs the search directly in a manner dependent on the
# ## platform. Default is 'pgrep'.
# # pid_finder = "pgrep"

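The pgrep finder described above shells out to the system `pgrep` binary. A minimal, hypothetical sketch of that approach (the command invocation and parsing are assumptions for illustration, not the plugin's actual implementation):

```go
package main

import (
	"fmt"
	"os/exec"
	"strconv"
	"strings"
)

// findPids runs `pgrep <exe>` and parses the whitespace-separated PIDs it
// prints, which is roughly what a pgrep-based finder has to do.
func findPids(exe string) ([]int32, error) {
	out, err := exec.Command("pgrep", exe).Output()
	if err != nil {
		return nil, err
	}
	var pids []int32
	for _, field := range strings.Fields(string(out)) {
		pid, err := strconv.ParseInt(field, 10, 32)
		if err != nil {
			return nil, err
		}
		pids = append(pids, int32(pid))
	}
	return pids, nil
}

func main() {
	pids, err := findPids("nginx")
	fmt.Println(pids, err)
}
```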
# # Read metrics from one or many prometheus clients
@@ -2278,6 +2364,15 @@
# ## A list of queues to gather as the rabbitmq_queue measurement. If not
# ## specified, metrics for all queues are gathered.
# # queues = ["telegraf"]
#
# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not
# ## specified, metrics for all exchanges are gathered.
# # exchanges = ["telegraf"]
#
# ## Queues to include and exclude. Globs accepted.
# ## Note that an empty array for both will include all queues
# queue_name_include = []
# queue_name_exclude = []

# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
@@ -2568,6 +2663,28 @@
# # servers = [
# #  "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
# # ]
#
# ## Optional parameter, setting this to 2 will use a new version
# ## of the collection queries that break compatibility with the original
# ## dashboards.
# query_version = 2
#
# ## If you are using AzureDB, setting this to true will gather resource utilization metrics
# # azuredb = false
#
# ## If you would like to exclude some of the metrics queries, list them here
# ## Possible choices:
# ## - PerformanceCounters
# ## - WaitStatsCategorized
# ## - DatabaseIO
# ## - DatabaseProperties
# ## - CPUHistory
# ## - DatabaseSize
# ## - DatabaseStats
# ## - MemoryClerk
# ## - VolumeSpace
# ## - PerformanceMetrics
# # exclude_query = [ 'DatabaseIO' ]

# # Sysstat metrics collector
@@ -2691,6 +2808,10 @@
#
# ## Use the builtin fielddrop/fieldpass telegraf filters in order to keep/remove specific fields
# fieldpass = ["total_*", "num_*", "time_up", "mem_*"]
#
# ## IP of server to connect to, read from unbound conf default, optionally ':port'
# ## Will look up the IP if given a hostname
# server = "127.0.0.1:8953"

# # A plugin to collect stats from Varnish HTTP Cache
@@ -2721,7 +2842,9 @@
# ## By default, telegraf gathers all zfs stats
# ## If not specified, then default is:
# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
#
# ## For Linux, the default is:
# # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats",
# #   "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"]
# ## By default, don't gather zpool stats
# # poolMetrics = false

@@ -2734,6 +2857,17 @@
# ## If no servers are specified, then localhost is used as the host.
# ## If no port is specified, 2181 is used
# servers = [":2181"]
#
# ## Timeout for metric collections from all servers. Minimum timeout is "1s".
# # timeout = "5s"
#
# ## Optional SSL Config
# # enable_ssl = true
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## If false, skip chain & host verification
# # insecure_skip_verify = true

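The `ssl_*` options above correspond to a standard TLS client setup. A rough stdlib-only sketch of how such options typically map onto a `crypto/tls` configuration (paths reused from the sample; this is not the plugin's actual code):

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"
)

func main() {
	// ssl_ca: trusted CA bundle used to verify the server certificate.
	caPEM, err := ioutil.ReadFile("/etc/telegraf/ca.pem")
	if err != nil {
		log.Fatal(err)
	}
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(caPEM)

	// ssl_cert / ssl_key: client certificate presented to the server.
	cert, err := tls.LoadX509KeyPair("/etc/telegraf/cert.pem", "/etc/telegraf/key.pem")
	if err != nil {
		log.Fatal(err)
	}

	cfg := &tls.Config{
		RootCAs:            pool,
		Certificates:       []tls.Certificate{cert},
		InsecureSkipVerify: false, // insecure_skip_verify
	}
	_ = cfg
}
```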
@@ -2799,6 +2933,11 @@
# ## Add service certificate and key
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
#
# ## Optional username and password to accept for HTTP basic authentication.
# ## You probably want to make sure you have TLS configured above for this.
# # basic_username = "foobar"
# # basic_password = "barfoo"


# # Read metrics from Kafka topic(s)
@@ -2995,6 +3134,105 @@
# data_format = "influx"


# # Read metrics from one or many postgresql servers
# [[inputs.postgresql]]
# ## specify address via a url matching:
# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
# ## ?sslmode=[disable|verify-ca|verify-full]
# ## or a simple string:
# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
# ##
# ## All connection parameters are optional.
# ##
# ## Without the dbname parameter, the driver will default to a database
# ## with the same name as the user. This dbname is just for instantiating a
# ## connection with the server and doesn't restrict the databases we are trying
# ## to grab metrics for.
# ##
# address = "host=localhost user=postgres sslmode=disable"
# ## A custom name for the database that will be used as the "server" tag in the
# ## measurement output. If not specified, a default one generated from
# ## the connection address is used.
# # outputaddress = "db01"
#
# ## connection configuration.
# ## maxlifetime - specify the maximum lifetime of a connection.
# ## default is forever (0s)
# max_lifetime = "0s"
#
# ## A list of databases to explicitly ignore. If not specified, metrics for all
# ## databases are gathered. Do NOT use with the 'databases' option.
# # ignored_databases = ["postgres", "template0", "template1"]
#
# ## A list of databases to pull metrics about. If not specified, metrics for all
# ## databases are gathered. Do NOT use with the 'ignored_databases' option.
# # databases = ["app_production", "testing"]

# # Read metrics from one or many postgresql servers
# [[inputs.postgresql_extensible]]
# ## specify address via a url matching:
# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
# ## ?sslmode=[disable|verify-ca|verify-full]
# ## or a simple string:
# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
# #
# ## All connection parameters are optional.
# ## Without the dbname parameter, the driver will default to a database
# ## with the same name as the user. This dbname is just for instantiating a
# ## connection with the server and doesn't restrict the databases we are trying
# ## to grab metrics for.
# #
# address = "host=localhost user=postgres sslmode=disable"
#
# ## connection configuration.
# ## maxlifetime - specify the maximum lifetime of a connection.
# ## default is forever (0s)
# max_lifetime = "0s"
#
# ## A list of databases to pull metrics about. If not specified, metrics for all
# ## databases are gathered.
# ## databases = ["app_production", "testing"]
# #
# ## A custom name for the database that will be used as the "server" tag in the
# ## measurement output. If not specified, a default one generated from
# ## the connection address is used.
# # outputaddress = "db01"
# #
# ## Define the toml config where the sql queries are stored.
# ## New queries can be added. If withdbname is set to true and no databases
# ## are defined in the 'databases' field, the sql query is ended by an
# ## 'is not null' clause in order to make the query succeed.
# ## Example:
# ## The sqlquery "SELECT * FROM pg_stat_database where datname" becomes
# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
# ## because the databases variable was set to ['postgres', 'pgbench'] and
# ## withdbname was true. Note that if withdbname is set to false you must not
# ## define the where clause (i.e. with the dbname); the tagvalue field is used
# ## to define custom tags (separated by commas).
# ## The optional "measurement" value can be used to override the default
# ## output measurement name ("postgresql").
# #
# ## Structure:
# ## [[inputs.postgresql_extensible.query]]
# ##   sqlquery string
# ##   version string
# ##   withdbname boolean
# ##   tagvalue string (comma separated)
# ##   measurement string
# [[inputs.postgresql_extensible.query]]
#   sqlquery="SELECT * FROM pg_stat_database"
#   version=901
#   withdbname=false
#   tagvalue=""
#   measurement=""
# [[inputs.postgresql_extensible.query]]
#   sqlquery="SELECT * FROM pg_stat_bgwriter"
#   version=901
#   withdbname=false
#   tagvalue="postgresql.stats"

# # Generic socket listener capable of handling multiple socket types.
# [[inputs.socket_listener]]
# ## URL to listen on
@@ -3046,6 +3284,14 @@
# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
# max_tcp_connections = 250
#
# ## Enable TCP keep alive probes (default=false)
# tcp_keep_alive = false
#
# ## Specifies the keep-alive period for an active network connection.
# ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
# ## Defaults to the OS configuration.
# # tcp_keep_alive_period = "2h"
#
# ## Address and port to host UDP listener on
# service_address = ":8125"
#

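Under the hood, a keep-alive period like the one above maps onto the standard library's TCP connection options. A minimal stdlib-only sketch (the listen address is a placeholder; this is not the listener's actual code):

```go
package main

import (
	"log"
	"net"
	"time"
)

func main() {
	ln, err := net.Listen("tcp", ":8094")
	if err != nil {
		log.Fatal(err)
	}
	conn, err := ln.Accept()
	if err != nil {
		log.Fatal(err)
	}
	// Rough equivalent of tcp_keep_alive = true and
	// tcp_keep_alive_period = "2h" on an accepted connection.
	if tcp, ok := conn.(*net.TCPConn); ok {
		tcp.SetKeepAlive(true)
		tcp.SetKeepAlivePeriod(2 * time.Hour)
	}
}
```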
@@ -9,6 +9,7 @@ import (
	"math"
	"os"
	"path/filepath"
	"regexp"
	"runtime"
	"sort"
@@ -1366,6 +1367,42 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
		}
	}

	if node, ok := tbl.Fields["influx_max_line_bytes"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if integer, ok := kv.Value.(*ast.Integer); ok {
				v, err := integer.Int()
				if err != nil {
					return nil, err
				}
				c.InfluxMaxLineBytes = int(v)
			}
		}
	}

	if node, ok := tbl.Fields["influx_sort_fields"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if b, ok := kv.Value.(*ast.Boolean); ok {
				var err error
				c.InfluxSortFields, err = b.Boolean()
				if err != nil {
					return nil, err
				}
			}
		}
	}

	if node, ok := tbl.Fields["influx_uint_support"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if b, ok := kv.Value.(*ast.Boolean); ok {
				var err error
				c.InfluxUintSupport, err = b.Boolean()
				if err != nil {
					return nil, err
				}
			}
		}
	}

	if node, ok := tbl.Fields["json_timestamp_units"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
@@ -1382,6 +1419,9 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
		}
	}

	delete(tbl.Fields, "influx_max_line_bytes")
	delete(tbl.Fields, "influx_sort_fields")
	delete(tbl.Fields, "influx_uint_support")
	delete(tbl.Fields, "data_format")
	delete(tbl.Fields, "prefix")
	delete(tbl.Fields, "template")
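buildSerializer reads each option with the same nested type-assertion pattern over the TOML AST. A small, hypothetical helper isolating that pattern (the function name is invented for illustration; the AST types and methods are the ones used in the diff above):

```go
package config

import "github.com/influxdata/toml/ast"

// getBool descends through the parsed TOML table and returns the boolean
// value for key, plus whether the key was present, mirroring the pattern
// used by buildSerializer above.
func getBool(tbl *ast.Table, key string) (value bool, ok bool, err error) {
	node, exists := tbl.Fields[key]
	if !exists {
		return false, false, nil
	}
	kv, isKV := node.(*ast.KeyValue)
	if !isKV {
		return false, false, nil
	}
	b, isBool := kv.Value.(*ast.Boolean)
	if !isBool {
		return false, false, nil
	}
	value, err = b.Boolean()
	return value, true, err
}
```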
@@ -2,8 +2,6 @@ package models

import (
	"log"
	"math"
	"strings"
	"time"

	"github.com/influxdata/telegraf"
@@ -78,84 +76,6 @@ func makemetric(
		}
	}

	for k, v := range tags {
		if strings.HasSuffix(k, `\`) {
			log.Printf("D! Measurement [%s] tag [%s] "+
				"ends with a backslash, skipping", measurement, k)
			delete(tags, k)
			continue
		} else if strings.HasSuffix(v, `\`) {
			log.Printf("D! Measurement [%s] tag [%s] has a value "+
				"ending with a backslash, skipping", measurement, k)
			delete(tags, k)
			continue
		}
	}

	for k, v := range fields {
		if strings.HasSuffix(k, `\`) {
			log.Printf("D! Measurement [%s] field [%s] "+
				"ends with a backslash, skipping", measurement, k)
			delete(fields, k)
			continue
		}
		// Validate uint64 and float64 fields
		// convert all int & uint types to int64
		switch val := v.(type) {
		case nil:
			// delete nil fields
			delete(fields, k)
		case uint:
			fields[k] = int64(val)
			continue
		case uint8:
			fields[k] = int64(val)
			continue
		case uint16:
			fields[k] = int64(val)
			continue
		case uint32:
			fields[k] = int64(val)
			continue
		case int:
			fields[k] = int64(val)
			continue
		case int8:
			fields[k] = int64(val)
			continue
		case int16:
			fields[k] = int64(val)
			continue
		case int32:
			fields[k] = int64(val)
			continue
		case uint64:
			// InfluxDB does not support writing uint64
			if val < uint64(9223372036854775808) {
				fields[k] = int64(val)
			} else {
				fields[k] = int64(9223372036854775807)
			}
			continue
		case float32:
			fields[k] = float64(val)
			continue
		case float64:
			// NaNs are invalid values in influxdb, skip measurement
			if math.IsNaN(val) || math.IsInf(val, 0) {
				log.Printf("D! Measurement [%s] field [%s] has a NaN or Inf "+
					"field, skipping",
					measurement, k)
				delete(fields, k)
				continue
			}
		case string:
			fields[k] = v
		default:
			fields[k] = v
		}
	}

	m, err := metric.New(measurement, tags, fields, t, mType)
	if err != nil {
		log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
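The uint64 branch above caps values at the largest signed 64-bit integer because the line protocol targeted by this version has no unsigned integer type. A tiny standalone illustration of the same capping rule (constants taken from the code above):

```go
package main

import "fmt"

// capUint64 mirrors the conversion above: values that fit in an int64 are
// converted directly, anything larger is pinned to 2^63 - 1.
func capUint64(v uint64) int64 {
	if v < uint64(9223372036854775808) { // 2^63
		return int64(v)
	}
	return int64(9223372036854775807) // 2^63 - 1
}

func main() {
	fmt.Println(capUint64(10))                  // 10
	fmt.Println(capUint64(9223372036854775810)) // 9223372036854775807
}
```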
@@ -1,7 +1,6 @@
package models

import (
	"fmt"
	"sync"
	"sync/atomic"
	"testing"
@@ -167,69 +166,6 @@ func TestAddDropOriginal(t *testing.T) {
	assert.False(t, ra.Add(m2))
}

// make an untyped, counter, & gauge metric
func TestMakeMetricA(t *testing.T) {
	now := time.Now()
	ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{
		Name: "TestRunningAggregator",
	})
	assert.Equal(t, "aggregators.TestRunningAggregator", ra.Name())

	m := ra.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Untyped,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
		m.String(),
	)
	assert.Equal(
		t,
		m.Type(),
		telegraf.Untyped,
	)

	m = ra.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Counter,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
		m.String(),
	)
	assert.Equal(
		t,
		m.Type(),
		telegraf.Counter,
	)

	m = ra.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Gauge,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
		m.String(),
	)
	assert.Equal(
		t,
		m.Type(),
		telegraf.Gauge,
	)
}

type TestAggregator struct {
	sum int64
}
@@ -5,6 +5,7 @@ import (
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/serializers/influx"
	"github.com/influxdata/telegraf/selfstat"
)

@@ -75,7 +76,11 @@ func (r *RunningInput) MakeMetric(
	)

	if r.trace && m != nil {
		fmt.Print("> " + m.String())
		s := influx.NewSerializer()
		octets, err := s.Serialize(m)
		if err == nil {
			fmt.Print("> " + string(octets))
		}
	}

	r.MetricsGathered.Incr(1)
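The trace path now serializes through the influx serializer instead of calling m.String(). A standalone sketch of that same call sequence, using the metric.New and influx.NewSerializer signatures visible in this changeset (metric name, tags, and values are placeholders):

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/plugins/serializers/influx"
)

func main() {
	// Build a metric using the metric.New signature from this changeset.
	m, err := metric.New("cpu",
		map[string]string{"host": "localhost"},
		map[string]interface{}{"usage": 42.0},
		time.Now(),
	)
	if err != nil {
		panic(err)
	}

	// Serialize to line protocol, mirroring the trace path above.
	s := influx.NewSerializer()
	octets, err := s.Serialize(m)
	if err == nil {
		fmt.Print("> " + string(octets))
	}
}
```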
@@ -1,12 +1,11 @@
package models

import (
	"fmt"
	"math"
	"testing"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
@@ -45,77 +44,17 @@ func TestMakeMetricNilFields(t *testing.T) {
		telegraf.Untyped,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
		m.String(),
	)
}

// make an untyped, counter, & gauge metric
func TestMakeMetric(t *testing.T) {
	now := time.Now()
	ri := NewRunningInput(&testInput{}, &InputConfig{
		Name: "TestRunningInput",
	})

	ri.SetTrace(true)
	assert.Equal(t, true, ri.Trace())
	assert.Equal(t, "inputs.TestRunningInput", ri.Name())

	m := ri.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
	expected, err := metric.New("RITest",
		map[string]string{},
		telegraf.Untyped,
		map[string]interface{}{
			"value": int(101),
		},
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
		m.String(),
	)
	assert.Equal(
		t,
		m.Type(),
		telegraf.Untyped,
	)
	require.NoError(t, err)

	m = ri.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Counter,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
		m.String(),
	)
	assert.Equal(
		t,
		m.Type(),
		telegraf.Counter,
	)

	m = ri.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Gauge,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
		m.String(),
	)
	assert.Equal(
		t,
		m.Type(),
		telegraf.Gauge,
	)
	require.Equal(t, expected, m)
}

func TestMakeMetricWithPluginTags(t *testing.T) {
@@ -137,11 +76,18 @@ func TestMakeMetricWithPluginTags(t *testing.T) {
		telegraf.Untyped,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
		m.String(),

	expected, err := metric.New("RITest",
		map[string]string{
			"foo": "bar",
		},
		map[string]interface{}{
			"value": 101,
		},
		now,
	)
	require.NoError(t, err)
	require.Equal(t, expected, m)
}

func TestMakeMetricFilteredOut(t *testing.T) {
@@ -187,87 +133,17 @@ func TestMakeMetricWithDaemonTags(t *testing.T) {
		telegraf.Untyped,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
		m.String(),
	)
}

// make an untyped, counter, & gauge metric
func TestMakeMetricInfFields(t *testing.T) {
	inf := math.Inf(1)
	ninf := math.Inf(-1)
	now := time.Now()
	ri := NewRunningInput(&testInput{}, &InputConfig{
		Name: "TestRunningInput",
	})

	ri.SetTrace(true)
	assert.Equal(t, true, ri.Trace())

	m := ri.MakeMetric(
		"RITest",
		map[string]interface{}{
			"value": int(101),
			"inf":   inf,
			"ninf":  ninf,
	expected, err := metric.New("RITest",
		map[string]string{
			"foo": "bar",
		},
		map[string]interface{}{
			"value": 101,
		},
		map[string]string{},
		telegraf.Untyped,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
		m.String(),
	)
}

func TestMakeMetricAllFieldTypes(t *testing.T) {
	now := time.Now()
	ri := NewRunningInput(&testInput{}, &InputConfig{
		Name: "TestRunningInput",
	})

	ri.SetTrace(true)
	assert.Equal(t, true, ri.Trace())

	m := ri.MakeMetric(
		"RITest",
		map[string]interface{}{
			"a": int(10),
			"b": int8(10),
			"c": int16(10),
			"d": int32(10),
			"e": uint(10),
			"f": uint8(10),
			"g": uint16(10),
			"h": uint32(10),
			"i": uint64(10),
			"j": float32(10),
			"k": uint64(9223372036854775810),
			"l": "foobar",
			"m": true,
		},
		map[string]string{},
		telegraf.Untyped,
		now,
	)
	assert.Contains(t, m.String(), "a=10i")
	assert.Contains(t, m.String(), "b=10i")
	assert.Contains(t, m.String(), "c=10i")
	assert.Contains(t, m.String(), "d=10i")
	assert.Contains(t, m.String(), "e=10i")
	assert.Contains(t, m.String(), "f=10i")
	assert.Contains(t, m.String(), "g=10i")
	assert.Contains(t, m.String(), "h=10i")
	assert.Contains(t, m.String(), "i=10i")
	assert.Contains(t, m.String(), "j=10")
	assert.NotContains(t, m.String(), "j=10i")
	assert.Contains(t, m.String(), "k=9223372036854775807i")
	assert.Contains(t, m.String(), "l=\"foobar\"")
	assert.Contains(t, m.String(), "m=true")
	require.NoError(t, err)
	require.Equal(t, expected, m)
}

func TestMakeMetricNameOverride(t *testing.T) {
@@ -284,11 +160,15 @@ func TestMakeMetricNameOverride(t *testing.T) {
		telegraf.Untyped,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("foobar value=101i %d\n", now.UnixNano()),
		m.String(),
	expected, err := metric.New("foobar",
		nil,
		map[string]interface{}{
			"value": 101,
		},
		now,
	)
	require.NoError(t, err)
	require.Equal(t, expected, m)
}

func TestMakeMetricNamePrefix(t *testing.T) {
@@ -305,11 +185,15 @@ func TestMakeMetricNamePrefix(t *testing.T) {
		telegraf.Untyped,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("foobar_RITest value=101i %d\n", now.UnixNano()),
		m.String(),
	expected, err := metric.New("foobar_RITest",
		nil,
		map[string]interface{}{
			"value": 101,
		},
		now,
	)
	require.NoError(t, err)
	require.Equal(t, expected, m)
}

func TestMakeMetricNameSuffix(t *testing.T) {
@@ -326,134 +210,15 @@ func TestMakeMetricNameSuffix(t *testing.T) {
		telegraf.Untyped,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest_foobar value=101i %d\n", now.UnixNano()),
		m.String(),
	expected, err := metric.New("RITest_foobar",
		nil,
		map[string]interface{}{
			"value": 101,
		},
		now,
	)
}

func TestMakeMetric_TrailingSlash(t *testing.T) {
	now := time.Now()

	tests := []struct {
		name                string
		measurement         string
		fields              map[string]interface{}
		tags                map[string]string
		expectedNil         bool
		expectedMeasurement string
		expectedFields      map[string]interface{}
		expectedTags        map[string]string
	}{
		{
			name:        "Measurement cannot have trailing slash",
			measurement: `cpu\`,
			fields: map[string]interface{}{
				"value": int64(42),
			},
			tags:        map[string]string{},
			expectedNil: true,
		},
		{
			name:        "Field key with trailing slash dropped",
			measurement: `cpu`,
			fields: map[string]interface{}{
				"value": int64(42),
				`bad\`:  `xyzzy`,
			},
			tags:                map[string]string{},
			expectedMeasurement: `cpu`,
			expectedFields: map[string]interface{}{
				"value": int64(42),
			},
			expectedTags: map[string]string{},
		},
		{
			name:        "Field value with trailing slash okay",
			measurement: `cpu`,
			fields: map[string]interface{}{
				"value": int64(42),
				"ok":    `xyzzy\`,
			},
			tags:                map[string]string{},
			expectedMeasurement: `cpu`,
			expectedFields: map[string]interface{}{
				"value": int64(42),
				"ok":    `xyzzy\`,
			},
			expectedTags: map[string]string{},
		},
		{
			name:        "Must have one field after dropped",
			measurement: `cpu`,
			fields: map[string]interface{}{
				"bad": math.NaN(),
			},
			tags:        map[string]string{},
			expectedNil: true,
		},
		{
			name:        "Tag key with trailing slash dropped",
			measurement: `cpu`,
			fields: map[string]interface{}{
				"value": int64(42),
			},
			tags: map[string]string{
				`host\`: "localhost",
				"a":     "x",
			},
			expectedMeasurement: `cpu`,
			expectedFields: map[string]interface{}{
				"value": int64(42),
			},
			expectedTags: map[string]string{
				"a": "x",
			},
		},
		{
			name:        "Tag value with trailing slash dropped",
			measurement: `cpu`,
			fields: map[string]interface{}{
				"value": int64(42),
			},
			tags: map[string]string{
				`host`: `localhost\`,
				"a":    "x",
			},
			expectedMeasurement: `cpu`,
			expectedFields: map[string]interface{}{
				"value": int64(42),
			},
			expectedTags: map[string]string{
				"a": "x",
			},
		},
	}

	ri := NewRunningInput(&testInput{}, &InputConfig{
		Name: "TestRunningInput",
	})

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			m := ri.MakeMetric(
				tc.measurement,
				tc.fields,
				tc.tags,
				telegraf.Untyped,
				now)

			if tc.expectedNil {
				require.Nil(t, m)
			} else {
				require.NotNil(t, m)
				require.Equal(t, tc.expectedMeasurement, m.Name())
				require.Equal(t, tc.expectedFields, m.Fields())
				require.Equal(t, tc.expectedTags, m.Tags())
			}
		})
	}
	require.NoError(t, err)
	require.Equal(t, expected, m)
}

type testInput struct{}
@@ -87,7 +87,7 @@ func NewRunningOutput(
			map[string]string{"output": name},
		),
	}
	ro.BufferLimit.Incr(int64(ro.MetricBufferLimit))
	ro.BufferLimit.Set(int64(ro.MetricBufferLimit))
	return ro
}

60 metric.go
@@ -17,48 +17,50 @@ const (
	Histogram
)

type Tag struct {
	Key   string
	Value string
}

type Field struct {
	Key   string
	Value interface{}
}

type Metric interface {
	// Serialize serializes the metric into a line-protocol byte buffer,
	// including a newline at the end.
	Serialize() []byte
	// SerializeTo is the same as Serialize, but avoids an allocation.
	// It returns the number of bytes copied into dst.
	SerializeTo(dst []byte) int
	// String is the same as Serialize, but returns a string.
	String() string
	// Copy deep-copies the metric.
	Copy() Metric
	// Split will attempt to return multiple metrics with the same timestamp
	// whose string representations are no longer than maxSize.
	// Metrics with a single field may exceed the requested size.
	Split(maxSize int) []Metric
	// Getting data structure functions
	Name() string
	Tags() map[string]string
	TagList() []*Tag
	Fields() map[string]interface{}
	FieldList() []*Field
	Time() time.Time
	Type() ValueType

	// Name functions
	SetName(name string)
	AddPrefix(prefix string)
	AddSuffix(suffix string)

	// Tag functions
	GetTag(key string) (string, bool)
	HasTag(key string) bool
	AddTag(key, value string)
	RemoveTag(key string)

	// Field functions
	GetField(key string) (interface{}, bool)
	HasField(key string) bool
	AddField(key string, value interface{})
	RemoveField(key string) error
	RemoveField(key string)

	// Name functions
	SetName(name string)
	SetPrefix(prefix string)
	SetSuffix(suffix string)

	// Getting data structure functions
	Name() string
	Tags() map[string]string
	Fields() map[string]interface{}
	Time() time.Time
	UnixNano() int64
	Type() ValueType
	Len() int // returns the length of the serialized metric, including newline
	// HashID returns a unique identifier for the series.
	HashID() uint64

	// aggregator things:
	// Copy returns a deep copy of the Metric.
	Copy() Metric

	// Mark Metric as an aggregate
	SetAggregate(bool)
	IsAggregate() bool
}
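A short usage sketch against the revised interface, iterating with the new TagList and FieldList accessors (the function name is illustrative only, not part of the changeset):

```go
package example

import (
	"fmt"

	"github.com/influxdata/telegraf"
)

// dump walks a metric using the TagList/FieldList accessors added above.
func dump(m telegraf.Metric) {
	fmt.Println(m.Name())
	for _, tag := range m.TagList() {
		fmt.Printf("  tag %s=%s\n", tag.Key, tag.Value)
	}
	for _, field := range m.FieldList() {
		fmt.Printf("  field %s=%v\n", field.Key, field.Value)
	}
}
```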
53 metric/builder.go Normal file
@@ -0,0 +1,53 @@
package metric

import (
	"time"

	"github.com/influxdata/telegraf"
)

type TimeFunc func() time.Time

type Builder struct {
	TimeFunc
	TimePrecision time.Duration

	*metric
}

func NewBuilder() *Builder {
	b := &Builder{
		TimeFunc:      time.Now,
		TimePrecision: 1 * time.Nanosecond,
	}
	b.Reset()
	return b
}

func (b *Builder) SetName(name string) {
	b.name = name
}

func (b *Builder) AddTag(key string, value string) {
	b.metric.AddTag(key, value)
}

func (b *Builder) AddField(key string, value interface{}) {
	b.metric.AddField(key, value)
}

func (b *Builder) SetTime(tm time.Time) {
	b.tm = tm
}

func (b *Builder) Reset() {
	b.metric = &metric{}
}

func (b *Builder) Metric() (telegraf.Metric, error) {
	if b.tm.IsZero() {
		b.tm = b.TimeFunc().Truncate(b.TimePrecision)
	}

	return b.metric, nil
}
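A minimal usage sketch for the new Builder (names and values are placeholders):

```go
package example

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
)

// buildExample shows the intended Builder call sequence.
func buildExample() (telegraf.Metric, error) {
	b := metric.NewBuilder()
	b.SetName("cpu")
	b.AddTag("host", "localhost")
	b.AddField("usage", 42.0)
	// Time defaults to TimeFunc() truncated to TimePrecision when unset.
	return b.Metric()
}
```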
@@ -1,55 +0,0 @@
package metric

import (
	"strings"
)

var (
	// escaper is for escaping:
	//   - tag keys
	//   - tag values
	//   - field keys
	// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
	escaper   = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)
	unEscaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`)

	// nameEscaper is for escaping measurement names only.
	// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
	nameEscaper   = strings.NewReplacer(`,`, `\,`, ` `, `\ `)
	nameUnEscaper = strings.NewReplacer(`\,`, `,`, `\ `, ` `)

	// stringFieldEscaper is for escaping string field values only.
	// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
	stringFieldEscaper = strings.NewReplacer(
		`"`, `\"`,
		`\`, `\\`,
	)
	stringFieldUnEscaper = strings.NewReplacer(
		`\"`, `"`,
		`\\`, `\`,
	)
)

func escape(s string, t string) string {
	switch t {
	case "fieldkey", "tagkey", "tagval":
		return escaper.Replace(s)
	case "name":
		return nameEscaper.Replace(s)
	case "fieldval":
		return stringFieldEscaper.Replace(s)
	}
	return s
}

func unescape(s string, t string) string {
	switch t {
	case "fieldkey", "tagkey", "tagval":
		return unEscaper.Replace(s)
	case "name":
		return nameUnEscaper.Replace(s)
	case "fieldval":
		return stringFieldUnEscaper.Replace(s)
	}
	return s
}
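The replacers in this removed file are plain strings.NewReplacer instances, so the escaping rules can be demonstrated with the standard library alone (a sketch, not the deleted code itself):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Same substitutions the removed escaper applied to tag keys,
	// tag values, and field keys.
	escaper := strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)
	fmt.Println(escaper.Replace(`data center=us east`)) // data\ center\=us\ east
}
```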
@@ -1,38 +0,0 @@
package metric

import (
	"reflect"
	"strconv"
	"unsafe"
)

// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt.
func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {
	s := unsafeBytesToString(b)
	return strconv.ParseInt(s, base, bitSize)
}

// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat.
func parseFloatBytes(b []byte, bitSize int) (float64, error) {
	s := unsafeBytesToString(b)
	return strconv.ParseFloat(s, bitSize)
}

// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool.
func parseBoolBytes(b []byte) (bool, error) {
	return strconv.ParseBool(unsafeBytesToString(b))
}

// unsafeBytesToString converts a []byte to a string without a heap allocation.
//
// It is unsafe, and is intended to prepare input to short-lived functions
// that require strings.
func unsafeBytesToString(in []byte) string {
	src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
	dst := reflect.StringHeader{
		Data: src.Data,
		Len:  src.Len,
	}
	s := *(*string)(unsafe.Pointer(&dst))
	return s
}
@@ -1,103 +0,0 @@
package metric

import (
	"strconv"
	"testing"
	"testing/quick"
)

func TestParseIntBytesEquivalenceFuzz(t *testing.T) {
	f := func(b []byte, base int, bitSize int) bool {
		exp, expErr := strconv.ParseInt(string(b), base, bitSize)
		got, gotErr := parseIntBytes(b, base, bitSize)

		return exp == got && checkErrs(expErr, gotErr)
	}

	cfg := &quick.Config{
		MaxCount: 10000,
	}

	if err := quick.Check(f, cfg); err != nil {
		t.Fatal(err)
	}
}

func TestParseIntBytesValid64bitBase10EquivalenceFuzz(t *testing.T) {
	buf := []byte{}
	f := func(n int64) bool {
		buf = strconv.AppendInt(buf[:0], n, 10)

		exp, expErr := strconv.ParseInt(string(buf), 10, 64)
		got, gotErr := parseIntBytes(buf, 10, 64)

		return exp == got && checkErrs(expErr, gotErr)
	}

	cfg := &quick.Config{
		MaxCount: 10000,
	}

	if err := quick.Check(f, cfg); err != nil {
		t.Fatal(err)
	}
}

func TestParseFloatBytesEquivalenceFuzz(t *testing.T) {
	f := func(b []byte, bitSize int) bool {
		exp, expErr := strconv.ParseFloat(string(b), bitSize)
		got, gotErr := parseFloatBytes(b, bitSize)

		return exp == got && checkErrs(expErr, gotErr)
	}

	cfg := &quick.Config{
		MaxCount: 10000,
	}

	if err := quick.Check(f, cfg); err != nil {
		t.Fatal(err)
	}
}

func TestParseFloatBytesValid64bitEquivalenceFuzz(t *testing.T) {
	buf := []byte{}
	f := func(n float64) bool {
		buf = strconv.AppendFloat(buf[:0], n, 'f', -1, 64)

		exp, expErr := strconv.ParseFloat(string(buf), 64)
		got, gotErr := parseFloatBytes(buf, 64)

		return exp == got && checkErrs(expErr, gotErr)
	}

	cfg := &quick.Config{
		MaxCount: 10000,
	}

	if err := quick.Check(f, cfg); err != nil {
		t.Fatal(err)
	}
}

func TestParseBoolBytesEquivalence(t *testing.T) {
	var buf []byte
	for _, s := range []string{"1", "t", "T", "TRUE", "true", "True", "0", "f", "F", "FALSE", "false", "False", "fail", "TrUe", "FAlSE", "numbers", ""} {
		buf = append(buf[:0], s...)

		exp, expErr := strconv.ParseBool(s)
		got, gotErr := parseBoolBytes(buf)

		if got != exp || !checkErrs(expErr, gotErr) {
			t.Errorf("Failed to parse boolean value %q correctly: wanted (%t, %v), got (%t, %v)", s, exp, expErr, got, gotErr)
		}
	}
}

func checkErrs(a, b error) bool {
	if (a == nil) != (b == nil) {
		return false
	}

	return a == nil || a.Error() == b.Error()
}
785 metric/metric.go
@@ -1,623 +1,278 @@
package metric

import (
	"bytes"
	"fmt"
	"hash/fnv"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/influxdata/telegraf"
)

const MaxInt = int(^uint(0) >> 1)

type metric struct {
	name   string
	tags   []*telegraf.Tag
	fields []*telegraf.Field
	tm     time.Time

	tp        telegraf.ValueType
	aggregate bool
}

func New(
	name string,
	tags map[string]string,
	fields map[string]interface{},
	t time.Time,
	mType ...telegraf.ValueType,
	tm time.Time,
	tp ...telegraf.ValueType,
) (telegraf.Metric, error) {
	if len(name) == 0 {
		return nil, fmt.Errorf("missing measurement name")
	}
	if len(fields) == 0 {
		return nil, fmt.Errorf("%s: must have one or more fields", name)
	}
	if strings.HasSuffix(name, `\`) {
		return nil, fmt.Errorf("%s: measurement name cannot end with a backslash", name)
	}

	var thisType telegraf.ValueType
	if len(mType) > 0 {
		thisType = mType[0]
	var vtype telegraf.ValueType
	if len(tp) > 0 {
		vtype = tp[0]
	} else {
		thisType = telegraf.Untyped
		vtype = telegraf.Untyped
	}

	m := &metric{
		name:  []byte(escape(name, "name")),
		t:     []byte(fmt.Sprint(t.UnixNano())),
		nsec:  t.UnixNano(),
		mType: thisType,
		name:   name,
		tags:   nil,
		fields: nil,
		tm:     tm,
		tp:     vtype,
	}

	// pre-allocate exact size of the tags slice
	taglen := 0
	for k, v := range tags {
		if strings.HasSuffix(k, `\`) {
			return nil, fmt.Errorf("%s: tag key cannot end with a backslash: %s", name, k)
	if len(tags) > 0 {
		m.tags = make([]*telegraf.Tag, 0, len(tags))
		for k, v := range tags {
			m.tags = append(m.tags,
				&telegraf.Tag{Key: k, Value: v})
		}
		if strings.HasSuffix(v, `\`) {
			return nil, fmt.Errorf("%s: tag value cannot end with a backslash: %s", name, v)
		}

		if len(k) == 0 || len(v) == 0 {
			continue
		}
		taglen += 2 + len(escape(k, "tagkey")) + len(escape(v, "tagval"))
	}
	m.tags = make([]byte, taglen)

	i := 0
	for k, v := range tags {
		if len(k) == 0 || len(v) == 0 {
			continue
		}
		m.tags[i] = ','
		i++
		i += copy(m.tags[i:], escape(k, "tagkey"))
		m.tags[i] = '='
		i++
		i += copy(m.tags[i:], escape(v, "tagval"))
		sort.Slice(m.tags, func(i, j int) bool { return m.tags[i].Key < m.tags[j].Key })
	}

	// pre-allocate capacity of the fields slice
	fieldlen := 0
	for k, _ := range fields {
		if strings.HasSuffix(k, `\`) {
			return nil, fmt.Errorf("%s: field key cannot end with a backslash: %s", name, k)
		}

		// 10 bytes is completely arbitrary, but will at least prevent some
		// amount of allocations. There's a small possibility this will create
		// slightly more allocations for a metric that has many short fields.
		fieldlen += len(k) + 10
	}
	m.fields = make([]byte, 0, fieldlen)

	i = 0
	m.fields = make([]*telegraf.Field, 0, len(fields))
	for k, v := range fields {
		if i != 0 {
			m.fields = append(m.fields, ',')
		v := convertField(v)
		if v == nil {
			continue
		}
		m.fields = appendField(m.fields, k, v)
		i++
		m.AddField(k, v)
	}

	return m, nil
}

// indexUnescapedByte finds the index of the first byte equal to b in buf that
// is not escaped. Does not allow the escape char to be escaped. Returns -1 if
// not found.
func indexUnescapedByte(buf []byte, b byte) int {
	var keyi int
	for {
		i := bytes.IndexByte(buf[keyi:], b)
		if i == -1 {
			return -1
		} else if i == 0 {
			break
		}
		keyi += i
		if buf[keyi-1] != '\\' {
			break
		} else {
			keyi++
		}
	}
	return keyi
}

// indexUnescapedByteBackslashEscaping finds the index of the first byte equal
// to b in buf that is not escaped. Allows for the escape char `\` to be
// escaped. Returns -1 if not found.
func indexUnescapedByteBackslashEscaping(buf []byte, b byte) int {
	var keyi int
	for {
		i := bytes.IndexByte(buf[keyi:], b)
		if i == -1 {
			return -1
		} else if i == 0 {
			break
		}
		keyi += i
		if countBackslashes(buf, keyi-1)%2 == 0 {
			break
		} else {
			keyi++
		}
	}
	return keyi
}

// countBackslashes counts the number of preceding backslashes starting at
// the 'start' index.
func countBackslashes(buf []byte, index int) int {
	var count int
	for {
		if index < 0 {
			return count
		}
		if buf[index] == '\\' {
			count++
			index--
		} else {
			break
		}
	}
	return count
}

type metric struct {
	name   []byte
	tags   []byte
	fields []byte
	t      []byte

	mType     telegraf.ValueType
	aggregate bool

	// cached values for reuse in "get" functions
	hashID uint64
	nsec   int64
}

func (m *metric) String() string {
	return string(m.name) + string(m.tags) + " " + string(m.fields) + " " + string(m.t) + "\n"
	return fmt.Sprintf("%s %v %v %d", m.name, m.Tags(), m.Fields(), m.tm.UnixNano())
}

func (m *metric) Name() string {
	return m.name
}

func (m *metric) Tags() map[string]string {
	tags := make(map[string]string, len(m.tags))
	for _, tag := range m.tags {
		tags[tag.Key] = tag.Value
	}
	return tags
}

func (m *metric) TagList() []*telegraf.Tag {
	return m.tags
}

func (m *metric) Fields() map[string]interface{} {
	fields := make(map[string]interface{}, len(m.fields))
	for _, field := range m.fields {
		fields[field.Key] = field.Value
	}

	return fields
}

func (m *metric) FieldList() []*telegraf.Field {
	return m.fields
}

func (m *metric) Time() time.Time {
	return m.tm
}

func (m *metric) Type() telegraf.ValueType {
	return m.tp
}

func (m *metric) SetName(name string) {
	m.name = name
}

func (m *metric) AddPrefix(prefix string) {
	m.name = prefix + m.name
}

func (m *metric) AddSuffix(suffix string) {
	m.name = m.name + suffix
}

func (m *metric) AddTag(key, value string) {
	for i, tag := range m.tags {
		if key > tag.Key {
			continue
		}

		if key == tag.Key {
			tag.Value = value
			return
		}

		m.tags = append(m.tags, nil)
		copy(m.tags[i+1:], m.tags[i:])
		m.tags[i] = &telegraf.Tag{Key: key, Value: value}
		return
	}

	m.tags = append(m.tags, &telegraf.Tag{Key: key, Value: value})
}

func (m *metric) HasTag(key string) bool {
	for _, tag := range m.tags {
		if tag.Key == key {
			return true
		}
	}
	return false
}

func (m *metric) GetTag(key string) (string, bool) {
	for _, tag := range m.tags {
		if tag.Key == key {
			return tag.Value, true
		}
	}
	return "", false
}

func (m *metric) RemoveTag(key string) {
	for i, tag := range m.tags {
		if tag.Key == key {
			copy(m.tags[i:], m.tags[i+1:])
			m.tags[len(m.tags)-1] = nil
			m.tags = m.tags[:len(m.tags)-1]
			return
		}
	}
}

func (m *metric) AddField(key string, value interface{}) {
	for i, field := range m.fields {
		if key == field.Key {
			m.fields[i] = &telegraf.Field{Key: key, Value: convertField(value)}
			return
		}
	}
	m.fields = append(m.fields, &telegraf.Field{Key: key, Value: convertField(value)})
}

func (m *metric) HasField(key string) bool {
	for _, field := range m.fields {
		if field.Key == key {
			return true
		}
	}
	return false
}

func (m *metric) GetField(key string) (interface{}, bool) {
	for _, field := range m.fields {
		if field.Key == key {
			return field.Value, true
		}
	}
	return nil, false
}

func (m *metric) RemoveField(key string) {
	for i, field := range m.fields {
		if field.Key == key {
			copy(m.fields[i:], m.fields[i+1:])
			m.fields[len(m.fields)-1] = nil
			m.fields = m.fields[:len(m.fields)-1]
			return
		}
	}
}

func (m *metric) Copy() telegraf.Metric {
	m2 := &metric{
		name:      m.name,
		tags:      make([]*telegraf.Tag, len(m.tags)),
		fields:    make([]*telegraf.Field, len(m.fields)),
		tm:        m.tm,
		tp:        m.tp,
		aggregate: m.aggregate,
	}

	for i, tag := range m.tags {
		m2.tags[i] = tag
	}

	for i, field := range m.fields {
		m2.fields[i] = field
	}
	return m2
}

func (m *metric) SetAggregate(b bool) {
	m.aggregate = b
	m.aggregate = true
}

func (m *metric) IsAggregate() bool {
	return m.aggregate
}

func (m *metric) Type() telegraf.ValueType {
	return m.mType
}

func (m *metric) Len() int {
	// 3 is for 2 spaces surrounding the fields array + newline at the end.
	return len(m.name) + len(m.tags) + len(m.fields) + len(m.t) + 3
}

func (m *metric) Serialize() []byte {
	tmp := make([]byte, m.Len())
	i := 0
	i += copy(tmp[i:], m.name)
	i += copy(tmp[i:], m.tags)
	tmp[i] = ' '
	i++
	i += copy(tmp[i:], m.fields)
	tmp[i] = ' '
	i++
	i += copy(tmp[i:], m.t)
	tmp[i] = '\n'
	return tmp
}

func (m *metric) SerializeTo(dst []byte) int {
	i := 0
	if i >= len(dst) {
		return i
	}

	i += copy(dst[i:], m.name)
	if i >= len(dst) {
		return i
	}

	i += copy(dst[i:], m.tags)
	if i >= len(dst) {
		return i
	}

	dst[i] = ' '
	i++
	if i >= len(dst) {
		return i
	}

	i += copy(dst[i:], m.fields)
	if i >= len(dst) {
		return i
	}

	dst[i] = ' '
	i++
	if i >= len(dst) {
		return i
	}

	i += copy(dst[i:], m.t)
	if i >= len(dst) {
		return i
	}
	dst[i] = '\n'

	return i + 1
}

func (m *metric) Split(maxSize int) []telegraf.Metric {
	if m.Len() <= maxSize {
		return []telegraf.Metric{m}
	}
	var out []telegraf.Metric

	// constant number of bytes for each metric (in addition to field bytes)
	constant := len(m.name) + len(m.tags) + len(m.t) + 3
	// currently selected fields
	fields := make([]byte, 0, maxSize)

	i := 0
	for {
		if i >= len(m.fields) {
			// hit the end of the field byte slice
			if len(fields) > 0 {
				out = append(out, copyWith(m.name, m.tags, fields, m.t))
			}
			break
		}

		// find the end of the next field
		j := indexUnescapedByte(m.fields[i:], ',')
		if j == -1 {
			j = len(m.fields)
		} else {
			j += i
		}

		// if true, then we need to create a metric _not_ including the currently
		// selected field
		if len(m.fields[i:j])+len(fields)+constant >= maxSize {
			// if false, then we'll create a metric including the currently
			// selected field anyways. This means that the given maxSize is too
			// small for a single field to fit.
			if len(fields) > 0 {
				out = append(out, copyWith(m.name, m.tags, fields, m.t))
			}

			fields = make([]byte, 0, maxSize)
		}
		if len(fields) > 0 {
			fields = append(fields, ',')
		}
		fields = append(fields, m.fields[i:j]...)

		i = j + 1
	}
	return out
}

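Split belongs to the pre-change interface that this changeset removes; it let outputs respect a maximum payload size. A hedged sketch of how a caller might have used it (the helper and its send callback are hypothetical):

```go
package example

import "github.com/influxdata/telegraf"

// sendInChunks splits m into line-protocol chunks of at most maxSize bytes
// and hands each serialized chunk to send. Per the interface contract, a
// metric with a single oversized field may still exceed maxSize.
func sendInChunks(m telegraf.Metric, maxSize int, send func([]byte)) {
	for _, piece := range m.Split(maxSize) {
		send(piece.Serialize())
	}
}
```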
func (m *metric) Fields() map[string]interface{} {
	fieldMap := map[string]interface{}{}
	i := 0
	for {
		if i >= len(m.fields) {
			break
		}
		// end index of field key
		i1 := indexUnescapedByte(m.fields[i:], '=')
		if i1 == -1 {
			break
		}
		// start index of field value
		i2 := i1 + 1

		// end index of field value
		var i3 int
		if m.fields[i:][i2] == '"' {
			i3 = indexUnescapedByteBackslashEscaping(m.fields[i:][i2+1:], '"')
			if i3 == -1 {
				i3 = len(m.fields[i:])
			}
			i3 += i2 + 2 // increment index to the comma
		} else {
			i3 = indexUnescapedByte(m.fields[i:], ',')
			if i3 == -1 {
				i3 = len(m.fields[i:])
			}
		}

		switch m.fields[i:][i2] {
		case '"':
			// string field
			fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = unescape(string(m.fields[i:][i2+1:i3-1]), "fieldval")
		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			// number field
			switch m.fields[i:][i3-1] {
			case 'i':
				// integer field
				n, err := parseIntBytes(m.fields[i:][i2:i3-1], 10, 64)
				if err == nil {
					fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = n
				} else {
					// TODO handle error or just ignore field silently?
				}
			default:
				// float field
				n, err := parseFloatBytes(m.fields[i:][i2:i3], 64)
				if err == nil {
					fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = n
				} else {
					// TODO handle error or just ignore field silently?
				}
			}
		case 'T', 't':
			fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = true
		case 'F', 'f':
			fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = false
		default:
			// TODO handle unsupported field type
		}

		i += i3 + 1
	}

	return fieldMap
}

func (m *metric) Tags() map[string]string {
	tagMap := map[string]string{}
	if len(m.tags) == 0 {
		return tagMap
	}

	i := 0
	for {
		// start index of tag key
		i0 := indexUnescapedByte(m.tags[i:], ',') + 1
		if i0 == 0 {
			// didn't find a tag start
			break
		}
		// end index of tag key
		i1 := indexUnescapedByte(m.tags[i:], '=')
		// start index of tag value
		i2 := i1 + 1
		// end index of tag value (starting from i2)
		i3 := indexUnescapedByte(m.tags[i+i2:], ',')
		if i3 == -1 {
			tagMap[unescape(string(m.tags[i:][i0:i1]), "tagkey")] = unescape(string(m.tags[i:][i2:]), "tagval")
			break
		}
		tagMap[unescape(string(m.tags[i:][i0:i1]), "tagkey")] = unescape(string(m.tags[i:][i2:i2+i3]), "tagval")
		// increment start index for the next tag
		i += i2 + i3
	}

	return tagMap
}

func (m *metric) Name() string {
	return unescape(string(m.name), "name")
}

func (m *metric) Time() time.Time {
	// assume metric has been verified already and ignore error:
	if m.nsec == 0 {
		m.nsec, _ = parseIntBytes(m.t, 10, 64)
	}
	return time.Unix(0, m.nsec)
}

func (m *metric) UnixNano() int64 {
	// assume metric has been verified already and ignore error:
	if m.nsec == 0 {
		m.nsec, _ = parseIntBytes(m.t, 10, 64)
	}
	return m.nsec
}

func (m *metric) SetName(name string) {
	m.hashID = 0
	m.name = []byte(nameEscaper.Replace(name))
}

func (m *metric) SetPrefix(prefix string) {
	m.hashID = 0
	m.name = append([]byte(nameEscaper.Replace(prefix)), m.name...)
}

func (m *metric) SetSuffix(suffix string) {
	m.hashID = 0
	m.name = append(m.name, []byte(nameEscaper.Replace(suffix))...)
}

func (m *metric) AddTag(key, value string) {
	m.RemoveTag(key)
	m.tags = append(m.tags, []byte(","+escape(key, "tagkey")+"="+escape(value, "tagval"))...)
}

func (m *metric) HasTag(key string) bool {
	i := bytes.Index(m.tags, []byte(escape(key, "tagkey")+"="))
	if i == -1 {
		return false
	}
	return true
}

func (m *metric) RemoveTag(key string) {
	m.hashID = 0

	i := bytes.Index(m.tags, []byte(escape(key, "tagkey")+"="))
	if i == -1 {
		return
	}

	tmp := m.tags[0 : i-1]
	j := indexUnescapedByte(m.tags[i:], ',')
	if j != -1 {
		tmp = append(tmp, m.tags[i+j:]...)
	}
	m.tags = tmp
	return
}

func (m *metric) AddField(key string, value interface{}) {
	m.fields = append(m.fields, ',')
	m.fields = appendField(m.fields, key, value)
}

func (m *metric) HasField(key string) bool {
	i := bytes.Index(m.fields, []byte(escape(key, "tagkey")+"="))
	if i == -1 {
		return false
	}
	return true
}

func (m *metric) RemoveField(key string) error {
	i := bytes.Index(m.fields, []byte(escape(key, "tagkey")+"="))
	if i == -1 {
		return nil
	}

	var tmp []byte
	if i != 0 {
		tmp = m.fields[0 : i-1]
	}
	j := indexUnescapedByte(m.fields[i:], ',')
	if j != -1 {
		tmp = append(tmp, m.fields[i+j:]...)
	}

	if len(tmp) == 0 {
		return fmt.Errorf("Metric cannot remove final field: %s", m.fields)
	}

	m.fields = tmp
	return nil
}

func (m *metric) Copy() telegraf.Metric {
|
||||
return copyWith(m.name, m.tags, m.fields, m.t)
|
||||
}
|
||||
|
||||
func copyWith(name, tags, fields, t []byte) telegraf.Metric {
|
||||
out := metric{
|
||||
name: make([]byte, len(name)),
|
||||
tags: make([]byte, len(tags)),
|
||||
fields: make([]byte, len(fields)),
|
||||
t: make([]byte, len(t)),
|
||||
}
|
||||
copy(out.name, name)
|
||||
copy(out.tags, tags)
|
||||
copy(out.fields, fields)
|
||||
copy(out.t, t)
|
||||
return &out
|
||||
}
|
||||
|
||||
func (m *metric) HashID() uint64 {
	if m.hashID == 0 {
		h := fnv.New64a()
		h.Write(m.name)

		tags := m.Tags()
		tmp := make([]string, len(tags))
		i := 0
		for k, v := range tags {
			tmp[i] = k + v
			i++
		}
		sort.Strings(tmp)

		for _, s := range tmp {
			h.Write([]byte(s))
		}

		m.hashID = h.Sum64()
	}
	return m.hashID
}

// The replacement HashID on the incoming side of this diff hashes the
// structured tag list directly:
func (m *metric) HashID() uint64 {
	h := fnv.New64a()
	h.Write([]byte(m.name))
	for _, tag := range m.tags {
		h.Write([]byte(tag.Key))
		h.Write([]byte(tag.Value))
	}
	return h.Sum64()
}

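A standalone sketch of the hashing scheme used by the first version above, assuming only the standard library: FNV-1a over the name plus the sorted key+value pairs, which is what makes the hash stable across tag-map iteration order. seriesID is an illustrative name, not part of the package.

package main

import (
	"fmt"
	"hash/fnv"
	"sort"
)

// seriesID mirrors the cached HashID scheme above: FNV-1a over the
// measurement name followed by the sorted key+value tag pairs, so two
// metrics in the same series hash identically regardless of map order.
func seriesID(name string, tags map[string]string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(name))

	pairs := make([]string, 0, len(tags))
	for k, v := range tags {
		pairs = append(pairs, k+v)
	}
	sort.Strings(pairs)
	for _, p := range pairs {
		h.Write([]byte(p))
	}
	return h.Sum64()
}

func main() {
	a := seriesID("cpu", map[string]string{"host": "a", "dc": "east"})
	b := seriesID("cpu", map[string]string{"dc": "east", "host": "a"})
	fmt.Println(a == b) // true: same series, same hash
}
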
func appendField(b []byte, k string, v interface{}) []byte {
	if v == nil {
		return b
	}
	b = append(b, []byte(escape(k, "tagkey")+"=")...)

	// check popular types first
	switch v := v.(type) {
	case float64:
		b = strconv.AppendFloat(b, v, 'f', -1, 64)
	case int64:
		b = strconv.AppendInt(b, v, 10)
		b = append(b, 'i')
	case string:
		b = append(b, '"')
		b = append(b, []byte(escape(v, "fieldval"))...)
		b = append(b, '"')
	case bool:
		b = strconv.AppendBool(b, v)
	case int32:
		b = strconv.AppendInt(b, int64(v), 10)
		b = append(b, 'i')
	case int16:
		b = strconv.AppendInt(b, int64(v), 10)
		b = append(b, 'i')
	case int8:
		b = strconv.AppendInt(b, int64(v), 10)
		b = append(b, 'i')
	case int:
		b = strconv.AppendInt(b, int64(v), 10)
		b = append(b, 'i')
	case uint64:
		// Cap uints above the maximum int value
		var intv int64
		if v <= uint64(MaxInt) {
			intv = int64(v)
		} else {
			intv = int64(MaxInt)
		}
		b = strconv.AppendInt(b, intv, 10)
		b = append(b, 'i')
	case uint32:
		b = strconv.AppendInt(b, int64(v), 10)
		b = append(b, 'i')
	case uint16:
		b = strconv.AppendInt(b, int64(v), 10)
		b = append(b, 'i')
	case uint8:
		b = strconv.AppendInt(b, int64(v), 10)
		b = append(b, 'i')
	case uint:
		// Cap uints above the maximum int value
		var intv int64
		if v <= uint(MaxInt) {
			intv = int64(v)
		} else {
			intv = int64(MaxInt)
		}
		b = strconv.AppendInt(b, intv, 10)
		b = append(b, 'i')
	case float32:
		b = strconv.AppendFloat(b, float64(v), 'f', -1, 32)
	case []byte:
		b = append(b, v...)
	default:
		// Can't determine the type, so convert to string
		b = append(b, '"')
		b = append(b, []byte(escape(fmt.Sprintf("%v", v), "fieldval"))...)
		b = append(b, '"')
	}

	return b
}

// Convert field to a supported type or nil if unconvertible
func convertField(v interface{}) interface{} {
	switch v := v.(type) {
	case float64:
		return v
	case int64:
		return v
	case string:
		return v
	case bool:
		return v
	case int:
		return int64(v)
	case uint:
		return uint64(v)
	case uint64:
		return uint64(v)
	case []byte:
		return string(v)
	case int32:
		return int64(v)
	case int16:
		return int64(v)
	case int8:
		return int64(v)
	case uint32:
		return uint64(v)
	case uint16:
		return uint64(v)
	case uint8:
		return uint64(v)
	case float32:
		return float64(v)
	default:
		return nil
	}
}

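To make the narrowing rules of convertField concrete, here is a self-contained sketch; normalizeField is a trimmed illustrative copy, not the package function. Every signed width collapses to int64, every unsigned to uint64, float32 to float64, []byte to string, and unsupported types to nil.

package main

import "fmt"

// normalizeField shows the same narrowing rules as convertField above,
// copied here only so the example compiles on its own.
func normalizeField(v interface{}) interface{} {
	switch v := v.(type) {
	case float64, int64, uint64, string, bool:
		return v
	case int:
		return int64(v)
	case int32:
		return int64(v)
	case int16:
		return int64(v)
	case int8:
		return int64(v)
	case uint:
		return uint64(v)
	case uint32:
		return uint64(v)
	case uint16:
		return uint64(v)
	case uint8:
		return uint64(v)
	case float32:
		return float64(v)
	case []byte:
		return string(v)
	default:
		return nil
	}
}

func main() {
	fmt.Printf("%T %T %T %v\n",
		normalizeField(int8(1)),      // int64
		normalizeField(float32(2.5)), // float64
		normalizeField([]byte("x")),  // string
		normalizeField([]int{1, 2}),  // <nil>: unsupported
	)
}
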
@@ -1,148 +0,0 @@
package metric

import (
	"fmt"
	"testing"
	"time"

	"github.com/influxdata/telegraf"
)

// vars for making sure that the compiler doesn't optimize out the benchmarks:
var (
	s      string
	I      interface{}
	tags   map[string]string
	fields map[string]interface{}
)

func BenchmarkNewMetric(b *testing.B) {
	var mt telegraf.Metric
	for n := 0; n < b.N; n++ {
		mt, _ = New("test_metric",
			map[string]string{
				"test_tag_1": "tag_value_1",
				"test_tag_2": "tag_value_2",
				"test_tag_3": "tag_value_3",
			},
			map[string]interface{}{
				"string_field": "string",
				"int_field":    int64(1000),
				"float_field":  float64(2.1),
			},
			time.Now(),
		)
	}
	s = string(mt.String())
}

func BenchmarkAddTag(b *testing.B) {
	var mt telegraf.Metric
	mt = &metric{
		name:   []byte("cpu"),
		tags:   []byte(",host=localhost"),
		fields: []byte("a=101"),
		t:      []byte("1480614053000000000"),
	}
	for n := 0; n < b.N; n++ {
		mt.AddTag("foo", "bar")
	}
	s = string(mt.String())
}

func BenchmarkSplit(b *testing.B) {
	var mt telegraf.Metric
	mt = &metric{
		name:   []byte("cpu"),
		tags:   []byte(",host=localhost"),
		fields: []byte("a=101,b=10i,c=10101,d=101010,e=42"),
		t:      []byte("1480614053000000000"),
	}
	var metrics []telegraf.Metric
	for n := 0; n < b.N; n++ {
		metrics = mt.Split(60)
	}
	s = string(metrics[0].String())
}

func BenchmarkTags(b *testing.B) {
	for n := 0; n < b.N; n++ {
		var mt, _ = New("test_metric",
			map[string]string{
				"test_tag_1": "tag_value_1",
				"test_tag_2": "tag_value_2",
				"test_tag_3": "tag_value_3",
			},
			map[string]interface{}{
				"string_field": "string",
				"int_field":    int64(1000),
				"float_field":  float64(2.1),
			},
			time.Now(),
		)
		tags = mt.Tags()
	}
	s = fmt.Sprint(tags)
}

func BenchmarkFields(b *testing.B) {
	for n := 0; n < b.N; n++ {
		var mt, _ = New("test_metric",
			map[string]string{
				"test_tag_1": "tag_value_1",
				"test_tag_2": "tag_value_2",
				"test_tag_3": "tag_value_3",
			},
			map[string]interface{}{
				"string_field": "string",
				"int_field":    int64(1000),
				"float_field":  float64(2.1),
			},
			time.Now(),
		)
		fields = mt.Fields()
	}
	s = fmt.Sprint(fields)
}

func BenchmarkString(b *testing.B) {
	mt, _ := New("test_metric",
		map[string]string{
			"test_tag_1": "tag_value_1",
			"test_tag_2": "tag_value_2",
			"test_tag_3": "tag_value_3",
		},
		map[string]interface{}{
			"string_field": "string",
			"int_field":    int64(1000),
			"float_field":  float64(2.1),
		},
		time.Now(),
	)
	var S string
	for n := 0; n < b.N; n++ {
		S = mt.String()
	}
	s = S
}

func BenchmarkSerialize(b *testing.B) {
	mt, _ := New("test_metric",
		map[string]string{
			"test_tag_1": "tag_value_1",
			"test_tag_2": "tag_value_2",
			"test_tag_3": "tag_value_3",
		},
		map[string]interface{}{
			"string_field": "string",
			"int_field":    int64(1000),
			"float_field":  float64(2.1),
		},
		time.Now(),
	)
	var B []byte
	for n := 0; n < b.N; n++ {
		B = mt.Serialize()
	}
	s = string(B)
}

@@ -1,14 +1,10 @@
package metric

import (
	"fmt"
	"math"
	"regexp"
	"testing"
	"time"

	"github.com/influxdata/telegraf"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

@@ -25,102 +21,184 @@ func TestNewMetric(t *testing.T) {
		"usage_busy": float64(1),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)
	require.NoError(t, err)

	assert.Equal(t, telegraf.Untyped, m.Type())
	assert.Equal(t, tags, m.Tags())
	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, "cpu", m.Name())
	assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
	assert.Equal(t, now.UnixNano(), m.UnixNano())

	require.Equal(t, "cpu", m.Name())
	require.Equal(t, tags, m.Tags())
	require.Equal(t, fields, m.Fields())
	require.Equal(t, 2, len(m.FieldList()))
	require.Equal(t, now, m.Time())
}

func TestNewErrors(t *testing.T) {
	// creating a metric with an empty name produces an error:
	m, err := New(
		"",
		map[string]string{
			"datacenter": "us-east-1",
			"mytag":      "foo",
			"another":    "tag",
		},
		map[string]interface{}{
			"value": float64(1),
		},
		time.Now(),
	)
	assert.Error(t, err)
	assert.Nil(t, m)

	// creating a metric with empty fields produces an error:
	m, err = New(
		"foobar",
		map[string]string{
			"datacenter": "us-east-1",
			"mytag":      "foo",
			"another":    "tag",
		},
		map[string]interface{}{},
		time.Now(),
	)
	assert.Error(t, err)
	assert.Nil(t, m)
}

func TestNewMetric_Tags(t *testing.T) {
	now := time.Now()
	tags := map[string]string{
		"host":       "localhost",
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"value": float64(1),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.True(t, m.HasTag("host"))
	assert.True(t, m.HasTag("datacenter"))

	m.AddTag("newtag", "foo")
	assert.True(t, m.HasTag("newtag"))

	m.RemoveTag("host")
	assert.False(t, m.HasTag("host"))
	assert.True(t, m.HasTag("newtag"))
	assert.True(t, m.HasTag("datacenter"))

	m.RemoveTag("datacenter")
	assert.False(t, m.HasTag("datacenter"))
	assert.True(t, m.HasTag("newtag"))
	assert.Equal(t, map[string]string{"newtag": "foo"}, m.Tags())

	m.RemoveTag("newtag")
	assert.False(t, m.HasTag("newtag"))
	assert.Equal(t, map[string]string{}, m.Tags())

	assert.Equal(t, "cpu value=1 "+fmt.Sprint(now.UnixNano())+"\n", m.String())
}

func baseMetric() telegraf.Metric {
	tags := map[string]string{}
	fields := map[string]interface{}{
		"value": float64(1),
	}
	now := time.Now()

	m, err := New("cpu", tags, fields, now)
	if err != nil {
		panic(err)
	}
	return m
}

func TestHasTag(t *testing.T) {
	m := baseMetric()

	require.False(t, m.HasTag("host"))
	m.AddTag("host", "localhost")
	require.True(t, m.HasTag("host"))
	m.RemoveTag("host")
	require.False(t, m.HasTag("host"))
}

func TestAddTagOverwrites(t *testing.T) {
	m := baseMetric()

	m.AddTag("host", "localhost")
	m.AddTag("host", "example.org")

	value, ok := m.GetTag("host")
	require.True(t, ok)
	require.Equal(t, "example.org", value)
}

func TestRemoveTagNoEffectOnMissingTags(t *testing.T) {
	m := baseMetric()

	m.RemoveTag("foo")
	m.AddTag("a", "x")
	m.RemoveTag("foo")
	m.RemoveTag("bar")
	value, ok := m.GetTag("a")
	require.True(t, ok)
	require.Equal(t, "x", value)
}

func TestGetTag(t *testing.T) {
	m := baseMetric()

	value, ok := m.GetTag("host")
	require.False(t, ok)

	m.AddTag("host", "localhost")

	value, ok = m.GetTag("host")
	require.True(t, ok)
	require.Equal(t, "localhost", value)

	m.RemoveTag("host")
	value, ok = m.GetTag("host")
	require.False(t, ok)
}

func TestHasField(t *testing.T) {
	m := baseMetric()

	require.False(t, m.HasField("x"))
	m.AddField("x", 42.0)
	require.True(t, m.HasField("x"))
	m.RemoveField("x")
	require.False(t, m.HasField("x"))
}

func TestAddFieldOverwrites(t *testing.T) {
	m := baseMetric()

	m.AddField("value", 1.0)
	m.AddField("value", 42.0)

	value, ok := m.GetField("value")
	require.True(t, ok)
	require.Equal(t, 42.0, value)
}

func TestAddFieldChangesType(t *testing.T) {
	m := baseMetric()

	m.AddField("value", 1.0)
	m.AddField("value", "xyzzy")

	value, ok := m.GetField("value")
	require.True(t, ok)
	require.Equal(t, "xyzzy", value)
}

func TestRemoveFieldNoEffectOnMissingFields(t *testing.T) {
	m := baseMetric()

	m.RemoveField("foo")
	m.AddField("a", "x")
	m.RemoveField("foo")
	m.RemoveField("bar")
	value, ok := m.GetField("a")
	require.True(t, ok)
	require.Equal(t, "x", value)
}

func TestGetField(t *testing.T) {
	m := baseMetric()

	value, ok := m.GetField("foo")
	require.False(t, ok)

	m.AddField("foo", "bar")

	value, ok = m.GetField("foo")
	require.True(t, ok)
	require.Equal(t, "bar", value)

	m.RemoveField("foo")
	value, ok = m.GetField("foo")
	require.False(t, ok)
}

func TestTagList_Sorted(t *testing.T) {
	m := baseMetric()

	m.AddTag("b", "y")
	m.AddTag("c", "z")
	m.AddTag("a", "x")

	taglist := m.TagList()
	require.Equal(t, "a", taglist[0].Key)
	require.Equal(t, "b", taglist[1].Key)
	require.Equal(t, "c", taglist[2].Key)
}

func TestSerialize(t *testing.T) {
	now := time.Now()
	tags := map[string]string{
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"value": float64(1),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Equal(t,
		[]byte("cpu,datacenter=us-east-1 value=1 "+fmt.Sprint(now.UnixNano())+"\n"),
		m.Serialize())

	m.RemoveTag("datacenter")
	assert.Equal(t,
		[]byte("cpu value=1 "+fmt.Sprint(now.UnixNano())+"\n"),
		m.Serialize())
}

func TestEquals(t *testing.T) {
	now := time.Now()

	m1, err := New("cpu",
		map[string]string{
			"host": "localhost",
		},
		map[string]interface{}{
			"value": 42.0,
		},
		now,
	)
	require.NoError(t, err)

	m2, err := New("cpu",
		map[string]string{
			"host": "localhost",
		},
		map[string]interface{}{
			"value": 42.0,
		},
		now,
	)
	require.NoError(t, err)

	lhs := m1.(*metric)
	require.Equal(t, lhs, m2)

	m3 := m2.Copy()
	require.Equal(t, lhs, m3)
	m3.AddTag("a", "x")
	require.NotEqual(t, lhs, m3)
}

func TestHashID(t *testing.T) {
@@ -171,567 +249,62 @@ func TestHashID_Consistency(t *testing.T) {
	)
	hash := m.HashID()

	for i := 0; i < 1000; i++ {
		m2, _ := New(
			"cpu",
			map[string]string{
				"datacenter": "us-east-1",
				"mytag":      "foo",
				"another":    "tag",
			},
			map[string]interface{}{
				"value": float64(1),
			},
			time.Now(),
		)
		assert.Equal(t, hash, m2.HashID())
	}

	m2, _ := New(
		"cpu",
		map[string]string{
			"datacenter": "us-east-1",
			"mytag":      "foo",
			"another":    "tag",
		},
		map[string]interface{}{
			"value": float64(1),
		},
		time.Now(),
	)
	assert.Equal(t, hash, m2.HashID())

	m3 := m.Copy()
	assert.Equal(t, m2.HashID(), m3.HashID())
}

func TestNewMetric_NameModifiers(t *testing.T) {
	now := time.Now()

	tags := map[string]string{}
	fields := map[string]interface{}{
		"value": float64(1),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	hash := m.HashID()
	suffix := fmt.Sprintf(" value=1 %d\n", now.UnixNano())
	assert.Equal(t, "cpu"+suffix, m.String())

	m.SetPrefix("pre_")
	assert.NotEqual(t, hash, m.HashID())
	hash = m.HashID()
	assert.Equal(t, "pre_cpu"+suffix, m.String())

	m.SetSuffix("_post")
	assert.NotEqual(t, hash, m.HashID())
	hash = m.HashID()
	assert.Equal(t, "pre_cpu_post"+suffix, m.String())

	m.SetName("mem")
	assert.NotEqual(t, hash, m.HashID())
	assert.Equal(t, "mem"+suffix, m.String())
}

func TestSetName(t *testing.T) {
	m := baseMetric()
	m.SetName("foo")
	require.Equal(t, "foo", m.Name())
}

func TestAddPrefix(t *testing.T) {
	m := baseMetric()
	m.AddPrefix("foo_")
	require.Equal(t, "foo_cpu", m.Name())
	m.AddPrefix("foo_")
	require.Equal(t, "foo_foo_cpu", m.Name())
}

func TestAddSuffix(t *testing.T) {
	m := baseMetric()
	m.AddSuffix("_foo")
	require.Equal(t, "cpu_foo", m.Name())
	m.AddSuffix("_foo")
	require.Equal(t, "cpu_foo_foo", m.Name())
}

func TestValueType(t *testing.T) {

func TestNewMetric_FieldModifiers(t *testing.T) {
	now := time.Now()
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"value": float64(1),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.True(t, m.HasField("value"))
	assert.False(t, m.HasField("foo"))

	m.AddField("newfield", "foo")
	assert.True(t, m.HasField("newfield"))

	assert.NoError(t, m.RemoveField("newfield"))
	assert.False(t, m.HasField("newfield"))

	// don't allow user to remove all fields:
	assert.Error(t, m.RemoveField("value"))

	m.AddField("value2", int64(101))
	assert.NoError(t, m.RemoveField("value"))
	assert.False(t, m.HasField("value"))
}

func TestNewMetric_Fields(t *testing.T) {
	now := time.Now()
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float":                  float64(1),
		"int":                    int64(1),
		"bool":                   true,
		"false":                  false,
		"string":                 "test",
		"quote_string":           `x"y`,
		"backslash_quote_string": `x\"y`,
		"backslash":              `x\y`,
		"ends_with_backslash":    `x\`,
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Equal(t, fields, m.Fields())
}

func TestNewMetric_Time(t *testing.T) {
	now := time.Now()
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float":  float64(1),
		"int":    int64(1),
		"bool":   true,
		"false":  false,
		"string": "test",
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)
	m = m.Copy()
	m2 := m.Copy()

	assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
	assert.Equal(t, now.UnixNano(), m2.UnixNano())
}

func TestNewMetric_Copy(t *testing.T) {
	now := time.Now()
	tags := map[string]string{}
	fields := map[string]interface{}{
		"float": float64(1),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)
	m2 := m.Copy()

	assert.Equal(t,
		fmt.Sprintf("cpu float=1 %d\n", now.UnixNano()),
		m.String())
	m.AddTag("host", "localhost")
	assert.Equal(t,
		fmt.Sprintf("cpu,host=localhost float=1 %d\n", now.UnixNano()),
		m.String())

	assert.Equal(t,
		fmt.Sprintf("cpu float=1 %d\n", now.UnixNano()),
		m2.String())
}

func TestNewMetric_AllTypes(t *testing.T) {
	now := time.Now()
	tags := map[string]string{}
	fields := map[string]interface{}{
		"float64":     float64(1),
		"float32":     float32(1),
		"int64":       int64(1),
		"int32":       int32(1),
		"int16":       int16(1),
		"int8":        int8(1),
		"int":         int(1),
		"uint64":      uint64(1),
		"uint32":      uint32(1),
		"uint16":      uint16(1),
		"uint8":       uint8(1),
		"uint":        uint(1),
		"bytes":       []byte("foo"),
		"nil":         nil,
		"maxuint64":   uint64(MaxInt) + 10,
		"maxuint":     uint(MaxInt) + 10,
		"unsupported": []int{1, 2},
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Contains(t, m.String(), "float64=1")
	assert.Contains(t, m.String(), "float32=1")
	assert.Contains(t, m.String(), "int64=1i")
	assert.Contains(t, m.String(), "int32=1i")
	assert.Contains(t, m.String(), "int16=1i")
	assert.Contains(t, m.String(), "int8=1i")
	assert.Contains(t, m.String(), "int=1i")
	assert.Contains(t, m.String(), "uint64=1i")
	assert.Contains(t, m.String(), "uint32=1i")
	assert.Contains(t, m.String(), "uint16=1i")
	assert.Contains(t, m.String(), "uint8=1i")
	assert.Contains(t, m.String(), "uint=1i")
	assert.NotContains(t, m.String(), "nil")
	assert.Contains(t, m.String(), fmt.Sprintf("maxuint64=%di", MaxInt))
	assert.Contains(t, m.String(), fmt.Sprintf("maxuint=%di", MaxInt))
}

func TestIndexUnescapedByte(t *testing.T) {
	tests := []struct {
		in       []byte
		b        byte
		expected int
	}{
		{
			in:       []byte(`foobar`),
			b:        'b',
			expected: 3,
		},
		{
			in:       []byte(`foo\bar`),
			b:        'b',
			expected: -1,
		},
		{
			in:       []byte(`foo\\bar`),
			b:        'b',
			expected: -1,
		},
		{
			in:       []byte(`foobar`),
			b:        'f',
			expected: 0,
		},
		{
			in:       []byte(`foobar`),
			b:        'r',
			expected: 5,
		},
		{
			in:       []byte(`\foobar`),
			b:        'f',
			expected: -1,
		},
	}

	for _, test := range tests {
		got := indexUnescapedByte(test.in, test.b)
		assert.Equal(t, test.expected, got)
	}
}

func TestNewGaugeMetric(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host":       "localhost",
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
		"value":      float64(42),
	}
	m, err := New("cpu", tags, fields, now, telegraf.Gauge)
	assert.NoError(t, err)

	assert.Equal(t, telegraf.Gauge, m.Type())
	assert.Equal(t, tags, m.Tags())
	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, "cpu", m.Name())
	assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
	assert.Equal(t, now.UnixNano(), m.UnixNano())
}

func TestNewCounterMetric(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host":       "localhost",
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
	}
	m, err := New("cpu", tags, fields, now, telegraf.Counter)
	assert.NoError(t, err)

	assert.Equal(t, telegraf.Counter, m.Type())
	assert.Equal(t, tags, m.Tags())
	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, "cpu", m.Name())
	assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
	assert.Equal(t, now.UnixNano(), m.UnixNano())
}

// test splitting metric into various max lengths
func TestSplitMetric(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float":  float64(100001),
		"int":    int64(100001),
		"bool":   true,
		"false":  false,
		"string": "test",
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	split80 := m.Split(80)
	assert.Len(t, split80, 2)

	split70 := m.Split(70)
	assert.Len(t, split70, 3)

	split60 := m.Split(60)
	assert.Len(t, split60, 5)
}

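The split counts asserted above follow from greedily packing serialized fields under a byte budget. A rough standalone sketch of that idea over plain strings (splitFields is illustrative only; the real method works on the metric's own buffers and also accounts for the trailing newline):

package main

import (
	"fmt"
	"strings"
)

// splitFields greedily packs "k=v" pairs so each output line
// "<prefix> <fields> <timestamp>" fits in maxLen bytes.
func splitFields(prefix, ts string, fields []string, maxLen int) []string {
	overhead := len(prefix) + len(ts) + 2 // the two separating spaces
	var out []string
	var cur []string
	for _, f := range fields {
		next := append(cur, f)
		if len(cur) > 0 && overhead+len(strings.Join(next, ",")) > maxLen {
			// current line is full; flush it and start a new one
			out = append(out, prefix+" "+strings.Join(cur, ",")+" "+ts)
			cur = []string{f}
			continue
		}
		cur = next
	}
	if len(cur) > 0 {
		out = append(out, prefix+" "+strings.Join(cur, ",")+" "+ts)
	}
	return out
}

func main() {
	lines := splitFields("cpu,host=localhost", "1480940990034083306",
		[]string{"float=100001", "int=100001i", "bool=true"}, 60)
	for _, l := range lines {
		fmt.Println(l)
	}
}
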
// test splitting metric into various max lengths
// use a simple regex check to verify that the split metrics are valid
func TestSplitMetric_RegexVerify(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"foo":     float64(98934259085),
		"bar":     float64(19385292),
		"number":  float64(19385292),
		"another": float64(19385292),
		"n":       float64(19385292),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	// verification regex
	re := regexp.MustCompile(`cpu,host=localhost \w+=\d+(,\w+=\d+)* 1480940990034083306`)

	split90 := m.Split(90)
	assert.Len(t, split90, 2)
	for _, splitM := range split90 {
		assert.True(t, re.Match(splitM.Serialize()), splitM.String())
	}

	split70 := m.Split(70)
	assert.Len(t, split70, 3)
	for _, splitM := range split70 {
		assert.True(t, re.Match(splitM.Serialize()), splitM.String())
	}

	split20 := m.Split(20)
	assert.Len(t, split20, 5)
	for _, splitM := range split20 {
		assert.True(t, re.Match(splitM.Serialize()), splitM.String())
	}
}

// test splitting metric even when the given length is shorter than the
// shortest possible length
// Split should split the metric as short as possible, i.e., one field per metric
func TestSplitMetric_TooShort(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float":  float64(100001),
		"int":    int64(100001),
		"bool":   true,
		"false":  false,
		"string": "test",
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	split := m.Split(10)
	assert.Len(t, split, 5)
	strings := make([]string, 5)
	for i, splitM := range split {
		strings[i] = splitM.String()
	}

	assert.Contains(t, strings, "cpu,host=localhost float=100001 1480940990034083306\n")
	assert.Contains(t, strings, "cpu,host=localhost int=100001i 1480940990034083306\n")
	assert.Contains(t, strings, "cpu,host=localhost bool=true 1480940990034083306\n")
	assert.Contains(t, strings, "cpu,host=localhost false=false 1480940990034083306\n")
	assert.Contains(t, strings, "cpu,host=localhost string=\"test\" 1480940990034083306\n")
}

func TestSplitMetric_NoOp(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float":  float64(100001),
		"int":    int64(100001),
		"bool":   true,
		"false":  false,
		"string": "test",
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	split := m.Split(1000)
	assert.Len(t, split, 1)
	assert.Equal(t, m, split[0])
}

func TestSplitMetric_OneField(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float": float64(100001),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", m.String())

	split := m.Split(1000)
	assert.Len(t, split, 1)
	assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())

	split = m.Split(1)
	assert.Len(t, split, 1)
	assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())

	split = m.Split(40)
	assert.Len(t, split, 1)
	assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
}

func TestSplitMetric_ExactSize(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float":  float64(100001),
		"int":    int64(100001),
		"bool":   true,
		"false":  false,
		"string": "test",
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)
	actual := m.Split(m.Len())
	// check that no copy was made
	require.Equal(t, &m, &actual[0])
}

func TestSplitMetric_NoRoomForNewline(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float": float64(100001),
		"int":   int64(100001),
		"bool":  true,
		"false": false,
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)
	actual := m.Split(m.Len() - 1)
	require.Equal(t, 2, len(actual))
}

func TestNewMetricAggregate(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.False(t, m.IsAggregate())
	m.SetAggregate(true)
	assert.True(t, m.IsAggregate())
}

func TestNewMetricString(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d\n",
		now.UnixNano())
	assert.Equal(t, lineProto, m.String())
}

func TestNewMetricFailNaN(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": math.NaN(),
	}

	_, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)
}

func TestEmptyTagValueOrKey(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host":     "localhost",
		"emptytag": "",
		"":         "valuewithoutkey",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
	}
	m, err := New("cpu", tags, fields, now)

	assert.True(t, m.HasTag("host"))
	assert.False(t, m.HasTag("emptytag"))
	assert.Equal(t,
		fmt.Sprintf("cpu,host=localhost usage_idle=99 %d\n", now.UnixNano()),
		m.String())

	assert.NoError(t, err)
}

func TestNewMetric_TrailingSlash(t *testing.T) {
	now := time.Now()

	tests := []struct {
		name   string
		tags   map[string]string
		fields map[string]interface{}
	}{
		{
			name: `cpu\`,
			fields: map[string]interface{}{
				"value": int64(42),
			},
		},
		{
			name: "cpu",
			fields: map[string]interface{}{
				`value\`: "x",
			},
		},
		{
			name: "cpu",
			tags: map[string]string{
				`host\`: "localhost",
			},
			fields: map[string]interface{}{
				"value": int64(42),
			},
		},
		{
			name: "cpu",
			tags: map[string]string{
				"host": `localhost\`,
			},
			fields: map[string]interface{}{
				"value": int64(42),
			},
		},
	}

	for _, tc := range tests {
		_, err := New(tc.name, tc.tags, tc.fields, now)
		assert.Error(t, err)
	}
}

func TestCopyAggreate(t *testing.T) {
	m1 := baseMetric()
	m1.SetAggregate(true)
	m2 := m1.Copy()
	assert.True(t, m2.IsAggregate())
}

680
metric/parse.go
@@ -1,680 +0,0 @@
package metric

import (
	"bytes"
	"errors"
	"fmt"
	"strconv"
	"time"

	"github.com/influxdata/telegraf"
)

var (
	ErrInvalidNumber = errors.New("invalid number")
)

const (
	// the number of characters for the largest possible int64 (9223372036854775807)
	maxInt64Digits = 19

	// the number of characters for the smallest possible int64 (-9223372036854775808)
	minInt64Digits = 20

	// the number of characters required for the largest float64 before a range check
	// would occur during parsing
	maxFloat64Digits = 25

	// the number of characters required for the smallest float64 before a range check
	// would occur during parsing
	minFloat64Digits = 27

	MaxKeyLength = 65535
)

// The following constants allow us to specify which state to move to
// next, when scanning sections of a Point.
const (
	tagKeyState = iota
	tagValueState
	fieldsState
)

func Parse(buf []byte) ([]telegraf.Metric, error) {
	return ParseWithDefaultTimePrecision(buf, time.Now(), "")
}

func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
	return ParseWithDefaultTimePrecision(buf, t, "")
}

func ParseWithDefaultTimePrecision(
	buf []byte,
	t time.Time,
	precision string,
) ([]telegraf.Metric, error) {
	if len(buf) == 0 {
		return []telegraf.Metric{}, nil
	}
	if len(buf) <= 6 {
		return []telegraf.Metric{}, makeError("buffer too short", buf, 0)
	}
	metrics := make([]telegraf.Metric, 0, bytes.Count(buf, []byte("\n"))+1)
	var errStr string
	i := 0
	for {
		j := bytes.IndexByte(buf[i:], '\n')
		if j == -1 {
			break
		}
		if len(buf[i:i+j]) < 2 {
			i += j + 1 // increment i past the previous newline
			continue
		}

		m, err := parseMetric(buf[i:i+j], t, precision)
		if err != nil {
			i += j + 1 // increment i past the previous newline
			errStr += " " + err.Error()
			continue
		}
		i += j + 1 // increment i past the previous newline

		metrics = append(metrics, m)
	}

	if len(errStr) > 0 {
		return metrics, fmt.Errorf(errStr)
	}
	return metrics, nil
}

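A minimal usage sketch for this parser, assuming the pre-1.6 package path shown in this hunk (github.com/influxdata/telegraf/metric); as ParseWithDefaultTimePrecision above shows, lines that fail to parse are skipped and reported through the combined error string.

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	// Each metric must be newline-terminated.
	buf := []byte("cpu,host=localhost usage_idle=99 1480595849000000000\n")
	metrics, err := metric.Parse(buf)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range metrics {
		fmt.Println(m.Name(), m.Tags(), m.Fields())
	}
}
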
func parseMetric(buf []byte,
	defaultTime time.Time,
	precision string,
) (telegraf.Metric, error) {
	var dTime string
	// scan the first block which is measurement[,tag1=value1,tag2=value2...]
	pos, key, err := scanKey(buf, 0)
	if err != nil {
		return nil, err
	}

	// measurement name is required
	if len(key) == 0 {
		return nil, fmt.Errorf("missing measurement")
	}

	if len(key) > MaxKeyLength {
		return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
	}

	// scan the second block which is field1=value1[,field2=value2,...]
	pos, fields, err := scanFields(buf, pos)
	if err != nil {
		return nil, err
	}

	// at least one field is required
	if len(fields) == 0 {
		return nil, fmt.Errorf("missing fields")
	}

	// scan the last block which is an optional integer timestamp
	pos, ts, err := scanTime(buf, pos)
	if err != nil {
		return nil, err
	}

	// apply precision multiplier
	var nsec int64
	multiplier := getPrecisionMultiplier(precision)
	if len(ts) > 0 && multiplier > 1 {
		tsint, err := parseIntBytes(ts, 10, 64)
		if err != nil {
			return nil, err
		}

		nsec := multiplier * tsint
		ts = []byte(strconv.FormatInt(nsec, 10))
	}

	m := &metric{
		fields: fields,
		t:      ts,
		nsec:   nsec,
	}

	// parse out the measurement name
	// namei is the index at which the "name" ends
	namei := indexUnescapedByte(key, ',')
	if namei < 1 {
		// no tags
		m.name = key
	} else {
		m.name = key[0:namei]
		m.tags = key[namei:]
	}

	if len(m.t) == 0 {
		if len(dTime) == 0 {
			dTime = fmt.Sprint(defaultTime.UnixNano())
		}
		// use default time
		m.t = []byte(dTime)
	}

	// here we copy on return because this allows us to later call
	// AddTag, AddField, RemoveTag, RemoveField, etc. without worrying about
	// modifying 'tag' bytes having an effect on 'field' bytes, for example.
	return m.Copy(), nil
}

// scanKey scans buf starting at i for the measurement and tag portion of the point.
// It returns the ending position and the byte slice of key within buf. If there
// are tags, they will be sorted if they are not already.
func scanKey(buf []byte, i int) (int, []byte, error) {
	start := skipWhitespace(buf, i)
	i = start

	// First scan the Point's measurement.
	state, i, err := scanMeasurement(buf, i)
	if err != nil {
		return i, buf[start:i], err
	}

	// Optionally scan tags if needed.
	if state == tagKeyState {
		i, err = scanTags(buf, i)
		if err != nil {
			return i, buf[start:i], err
		}
	}

	return i, buf[start:i], nil
}

// scanMeasurement examines the measurement part of a Point, returning
// the next state to move to, and the current location in the buffer.
func scanMeasurement(buf []byte, i int) (int, int, error) {
	// Check first byte of measurement, anything except a comma is fine.
	// It can't be a space, since whitespace is stripped prior to this
	// function call.
	if i >= len(buf) || buf[i] == ',' {
		return -1, i, makeError("missing measurement", buf, i)
	}

	for {
		i++
		if i >= len(buf) {
			// cpu
			return -1, i, makeError("missing fields", buf, i)
		}

		if buf[i-1] == '\\' {
			// Skip character (it's escaped).
			continue
		}

		// Unescaped comma; move onto scanning the tags.
		if buf[i] == ',' {
			return tagKeyState, i + 1, nil
		}

		// Unescaped space; move onto scanning the fields.
		if buf[i] == ' ' {
			// cpu value=1.0
			return fieldsState, i, nil
		}
	}
}

// scanTags examines all the tags in a Point, keeping track of and
// returning the updated indices slice, number of commas and location
// in buf where to start examining the Point fields.
func scanTags(buf []byte, i int) (int, error) {
	var (
		err   error
		state = tagKeyState
	)

	for {
		switch state {
		case tagKeyState:
			i, err = scanTagsKey(buf, i)
			state = tagValueState // tag value always follows a tag key
		case tagValueState:
			state, i, err = scanTagsValue(buf, i)
		case fieldsState:
			return i, nil
		}

		if err != nil {
			return i, err
		}
	}
}

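A worked trace of the three-state scanner above (illustrative; not part of the source):

// Input: "cpu,host=serverA,region=us value=1"
//
//   scanMeasurement: "cpu"     -> ',' seen -> tagKeyState
//   scanTagsKey:     "host="   -> '=' seen -> tagValueState
//   scanTagsValue:   "serverA" -> ',' seen -> tagKeyState
//   scanTagsKey:     "region=" -> '=' seen -> tagValueState
//   scanTagsValue:   "us"      -> ' ' seen -> fieldsState
//   scanTags returns with i pointing at "value=1"
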
// scanTagsKey scans each character in a tag key.
func scanTagsKey(buf []byte, i int) (int, error) {
	// First character of the key.
	if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' {
		// cpu,{'', ' ', ',', '='}
		return i, makeError("missing tag key", buf, i)
	}

	// Examine each character in the tag key until we hit an unescaped
	// equals (the tag value), or we hit an error (i.e., unescaped
	// space or comma).
	for {
		i++

		// Either we reached the end of the buffer or we hit an
		// unescaped comma or space.
		if i >= len(buf) ||
			((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') {
			// cpu,tag{'', ' ', ','}
			return i, makeError("missing tag value", buf, i)
		}

		if buf[i] == '=' && buf[i-1] != '\\' {
			// cpu,tag=
			return i + 1, nil
		}
	}
}

// scanTagsValue scans each character in a tag value.
func scanTagsValue(buf []byte, i int) (int, int, error) {
	// Tag value cannot be empty.
	if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' {
		// cpu,tag={',', ' '}
		return -1, i, makeError("missing tag value", buf, i)
	}

	// Examine each character in the tag value until we hit an unescaped
	// comma (move onto next tag key), an unescaped space (move onto
	// fields), or we error out.
	for {
		i++
		if i >= len(buf) {
			// cpu,tag=value
			return -1, i, makeError("missing fields", buf, i)
		}

		// An unescaped equals sign is an invalid tag value.
		if buf[i] == '=' && buf[i-1] != '\\' {
			// cpu,tag={'=', 'fo=o'}
			return -1, i, makeError("invalid tag format", buf, i)
		}

		if buf[i] == ',' && buf[i-1] != '\\' {
			// cpu,tag=foo,
			return tagKeyState, i + 1, nil
		}

		// cpu,tag=foo value=1.0
		// cpu, tag=foo\= value=1.0
		if buf[i] == ' ' && buf[i-1] != '\\' {
			return fieldsState, i, nil
		}
	}
}

// scanFields scans buf, starting at i for the fields section of a point. It returns
// the ending position and the byte slice of the fields within buf
func scanFields(buf []byte, i int) (int, []byte, error) {
	start := skipWhitespace(buf, i)
	i = start

	// tracks how many '"' we've seen since the last '='
	quotes := 0

	// tracks how many '=' we've seen
	equals := 0

	// tracks how many commas we've seen
	commas := 0

	for {
		// reached the end of buf?
		if i >= len(buf) {
			break
		}

		// escaped characters?
		if buf[i] == '\\' && i+1 < len(buf) {
			i += 2
			continue
		}

		// If the value is quoted, scan until we get to the end quote
		// Only quote values in the field value since quotes are not significant
		// in the field key
		if buf[i] == '"' && equals > commas {
			i++
			quotes++
			if quotes > 2 {
				break
			}
			continue
		}

		// If we see an =, ensure that there is at least one char before and after it
		if buf[i] == '=' && quotes != 1 {
			quotes = 0
			equals++

			// check for "... =123" but allow "a\ =123"
			if buf[i-1] == ' ' && buf[i-2] != '\\' {
				return i, buf[start:i], makeError("missing field key", buf, i)
			}

			// check for "...a=123,=456" but allow "a=123,a\,=456"
			if buf[i-1] == ',' && buf[i-2] != '\\' {
				return i, buf[start:i], makeError("missing field key", buf, i)
			}

			// check for "... value="
			if i+1 >= len(buf) {
				return i, buf[start:i], makeError("missing field value", buf, i)
			}

			// check for "... value=,value2=..."
			if buf[i+1] == ',' || buf[i+1] == ' ' {
				return i, buf[start:i], makeError("missing field value", buf, i)
			}

			if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
				var err error
				i, err = scanNumber(buf, i+1)
				if err != nil {
					return i, buf[start:i], err
				}
				continue
			}
			// If next byte is not a double-quote, the value must be a boolean
			if buf[i+1] != '"' {
				var err error
				i, _, err = scanBoolean(buf, i+1)
				if err != nil {
					return i, buf[start:i], err
				}
				continue
			}
		}

		if buf[i] == ',' && quotes != 1 {
			commas++
		}

		// reached end of block?
		if buf[i] == ' ' && quotes != 1 {
			break
		}
		i++
	}

	if quotes != 0 && quotes != 2 {
		return i, buf[start:i], makeError("unbalanced quotes", buf, i)
	}

	// check that all field sections had keys and values (e.g. prevent "a=1,b")
	if equals == 0 || commas != equals-1 {
		return i, buf[start:i], makeError("invalid field format", buf, i)
	}

	return i, buf[start:i], nil
}

// scanTime scans buf, starting at i for the time section of a point. It
// returns the ending position and the byte slice of the timestamp within buf
// and an error if the timestamp is not in the correct numeric format.
func scanTime(buf []byte, i int) (int, []byte, error) {
	start := skipWhitespace(buf, i)
	i = start

	for {
		// reached the end of buf?
		if i >= len(buf) {
			break
		}

		// Reached end of block or trailing whitespace?
		if buf[i] == '\n' || buf[i] == ' ' {
			break
		}

		// Handle negative timestamps
		if i == start && buf[i] == '-' {
			i++
			continue
		}

		// Timestamps should be integers, make sure they are so we don't need
		// to actually parse the timestamp until needed.
		if buf[i] < '0' || buf[i] > '9' {
			return i, buf[start:i], makeError("invalid timestamp", buf, i)
		}
		i++
	}
	return i, buf[start:i], nil
}

func isNumeric(b byte) bool {
	return (b >= '0' && b <= '9') || b == '.'
}

// scanNumber returns the end position within buf, starting at i, after
// scanning over buf for an integer or float. It returns an
// error if an invalid number is scanned.
func scanNumber(buf []byte, i int) (int, error) {
	start := i
	var isInt bool

	// Is negative number?
	if i < len(buf) && buf[i] == '-' {
		i++
		// There must be more characters now, as just '-' is illegal.
		if i == len(buf) {
			return i, ErrInvalidNumber
		}
	}

	// tracks whether we have seen a decimal point
	decimal := false

	// indicates the number is a float in scientific notation
	scientific := false

	for {
		if i >= len(buf) {
			break
		}

		if buf[i] == ',' || buf[i] == ' ' {
			break
		}

		if buf[i] == 'i' && i > start && !isInt {
			isInt = true
			i++
			continue
		}

		if buf[i] == '.' {
			// Can't have more than 1 decimal (e.g. 1.1.1 should fail)
			if decimal {
				return i, ErrInvalidNumber
			}
			decimal = true
		}

		// `e` is valid for floats but not as the first char
		if i > start && (buf[i] == 'e' || buf[i] == 'E') {
			scientific = true
			i++
			continue
		}

		// + and - are only valid at this point if they follow an e (scientific notation)
		if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
			i++
			continue
		}

		// NaN is an unsupported value
		if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
			return i, ErrInvalidNumber
		}

		if !isNumeric(buf[i]) {
			return i, ErrInvalidNumber
		}
		i++
	}

	if isInt && (decimal || scientific) {
		return i, ErrInvalidNumber
	}

	numericDigits := i - start
	if isInt {
		numericDigits--
	}
	if decimal {
		numericDigits--
	}
	if buf[start] == '-' {
		numericDigits--
	}

	if numericDigits == 0 {
		return i, ErrInvalidNumber
	}

	// It's more common that numbers will be within min/max range for their type but we need to prevent
	// out of range numbers from being parsed successfully. This uses some simple heuristics to decide
	// if we should parse the number to the actual type. It does not do it all the time because it incurs
	// extra allocations and we end up converting the type again when writing points to disk.
	if isInt {
		// Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
		if buf[i-1] != 'i' {
			return i, ErrInvalidNumber
		}
		// Parse the int to check bounds if the number of digits could be larger than the max range
		// We subtract 1 from the index to remove the `i` from our tests
		if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
			if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil {
				return i, makeError(fmt.Sprintf("unable to parse integer %s: %s", buf[start:i-1], err), buf, i)
			}
		}
	} else {
		// Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
		if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
			if _, err := parseFloatBytes(buf[start:i], 10); err != nil {
				return i, makeError("invalid float", buf, i)
			}
		}
	}

	return i, nil
}

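Some concrete inputs against the rules above (illustrative; not from the source):

//   "42i"     -> valid integer ('i' suffix, digits only)
//   "-1.5"    -> valid float (one leading '-', one '.')
//   "6.02e23" -> valid float (scientific notation forces a bounds parse)
//   "1.1.1"   -> ErrInvalidNumber (second '.')
//   "9i10"    -> ErrInvalidNumber ('i' must be the final character)
//   "NaN"     -> ErrInvalidNumber (explicitly unsupported)
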
// scanBoolean returns the end position within buf, starting at i, after
// scanning over buf for a boolean. Valid values for a boolean are
// t, T, true, TRUE, f, F, false, FALSE. It returns an error if an invalid
// boolean is scanned.
func scanBoolean(buf []byte, i int) (int, []byte, error) {
	start := i

	if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
		return i, buf[start:i], makeError("invalid value", buf, i)
	}

	i++
	for {
		if i >= len(buf) {
			break
		}

		if buf[i] == ',' || buf[i] == ' ' {
			break
		}
		i++
	}

	// Single char bool (t, T, f, F) is ok
	if i-start == 1 {
		return i, buf[start:i], nil
	}

	// length must be 4 for true or TRUE
	if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
		return i, buf[start:i], makeError("invalid boolean", buf, i)
	}

	// length must be 5 for false or FALSE
	if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
		return i, buf[start:i], makeError("invalid boolean", buf, i)
	}

	// Otherwise
	valid := false
	switch buf[start] {
	case 't':
		valid = bytes.Equal(buf[start:i], []byte("true"))
	case 'f':
		valid = bytes.Equal(buf[start:i], []byte("false"))
	case 'T':
		valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
	case 'F':
		valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
	}

	if !valid {
		return i, buf[start:i], makeError("invalid boolean", buf, i)
	}

	return i, buf[start:i], nil
}

// skipWhitespace returns the end position within buf, starting at i after
// scanning over spaces in tags
func skipWhitespace(buf []byte, i int) int {
	for i < len(buf) {
		if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 {
			break
		}
		i++
	}
	return i
}

// makeError is a helper function for making a metric parsing error.
// reason is the reason why the error occurred.
// buf should be the current buffer we are parsing.
// i is the current index, to give some context on where in the buffer we are.
func makeError(reason string, buf []byte, i int) error {
	return fmt.Errorf("metric parsing error, reason: [%s], buffer: [%s], index: [%d]",
		reason, buf, i)
}

// getPrecisionMultiplier will return a multiplier for the precision specified.
func getPrecisionMultiplier(precision string) int64 {
	d := time.Nanosecond
	switch precision {
	case "u":
		d = time.Microsecond
	case "ms":
		d = time.Millisecond
	case "s":
		d = time.Second
	case "m":
		d = time.Minute
	case "h":
		d = time.Hour
	}
	return int64(d)
}

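A small self-contained sketch of how the multiplier is meant to be used on a parsed timestamp; precisionMultiplier is an illustrative copy of the function above.

package main

import (
	"fmt"
	"time"
)

// precisionMultiplier mirrors getPrecisionMultiplier above: it returns
// how many nanoseconds one unit of the given precision represents.
func precisionMultiplier(precision string) int64 {
	d := time.Nanosecond
	switch precision {
	case "u":
		d = time.Microsecond
	case "ms":
		d = time.Millisecond
	case "s":
		d = time.Second
	case "m":
		d = time.Minute
	case "h":
		d = time.Hour
	}
	return int64(d)
}

func main() {
	// A timestamp of 1480595849 with precision "s" scales to nanoseconds:
	fmt.Println(1480595849 * precisionMultiplier("s")) // 1480595849000000000
}
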
@@ -1,413 +0,0 @@
package metric

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

const trues = `booltest b=T
booltest b=t
booltest b=True
booltest b=TRUE
booltest b=true
`

const falses = `booltest b=F
booltest b=f
booltest b=False
booltest b=FALSE
booltest b=false
`

const withEscapes = `w\,\ eather,host=local temp=99 1465839830100400200
w\,eather,host=local temp=99 1465839830100400200
weather,location=us\,midwest temperature=82 1465839830100400200
weather,location=us-midwest temp\=rature=82 1465839830100400200
weather,location\ place=us-midwest temperature=82 1465839830100400200
weather,location=us-midwest temperature="too\"hot\"" 1465839830100400200
`

const withTimestamps = `cpu usage=99 1480595849000000000
cpu usage=99 1480595850000000000
cpu usage=99 1480595851700030000
cpu usage=99 1480595852000000300
`

const sevenMetrics = `cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
`

const negMetrics = `weather,host=local temp=-99i,temp_float=-99.4 1465839830100400200
`

// some metrics are invalid
const someInvalid = `cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu3, host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu4 , usage_idle=99,usage_busy=1
cpu 1480595852000000300
cpu usage=99 1480595852foobar300
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
`

func TestParse(t *testing.T) {
	start := time.Now()
	metrics, err := Parse([]byte(sevenMetrics))
	assert.NoError(t, err)
	assert.Len(t, metrics, 7)

	// all metrics parsed together w/o a timestamp should have the same time.
	firstTime := metrics[0].Time()
	for _, m := range metrics {
		assert.Equal(t,
			map[string]interface{}{
				"idle": float64(99),
				"busy": int64(1),
				"b":    true,
				"s":    "string",
			},
			m.Fields(),
		)
		assert.Equal(t,
			map[string]string{
				"host":       "foo",
				"datacenter": "us-east",
			},
			m.Tags(),
		)
		assert.True(t, m.Time().After(start))
		assert.True(t, m.Time().Equal(firstTime))
	}
}

func TestParseNegNumbers(t *testing.T) {
|
||||
metrics, err := Parse([]byte(negMetrics))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 1)
|
||||
|
||||
assert.Equal(t,
|
||||
map[string]interface{}{
|
||||
"temp": int64(-99),
|
||||
"temp_float": float64(-99.4),
|
||||
},
|
||||
metrics[0].Fields(),
|
||||
)
|
||||
assert.Equal(t,
|
||||
map[string]string{
|
||||
"host": "local",
|
||||
},
|
||||
metrics[0].Tags(),
|
||||
)
|
||||
}
|
||||
|
||||
func TestParseErrors(t *testing.T) {
|
||||
start := time.Now()
|
||||
metrics, err := Parse([]byte(someInvalid))
|
||||
assert.Error(t, err)
|
||||
assert.Len(t, metrics, 4)
|
||||
|
||||
// all metrics parsed together w/o a timestamp should have the same time.
|
||||
firstTime := metrics[0].Time()
|
||||
for _, m := range metrics {
|
||||
assert.Equal(t,
|
||||
map[string]interface{}{
|
||||
"usage_idle": float64(99),
|
||||
"usage_busy": float64(1),
|
||||
},
|
||||
m.Fields(),
|
||||
)
|
||||
assert.Equal(t,
|
||||
map[string]string{
|
||||
"host": "foo",
|
||||
"datacenter": "us-east",
|
||||
},
|
||||
m.Tags(),
|
||||
)
|
||||
assert.True(t, m.Time().After(start))
|
||||
assert.True(t, m.Time().Equal(firstTime))
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseWithTimestamps(t *testing.T) {
|
||||
metrics, err := Parse([]byte(withTimestamps))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 4)
|
||||
|
||||
expectedTimestamps := []time.Time{
|
||||
time.Unix(0, 1480595849000000000),
|
||||
time.Unix(0, 1480595850000000000),
|
||||
time.Unix(0, 1480595851700030000),
|
||||
time.Unix(0, 1480595852000000300),
|
||||
}
|
||||
|
||||
// all metrics parsed together w/o a timestamp should have the same time.
|
||||
for i, m := range metrics {
|
||||
assert.Equal(t,
|
||||
map[string]interface{}{
|
||||
"usage": float64(99),
|
||||
},
|
||||
m.Fields(),
|
||||
)
|
||||
assert.True(t, m.Time().Equal(expectedTimestamps[i]))
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseEscapes(t *testing.T) {
|
||||
metrics, err := Parse([]byte(withEscapes))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 6)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
fields map[string]interface{}
|
||||
tags map[string]string
|
||||
}{
|
||||
{
|
||||
name: `w, eather`,
|
||||
fields: map[string]interface{}{"temp": float64(99)},
|
||||
tags: map[string]string{"host": "local"},
|
||||
},
|
||||
{
|
||||
name: `w,eather`,
|
||||
fields: map[string]interface{}{"temp": float64(99)},
|
||||
tags: map[string]string{"host": "local"},
|
||||
},
|
||||
{
|
||||
name: `weather`,
|
||||
fields: map[string]interface{}{"temperature": float64(82)},
|
||||
tags: map[string]string{"location": `us,midwest`},
|
||||
},
|
||||
{
|
||||
name: `weather`,
|
||||
fields: map[string]interface{}{`temp=rature`: float64(82)},
|
||||
tags: map[string]string{"location": `us-midwest`},
|
||||
},
|
||||
{
|
||||
name: `weather`,
|
||||
fields: map[string]interface{}{"temperature": float64(82)},
|
||||
tags: map[string]string{`location place`: `us-midwest`},
|
||||
},
|
||||
{
|
||||
name: `weather`,
|
||||
fields: map[string]interface{}{`temperature`: `too"hot"`},
|
||||
tags: map[string]string{"location": `us-midwest`},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
assert.Equal(t, test.name, metrics[i].Name())
|
||||
assert.Equal(t, test.fields, metrics[i].Fields())
|
||||
assert.Equal(t, test.tags, metrics[i].Tags())
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseTrueBooleans(t *testing.T) {
|
||||
metrics, err := Parse([]byte(trues))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 5)
|
||||
|
||||
for _, metric := range metrics {
|
||||
assert.Equal(t, "booltest", metric.Name())
|
||||
assert.Equal(t, true, metric.Fields()["b"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseFalseBooleans(t *testing.T) {
|
||||
metrics, err := Parse([]byte(falses))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 5)
|
||||
|
||||
for _, metric := range metrics {
|
||||
assert.Equal(t, "booltest", metric.Name())
|
||||
assert.Equal(t, false, metric.Fields()["b"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePointBadNumber(t *testing.T) {
|
||||
for _, tt := range []string{
|
||||
"cpu v=- ",
|
||||
"cpu v=-i ",
|
||||
"cpu v=-. ",
|
||||
"cpu v=. ",
|
||||
"cpu v=1.0i ",
|
||||
"cpu v=1ii ",
|
||||
"cpu v=1a ",
|
||||
"cpu v=-e-e-e ",
|
||||
"cpu v=42+3 ",
|
||||
"cpu v= ",
|
||||
} {
|
||||
_, err := Parse([]byte(tt + "\n"))
|
||||
assert.Error(t, err, tt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseTagsMissingParts(t *testing.T) {
|
||||
for _, tt := range []string{
|
||||
`cpu,host`,
|
||||
`cpu,host,`,
|
||||
`cpu,host=`,
|
||||
`cpu,f=oo=bar value=1`,
|
||||
`cpu,host value=1i`,
|
||||
`cpu,host=serverA,region value=1i`,
|
||||
`cpu,host=serverA,region= value=1i`,
|
||||
`cpu,host=serverA,region=,zone=us-west value=1i`,
|
||||
`cpu, value=1`,
|
||||
`cpu, ,,`,
|
||||
`cpu,,,`,
|
||||
`cpu,host=serverA,=us-east value=1i`,
|
||||
`cpu,host=serverAa\,,=us-east value=1i`,
|
||||
`cpu,host=serverA\,,=us-east value=1i`,
|
||||
`cpu, =serverA value=1i`,
|
||||
} {
|
||||
_, err := Parse([]byte(tt + "\n"))
|
||||
assert.Error(t, err, tt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePointWhitespace(t *testing.T) {
|
||||
for _, tt := range []string{
|
||||
`cpu value=1.0 1257894000000000000`,
|
||||
`cpu value=1.0 1257894000000000000`,
|
||||
`cpu value=1.0 1257894000000000000`,
|
||||
`cpu value=1.0 1257894000000000000 `,
|
||||
} {
|
||||
m, err := Parse([]byte(tt + "\n"))
|
||||
assert.NoError(t, err, tt)
|
||||
assert.Equal(t, "cpu", m[0].Name())
|
||||
assert.Equal(t, map[string]interface{}{"value": float64(1)}, m[0].Fields())
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePointInvalidFields(t *testing.T) {
|
||||
for _, tt := range []string{
|
||||
"test,foo=bar a=101,=value",
|
||||
"test,foo=bar =value",
|
||||
"test,foo=bar a=101,key=",
|
||||
"test,foo=bar key=",
|
||||
`test,foo=bar a=101,b="foo`,
|
||||
} {
|
||||
_, err := Parse([]byte(tt + "\n"))
|
||||
assert.Error(t, err, tt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePointNoFields(t *testing.T) {
|
||||
for _, tt := range []string{
|
||||
"cpu_load_short,host=server01,region=us-west",
|
||||
"very_long_measurement_name",
|
||||
"cpu,host==",
|
||||
"============",
|
||||
"cpu",
|
||||
"cpu\n\n\n\n\n\n\n",
|
||||
" ",
|
||||
} {
|
||||
_, err := Parse([]byte(tt + "\n"))
|
||||
assert.Error(t, err, tt)
|
||||
}
|
||||
}
|
||||
|
||||
// a b=1 << this is the shortest possible metric
|
||||
// any shorter is just ignored
|
||||
func TestParseBufTooShort(t *testing.T) {
|
||||
for _, tt := range []string{
|
||||
"",
|
||||
"a",
|
||||
"a ",
|
||||
"a b=",
|
||||
} {
|
||||
_, err := Parse([]byte(tt + "\n"))
|
||||
assert.Error(t, err, tt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseInvalidBooleans(t *testing.T) {
|
||||
for _, tt := range []string{
|
||||
"test b=tru",
|
||||
"test b=fals",
|
||||
"test b=faLse",
|
||||
"test q=foo",
|
||||
"test b=lambchops",
|
||||
} {
|
||||
_, err := Parse([]byte(tt + "\n"))
|
||||
assert.Error(t, err, tt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseInvalidNumbers(t *testing.T) {
|
||||
for _, tt := range []string{
|
||||
"test b=-",
|
||||
"test b=1.1.1",
|
||||
"test b=nan",
|
||||
"test b=9i10",
|
||||
"test b=9999999999999999999i",
|
||||
} {
|
||||
_, err := Parse([]byte(tt + "\n"))
|
||||
assert.Error(t, err, tt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseNegativeTimestamps(t *testing.T) {
|
||||
for _, tt := range []string{
|
||||
"test foo=101 -1257894000000000000",
|
||||
} {
|
||||
metrics, err := Parse([]byte(tt + "\n"))
|
||||
assert.NoError(t, err, tt)
|
||||
assert.True(t, metrics[0].Time().Equal(time.Unix(0, -1257894000000000000)))
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePrecision(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
line string
|
||||
precision string
|
||||
expected int64
|
||||
}{
|
||||
{"test v=42 1491847420", "s", 1491847420000000000},
|
||||
{"test v=42 1491847420123", "ms", 1491847420123000000},
|
||||
{"test v=42 1491847420123456", "u", 1491847420123456000},
|
||||
{"test v=42 1491847420123456789", "ns", 1491847420123456789},
|
||||
|
||||
{"test v=42 1491847420123456789", "1s", 1491847420123456789},
|
||||
{"test v=42 1491847420123456789", "asdf", 1491847420123456789},
|
||||
} {
|
||||
metrics, err := ParseWithDefaultTimePrecision(
|
||||
[]byte(tt.line+"\n"), time.Now(), tt.precision)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.expected, metrics[0].UnixNano())
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePrecisionUnsetTime(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
line string
|
||||
precision string
|
||||
}{
|
||||
{"test v=42", "s"},
|
||||
{"test v=42", "ns"},
|
||||
} {
|
||||
_, err := ParseWithDefaultTimePrecision(
|
||||
[]byte(tt.line+"\n"), time.Now(), tt.precision)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseMaxKeyLength(t *testing.T) {
|
||||
key := ""
|
||||
for {
|
||||
if len(key) > MaxKeyLength {
|
||||
break
|
||||
}
|
||||
key += "test"
|
||||
}
|
||||
|
||||
_, err := Parse([]byte(key + " value=1\n"))
|
||||
assert.Error(t, err)
|
||||
}
|
||||
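For reference, the entry points these deleted tests exercised can be driven directly. A minimal sketch, assuming `metric.Parse` and `metric.ParseWithDefaultTimePrecision` are still exported at this revision as the tests above imply:

```go
package main

import (
    "fmt"
    "time"

    "github.com/influxdata/telegraf/metric"
)

func main() {
    // A second-precision timestamp is scaled up to nanoseconds by the
    // parser, exactly what TestParsePrecision above asserts.
    line := []byte("test v=42 1491847420\n")
    metrics, err := metric.ParseWithDefaultTimePrecision(line, time.Now(), "s")
    if err != nil {
        panic(err)
    }
    fmt.Println(metrics[0].UnixNano()) // 1491847420000000000
}
```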
159 metric/reader.go
@@ -1,159 +0,0 @@
package metric

import (
    "io"

    "github.com/influxdata/telegraf"
)

type state int

const (
    _ state = iota
    // normal state copies whole metrics into the given buffer until we can't
    // fit the next metric.
    normal
    // split state means that we have a metric that we were able to split, so
    // that we can fit it into multiple metrics (and calls to Read)
    split
    // overflow state means that we have a metric that didn't fit into a single
    // buffer, and needs to be split across multiple calls to Read.
    overflow
    // splitOverflow state means that a split metric didn't fit into a single
    // buffer, and needs to be split across multiple calls to Read.
    splitOverflow
    // done means we're done reading metrics, and now always return (0, io.EOF)
    done
)

type reader struct {
    metrics      []telegraf.Metric
    splitMetrics []telegraf.Metric
    buf          []byte
    state        state

    // metric index
    iM int
    // split metric index
    iSM int
    // buffer index
    iB int
}

func NewReader(metrics []telegraf.Metric) io.Reader {
    return &reader{
        metrics: metrics,
        state:   normal,
    }
}

func (r *reader) Read(p []byte) (n int, err error) {
    var i int
    switch r.state {
    case done:
        return 0, io.EOF
    case normal:
        for {
            // this for-loop is the sunny-day scenario, where we are given a
            // buffer that is large enough to hold at least a single metric.
            // all of the cases below it are edge-cases.
            if r.metrics[r.iM].Len() <= len(p[i:]) {
                i += r.metrics[r.iM].SerializeTo(p[i:])
            } else {
                break
            }
            r.iM++
            if r.iM == len(r.metrics) {
                r.state = done
                return i, io.EOF
            }
        }

        // if we haven't written any bytes, check if we can split the current
        // metric into multiple full metrics at a smaller size.
        if i == 0 {
            tmp := r.metrics[r.iM].Split(len(p))
            if len(tmp) > 1 {
                r.splitMetrics = tmp
                r.state = split
                if r.splitMetrics[0].Len() <= len(p) {
                    i += r.splitMetrics[0].SerializeTo(p)
                    r.iSM = 1
                } else {
                    // splitting didn't quite work, so we'll drop down and
                    // overflow the metric.
                    r.state = normal
                    r.iSM = 0
                }
            }
        }

        // if we haven't written any bytes and we're not at the end of the metrics
        // slice, then it means we have a single metric that is larger than the
        // provided buffer.
        if i == 0 {
            r.buf = r.metrics[r.iM].Serialize()
            i += copy(p, r.buf[r.iB:])
            r.iB += i
            r.state = overflow
        }

    case split:
        if r.splitMetrics[r.iSM].Len() <= len(p) {
            // write the current split metric
            i += r.splitMetrics[r.iSM].SerializeTo(p)
            r.iSM++
            if r.iSM >= len(r.splitMetrics) {
                // done writing the current split metrics
                r.iSM = 0
                r.iM++
                if r.iM == len(r.metrics) {
                    r.state = done
                    return i, io.EOF
                }
                r.state = normal
            }
        } else {
            // This would only happen if we split the metric, and then a
            // subsequent buffer was smaller than the initial one given,
            // so that our split metric no longer fits.
            r.buf = r.splitMetrics[r.iSM].Serialize()
            i += copy(p, r.buf[r.iB:])
            r.iB += i
            r.state = splitOverflow
        }

    case splitOverflow:
        i = copy(p, r.buf[r.iB:])
        r.iB += i
        if r.iB >= len(r.buf) {
            r.iB = 0
            r.iSM++
            if r.iSM == len(r.splitMetrics) {
                r.iM++
                if r.iM == len(r.metrics) {
                    r.state = done
                    return i, io.EOF
                }
                r.state = normal
            } else {
                r.state = split
            }
        }

    case overflow:
        i = copy(p, r.buf[r.iB:])
        r.iB += i
        if r.iB >= len(r.buf) {
            r.iB = 0
            r.iM++
            if r.iM == len(r.metrics) {
                r.state = done
                return i, io.EOF
            }
            r.state = normal
        }
    }

    return i, nil
}
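Taken together, the states above let a caller drain metrics through a buffer of any size. A minimal usage sketch, assuming the `metric.New`/`metric.NewReader` API shown in this diff:

```go
package main

import (
    "bytes"
    "fmt"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/metric"
)

func main() {
    m, _ := metric.New("cpu", map[string]string{"host": "a"},
        map[string]interface{}{"value": int64(1)}, time.Unix(1481032190, 0))
    r := metric.NewReader([]telegraf.Metric{m})

    var out bytes.Buffer
    buf := make([]byte, 16) // deliberately smaller than one metric to hit the overflow state
    for {
        n, err := r.Read(buf)
        out.Write(buf[:n])
        if err != nil { // io.EOF once every metric has been serialized
            break
        }
    }
    fmt.Print(out.String()) // cpu,host=a value=1i 1481032190000000000
}
```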
@@ -1,713 +0,0 @@
package metric

import (
    "io"
    "io/ioutil"
    "regexp"
    "strings"
    "testing"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func BenchmarkMetricReader(b *testing.B) {
    metrics := make([]telegraf.Metric, 10)
    for i := 0; i < 10; i++ {
        metrics[i], _ = New("foo", map[string]string{},
            map[string]interface{}{"value": int64(1)}, time.Now())
    }
    for n := 0; n < b.N; n++ {
        r := NewReader(metrics)
        io.Copy(ioutil.Discard, r)
    }
}

func TestMetricReader(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    metrics := make([]telegraf.Metric, 10)
    for i := 0; i < 10; i++ {
        metrics[i], _ = New("foo", map[string]string{},
            map[string]interface{}{"value": int64(1)}, ts)
    }

    r := NewReader(metrics)

    buf := make([]byte, 35)
    for i := 0; i < 10; i++ {
        n, err := r.Read(buf)
        if err != nil {
            assert.True(t, err == io.EOF, err.Error())
        }
        assert.Equal(t, 33, n)
        assert.Equal(t, "foo value=1i 1481032190000000000\n", string(buf[0:n]))
    }

    // reader should now be done, and always return 0, io.EOF
    for i := 0; i < 10; i++ {
        n, err := r.Read(buf)
        assert.True(t, err == io.EOF, err.Error())
        assert.Equal(t, 0, n)
    }
}

func TestMetricReader_OverflowMetric(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m, _ := New("foo", map[string]string{},
        map[string]interface{}{"value": int64(10)}, ts)
    metrics := []telegraf.Metric{m}

    r := NewReader(metrics)
    buf := make([]byte, 5)

    tests := []struct {
        exp string
        err error
        n   int
    }{
        {
            "foo v",
            nil,
            5,
        },
        {
            "alue=",
            nil,
            5,
        },
        {
            "10i 1",
            nil,
            5,
        },
        {
            "48103",
            nil,
            5,
        },
        {
            "21900",
            nil,
            5,
        },
        {
            "00000",
            nil,
            5,
        },
        {
            "000\n",
            io.EOF,
            4,
        },
        {
            "",
            io.EOF,
            0,
        },
    }

    for _, test := range tests {
        n, err := r.Read(buf)
        assert.Equal(t, test.n, n)
        assert.Equal(t, test.exp, string(buf[0:n]))
        assert.Equal(t, test.err, err)
    }
}

// Regression test for when a metric is the same size as the buffer.
//
// Previously EOF would not be set until the next call to Read.
func TestMetricReader_MetricSizeEqualsBufferSize(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m1, _ := New("foo", map[string]string{},
        map[string]interface{}{"a": int64(1)}, ts)
    metrics := []telegraf.Metric{m1}

    r := NewReader(metrics)
    buf := make([]byte, m1.Len())

    for {
        n, err := r.Read(buf)
        // Should never read 0 bytes unless at EOF, unless input buffer is 0 length
        if n == 0 {
            require.Equal(t, io.EOF, err)
            break
        }
        // Lines should be terminated with a LF
        if err == io.EOF {
            require.Equal(t, uint8('\n'), buf[n-1])
            break
        }
        require.NoError(t, err)
    }
}

// Regression test for when a metric requires to be split and one of the
// split metrics is exactly the size of the buffer.
//
// Previously an empty string would be returned on the next Read without error,
// and then next Read call would panic.
func TestMetricReader_SplitWithExactLengthSplit(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m1, _ := New("foo", map[string]string{},
        map[string]interface{}{"a": int64(1), "bb": int64(2)}, ts)
    metrics := []telegraf.Metric{m1}

    r := NewReader(metrics)
    buf := make([]byte, 30)

    // foo a=1i,bb=2i 1481032190000000000\n // len 35
    //
    // Requires this specific split order:
    // foo a=1i 1481032190000000000\n // len 29
    // foo bb=2i 1481032190000000000\n // len 30

    for {
        n, err := r.Read(buf)
        // Should never read 0 bytes unless at EOF, unless input buffer is 0 length
        if n == 0 {
            require.Equal(t, io.EOF, err)
            break
        }
        // Lines should be terminated with a LF
        if err == io.EOF {
            require.Equal(t, uint8('\n'), buf[n-1])
            break
        }
        require.NoError(t, err)
    }
}

// Regression test for when a metric requires to be split and one of the
// split metrics is larger than the buffer.
//
// Previously the metric index would be set incorrectly causing a panic.
func TestMetricReader_SplitOverflowOversized(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m1, _ := New("foo", map[string]string{},
        map[string]interface{}{
            "a":   int64(1),
            "bbb": int64(2),
        }, ts)
    metrics := []telegraf.Metric{m1}

    r := NewReader(metrics)
    buf := make([]byte, 30)

    // foo a=1i,bbb=2i 1481032190000000000\n // len 36
    //
    // foo a=1i 1481032190000000000\n // len 29
    // foo bbb=2i 1481032190000000000\n // len 31

    for {
        n, err := r.Read(buf)
        // Should never read 0 bytes unless at EOF, unless input buffer is 0 length
        if n == 0 {
            require.Equal(t, io.EOF, err)
            break
        }
        // Lines should be terminated with a LF
        if err == io.EOF {
            require.Equal(t, uint8('\n'), buf[n-1])
            break
        }
        require.NoError(t, err)
    }
}

// Regression test for when a split metric exactly fits in the buffer.
//
// Previously the metric would be overflow split when not required.
func TestMetricReader_SplitOverflowUneeded(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m1, _ := New("foo", map[string]string{},
        map[string]interface{}{"a": int64(1), "b": int64(2)}, ts)
    metrics := []telegraf.Metric{m1}

    r := NewReader(metrics)
    buf := make([]byte, 29)

    // foo a=1i,b=2i 1481032190000000000\n // len 34
    //
    // foo a=1i 1481032190000000000\n // len 29
    // foo b=2i 1481032190000000000\n // len 29

    for {
        n, err := r.Read(buf)
        // Should never read 0 bytes unless at EOF, unless input buffer is 0 length
        if n == 0 {
            require.Equal(t, io.EOF, err)
            break
        }
        // Lines should be terminated with a LF
        if err == io.EOF {
            require.Equal(t, uint8('\n'), buf[n-1])
            break
        }
        require.NoError(t, err)
    }
}

func TestMetricReader_OverflowMultipleMetrics(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m, _ := New("foo", map[string]string{},
        map[string]interface{}{"value": int64(10)}, ts)
    metrics := []telegraf.Metric{m, m.Copy()}

    r := NewReader(metrics)
    buf := make([]byte, 10)

    tests := []struct {
        exp string
        err error
        n   int
    }{
        {
            "foo value=",
            nil,
            10,
        },
        {
            "10i 148103",
            nil,
            10,
        },
        {
            "2190000000",
            nil,
            10,
        },
        {
            "000\n",
            nil,
            4,
        },
        {
            "foo value=",
            nil,
            10,
        },
        {
            "10i 148103",
            nil,
            10,
        },
        {
            "2190000000",
            nil,
            10,
        },
        {
            "000\n",
            io.EOF,
            4,
        },
        {
            "",
            io.EOF,
            0,
        },
    }

    for _, test := range tests {
        n, err := r.Read(buf)
        assert.Equal(t, test.n, n)
        assert.Equal(t, test.exp, string(buf[0:n]))
        assert.Equal(t, test.err, err)
    }
}

// test splitting a metric
func TestMetricReader_SplitMetric(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m1, _ := New("foo", map[string]string{},
        map[string]interface{}{
            "value1": int64(10),
            "value2": int64(10),
            "value3": int64(10),
            "value4": int64(10),
            "value5": int64(10),
            "value6": int64(10),
        },
        ts,
    )
    metrics := []telegraf.Metric{m1}

    r := NewReader(metrics)
    buf := make([]byte, 60)

    tests := []struct {
        expRegex string
        err      error
        n        int
    }{
        {
            `foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`,
            nil,
            57,
        },
        {
            `foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`,
            io.EOF,
            57,
        },
        {
            "",
            io.EOF,
            0,
        },
    }

    for _, test := range tests {
        n, err := r.Read(buf)
        assert.Equal(t, test.n, n)
        re := regexp.MustCompile(test.expRegex)
        assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n]))
        assert.Equal(t, test.err, err)
    }
}

// test an array with one split metric and one unsplit
func TestMetricReader_SplitMetric2(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m1, _ := New("foo", map[string]string{},
        map[string]interface{}{
            "value1": int64(10),
            "value2": int64(10),
            "value3": int64(10),
            "value4": int64(10),
            "value5": int64(10),
            "value6": int64(10),
        },
        ts,
    )
    m2, _ := New("foo", map[string]string{},
        map[string]interface{}{
            "value1": int64(10),
        },
        ts,
    )
    metrics := []telegraf.Metric{m1, m2}

    r := NewReader(metrics)
    buf := make([]byte, 60)

    tests := []struct {
        expRegex string
        err      error
        n        int
    }{
        {
            `foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`,
            nil,
            57,
        },
        {
            `foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`,
            nil,
            57,
        },
        {
            `foo value1=10i 1481032190000000000\n`,
            io.EOF,
            35,
        },
        {
            "",
            io.EOF,
            0,
        },
    }

    for _, test := range tests {
        n, err := r.Read(buf)
        assert.Equal(t, test.n, n)
        re := regexp.MustCompile(test.expRegex)
        assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n]))
        assert.Equal(t, test.err, err)
    }
}

// test split that results in metrics that are still too long, which results in
// the reader falling back to regular overflow.
func TestMetricReader_SplitMetricTooLong(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m1, _ := New("foo", map[string]string{},
        map[string]interface{}{
            "value1": int64(10),
            "value2": int64(10),
        },
        ts,
    )
    metrics := []telegraf.Metric{m1}

    r := NewReader(metrics)
    buf := make([]byte, 30)

    tests := []struct {
        expRegex string
        err      error
        n        int
    }{
        {
            `foo value\d=10i,value\d=10i 1481`,
            nil,
            30,
        },
        {
            `032190000000000\n`,
            io.EOF,
            16,
        },
        {
            "",
            io.EOF,
            0,
        },
    }

    for _, test := range tests {
        n, err := r.Read(buf)
        assert.Equal(t, test.n, n)
        re := regexp.MustCompile(test.expRegex)
        assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n]))
        assert.Equal(t, test.err, err)
    }
}

// test split with a changing buffer size in the middle of subsequent calls
// to Read
func TestMetricReader_SplitMetricChangingBuffer(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m1, _ := New("foo", map[string]string{},
        map[string]interface{}{
            "value1": int64(10),
            "value2": int64(10),
            "value3": int64(10),
        },
        ts,
    )
    m2, _ := New("foo", map[string]string{},
        map[string]interface{}{
            "value1": int64(10),
        },
        ts,
    )
    metrics := []telegraf.Metric{m1, m2}

    r := NewReader(metrics)

    tests := []struct {
        expRegex string
        err      error
        n        int
        buf      []byte
    }{
        {
            `foo value\d=10i 1481032190000000000\n`,
            nil,
            35,
            make([]byte, 36),
        },
        {
            `foo value\d=10i 148103219000000`,
            nil,
            30,
            make([]byte, 30),
        },
        {
            `0000\n`,
            nil,
            5,
            make([]byte, 30),
        },
        {
            `foo value\d=10i 1481032190000000000\n`,
            nil,
            35,
            make([]byte, 36),
        },
        {
            `foo value1=10i 1481032190000000000\n`,
            io.EOF,
            35,
            make([]byte, 36),
        },
        {
            "",
            io.EOF,
            0,
            make([]byte, 36),
        },
    }

    for _, test := range tests {
        n, err := r.Read(test.buf)
        assert.Equal(t, test.n, n, test.expRegex)
        re := regexp.MustCompile(test.expRegex)
        assert.True(t, re.MatchString(string(test.buf[0:n])), string(test.buf[0:n]))
        assert.Equal(t, test.err, err, test.expRegex)
    }
}

// test split with a changing buffer size in the middle of subsequent calls
// to Read
func TestMetricReader_SplitMetricChangingBuffer2(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m1, _ := New("foo", map[string]string{},
        map[string]interface{}{
            "value1": int64(10),
            "value2": int64(10),
        },
        ts,
    )
    m2, _ := New("foo", map[string]string{},
        map[string]interface{}{
            "value1": int64(10),
        },
        ts,
    )
    metrics := []telegraf.Metric{m1, m2}

    r := NewReader(metrics)

    tests := []struct {
        expRegex string
        err      error
        n        int
        buf      []byte
    }{
        {
            `foo value\d=10i 1481032190000000000\n`,
            nil,
            35,
            make([]byte, 36),
        },
        {
            `foo value\d=10i 148103219000000`,
            nil,
            30,
            make([]byte, 30),
        },
        {
            `0000\n`,
            nil,
            5,
            make([]byte, 30),
        },
        {
            `foo value1=10i 1481032190000000000\n`,
            io.EOF,
            35,
            make([]byte, 36),
        },
        {
            "",
            io.EOF,
            0,
            make([]byte, 36),
        },
    }

    for _, test := range tests {
        n, err := r.Read(test.buf)
        assert.Equal(t, test.n, n, test.expRegex)
        re := regexp.MustCompile(test.expRegex)
        assert.True(t, re.MatchString(string(test.buf[0:n])), string(test.buf[0:n]))
        assert.Equal(t, test.err, err, test.expRegex)
    }
}

func TestReader_Read(t *testing.T) {
    epoch := time.Unix(0, 0)

    type args struct {
        name   string
        tags   map[string]string
        fields map[string]interface{}
        t      time.Time
        mType  []telegraf.ValueType
    }
    tests := []struct {
        name     string
        args     args
        expected []byte
    }{
        {
            name: "escape backslashes in string field",
            args: args{
                name:   "cpu",
                tags:   map[string]string{},
                fields: map[string]interface{}{"value": `test\`},
                t:      epoch,
            },
            expected: []byte(`cpu value="test\\" 0`),
        },
        {
            name: "escape quote in string field",
            args: args{
                name:   "cpu",
                tags:   map[string]string{},
                fields: map[string]interface{}{"value": `test"`},
                t:      epoch,
            },
            expected: []byte(`cpu value="test\"" 0`),
        },
        {
            name: "escape quote and backslash in string field",
            args: args{
                name:   "cpu",
                tags:   map[string]string{},
                fields: map[string]interface{}{"value": `test\"`},
                t:      epoch,
            },
            expected: []byte(`cpu value="test\\\"" 0`),
        },
        {
            name: "escape multiple backslash in string field",
            args: args{
                name:   "cpu",
                tags:   map[string]string{},
                fields: map[string]interface{}{"value": `test\\`},
                t:      epoch,
            },
            expected: []byte(`cpu value="test\\\\" 0`),
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            buf := make([]byte, 512)
            m, err := New(tt.args.name, tt.args.tags, tt.args.fields, tt.args.t, tt.args.mType...)
            require.NoError(t, err)

            r := NewReader([]telegraf.Metric{m})
            num, err := r.Read(buf)
            if err != io.EOF {
                require.NoError(t, err)
            }
            line := string(buf[:num])
            // This is done so that we can use raw strings in the test spec
            noeol := strings.TrimRight(line, "\n")
            require.Equal(t, string(tt.expected), noeol)
            require.Equal(t, len(tt.expected)+1, num)
        })
    }
}

func TestMetricRoundtrip(t *testing.T) {
    const lp = `nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=netstat,sr=database IpExtInBcastOctets=12570626154i,IpExtInBcastPkts=95541226i,IpExtInCEPkts=0i,IpExtInCsumErrors=0i,IpExtInECT0Pkts=55674i,IpExtInECT1Pkts=0i,IpExtInMcastOctets=5928296i,IpExtInMcastPkts=174365i,IpExtInNoECTPkts=17965863529i,IpExtInNoRoutes=20i,IpExtInOctets=3334866321815i,IpExtInTruncatedPkts=0i,IpExtOutBcastOctets=0i,IpExtOutBcastPkts=0i,IpExtOutMcastOctets=0i,IpExtOutMcastPkts=0i,IpExtOutOctets=31397892391399i,TcpExtArpFilter=0i,TcpExtBusyPollRxPackets=0i,TcpExtDelayedACKLocked=14094i,TcpExtDelayedACKLost=302083i,TcpExtDelayedACKs=55486507i,TcpExtEmbryonicRsts=11879i,TcpExtIPReversePathFilter=0i,TcpExtListenDrops=1736i,TcpExtListenOverflows=0i,TcpExtLockDroppedIcmps=0i,TcpExtOfoPruned=0i,TcpExtOutOfWindowIcmps=8i,TcpExtPAWSActive=0i,TcpExtPAWSEstab=974i,TcpExtPAWSPassive=0i,TcpExtPruneCalled=0i,TcpExtRcvPruned=0i,TcpExtSyncookiesFailed=12593i,TcpExtSyncookiesRecv=0i,TcpExtSyncookiesSent=0i,TcpExtTCPACKSkippedChallenge=0i,TcpExtTCPACKSkippedFinWait2=0i,TcpExtTCPACKSkippedPAWS=806i,TcpExtTCPACKSkippedSeq=519i,TcpExtTCPACKSkippedSynRecv=0i,TcpExtTCPACKSkippedTimeWait=0i,TcpExtTCPAbortFailed=0i,TcpExtTCPAbortOnClose=22i,TcpExtTCPAbortOnData=36593i,TcpExtTCPAbortOnLinger=0i,TcpExtTCPAbortOnMemory=0i,TcpExtTCPAbortOnTimeout=674i,TcpExtTCPAutoCorking=494253233i,TcpExtTCPBacklogDrop=0i,TcpExtTCPChallengeACK=281i,TcpExtTCPDSACKIgnoredNoUndo=93354i,TcpExtTCPDSACKIgnoredOld=336i,TcpExtTCPDSACKOfoRecv=0i,TcpExtTCPDSACKOfoSent=7i,TcpExtTCPDSACKOldSent=302073i,TcpExtTCPDSACKRecv=215884i,TcpExtTCPDSACKUndo=7633i,TcpExtTCPDeferAcceptDrop=0i,TcpExtTCPDirectCopyFromBacklog=0i,TcpExtTCPDirectCopyFromPrequeue=0i,TcpExtTCPFACKReorder=1320i,TcpExtTCPFastOpenActive=0i,TcpExtTCPFastOpenActiveFail=0i,TcpExtTCPFastOpenCookieReqd=0i,TcpExtTCPFastOpenListenOverflow=0i,TcpExtTCPFastOpenPassive=0i,TcpExtTCPFastOpenPassiveFail=0i,TcpExtTCPFastRetrans=350681i,TcpExtTCPForwardRetrans=142168i,TcpExtTCPFromZeroWindowAdv=4317i,TcpExtTCPFullUndo=29502i,TcpExtTCPHPAcks=10267073000i,TcpExtTCPHPHits=5629837098i,TcpExtTCPHPHitsToUser=0i,TcpExtTCPHystartDelayCwnd=285127i,TcpExtTCPHystartDelayDetect=12318i,TcpExtTCPHystartTrainCwnd=69160570i,TcpExtTCPHystartTrainDetect=3315799i,TcpExtTCPLossFailures=109i,TcpExtTCPLossProbeRecovery=110819i,TcpExtTCPLossProbes=233995i,TcpExtTCPLossUndo=5276i,TcpExtTCPLostRetransmit=397i,TcpExtTCPMD5NotFound=0i,TcpExtTCPMD5Unexpected=0i,TcpExtTCPMemoryPressures=0i,TcpExtTCPMinTTLDrop=0i,TcpExtTCPOFODrop=0i,TcpExtTCPOFOMerge=7i,TcpExtTCPOFOQueue=15196i,TcpExtTCPOrigDataSent=29055119435i,TcpExtTCPPartialUndo=21320i,TcpExtTCPPrequeueDropped=0i,TcpExtTCPPrequeued=0i,TcpExtTCPPureAcks=1236441827i,TcpExtTCPRcvCoalesce=225590473i,TcpExtTCPRcvCollapsed=0i,TcpExtTCPRenoFailures=0i,TcpExtTCPRenoRecovery=0i,TcpExtTCPRenoRecoveryFail=0i,TcpExtTCPRenoReorder=0i,TcpExtTCPReqQFullDoCookies=0i,TcpExtTCPReqQFullDrop=0i,TcpExtTCPRetransFail=41i,TcpExtTCPSACKDiscard=0i,TcpExtTCPSACKReneging=0i,TcpExtTCPSACKReorder=4307i,TcpExtTCPSYNChallenge=244i,TcpExtTCPSackFailures=1698i,TcpExtTCPSackMerged=184668i,TcpExtTCPSackRecovery=97369i,TcpExtTCPSackRecoveryFail=381i,TcpExtTCPSackShiftFallback=2697079i,TcpExtTCPSackShifted=760299i,TcpExtTCPSchedulerFailed=0i,TcpExtTCPSlowStartRetrans=9276i,TcpExtTCPSpuriousRTOs=959i,TcpExtTCPSpuriousRtxHostQueues=2973i,TcpExtTCPSynRetrans=200970i,TcpExtTCPTSReorder=15221i,TcpExtTCPTimeWaitOverflow=0i,TcpExtTCPTimeouts=70127i,TcpExtTCPToZeroWindowAdv=4317i,TcpExtTCPWantZeroWindowAdv=2133i,TcpExtTW=24809813i,TcpExtTWKilled=0i,TcpExtTWRecycled=0i 1496460785000000000
nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=snmp,sr=database IcmpInAddrMaskReps=0i,IcmpInAddrMasks=90i,IcmpInCsumErrors=0i,IcmpInDestUnreachs=284401i,IcmpInEchoReps=9i,IcmpInEchos=1761912i,IcmpInErrors=407i,IcmpInMsgs=2047767i,IcmpInParmProbs=0i,IcmpInRedirects=0i,IcmpInSrcQuenchs=0i,IcmpInTimeExcds=46i,IcmpInTimestampReps=0i,IcmpInTimestamps=1309i,IcmpMsgInType0=9i,IcmpMsgInType11=46i,IcmpMsgInType13=1309i,IcmpMsgInType17=90i,IcmpMsgInType3=284401i,IcmpMsgInType8=1761912i,IcmpMsgOutType0=1761912i,IcmpMsgOutType14=1248i,IcmpMsgOutType3=108709i,IcmpMsgOutType8=9i,IcmpOutAddrMaskReps=0i,IcmpOutAddrMasks=0i,IcmpOutDestUnreachs=108709i,IcmpOutEchoReps=1761912i,IcmpOutEchos=9i,IcmpOutErrors=0i,IcmpOutMsgs=1871878i,IcmpOutParmProbs=0i,IcmpOutRedirects=0i,IcmpOutSrcQuenchs=0i,IcmpOutTimeExcds=0i,IcmpOutTimestampReps=1248i,IcmpOutTimestamps=0i,IpDefaultTTL=64i,IpForwDatagrams=0i,IpForwarding=2i,IpFragCreates=0i,IpFragFails=0i,IpFragOKs=0i,IpInAddrErrors=0i,IpInDelivers=17658795773i,IpInDiscards=0i,IpInHdrErrors=0i,IpInReceives=17659269339i,IpInUnknownProtos=0i,IpOutDiscards=236976i,IpOutNoRoutes=1009i,IpOutRequests=23466783734i,IpReasmFails=0i,IpReasmOKs=0i,IpReasmReqds=0i,IpReasmTimeout=0i,TcpActiveOpens=23308977i,TcpAttemptFails=3757543i,TcpCurrEstab=280i,TcpEstabResets=184792i,TcpInCsumErrors=0i,TcpInErrs=232i,TcpInSegs=17536573089i,TcpMaxConn=-1i,TcpOutRsts=4051451i,TcpOutSegs=29836254873i,TcpPassiveOpens=176546974i,TcpRetransSegs=878085i,TcpRtoAlgorithm=1i,TcpRtoMax=120000i,TcpRtoMin=200i,UdpInCsumErrors=0i,UdpInDatagrams=24441661i,UdpInErrors=0i,UdpLiteInCsumErrors=0i,UdpLiteInDatagrams=0i,UdpLiteInErrors=0i,UdpLiteNoPorts=0i,UdpLiteOutDatagrams=0i,UdpLiteRcvbufErrors=0i,UdpLiteSndbufErrors=0i,UdpNoPorts=17660i,UdpOutDatagrams=51807896i,UdpRcvbufErrors=0i,UdpSndbufErrors=236922i 1496460785000000000
`
    metrics, err := Parse([]byte(lp))
    require.NoError(t, err)
    r := NewReader(metrics)
    buf := make([]byte, 128)
    _, err = r.Read(buf)
    require.NoError(t, err)
    metrics, err = Parse(buf)
    require.NoError(t, err)
}
7 metric/uint_support.go (Normal file)
@@ -0,0 +1,7 @@
// +build uint64

package metric

func init() {
    EnableUintSupport()
}
@@ -1,6 +1,6 @@
# BasicStats Aggregator Plugin

The BasicStats aggregator plugin give us count,max,min,mean,s2(variance), stdev for a set of values,
The BasicStats aggregator plugin give us count,max,min,mean,sum,s2(variance), stdev for a set of values,
emitting the aggregate every `period` seconds.

### Configuration:
@@ -21,11 +21,11 @@ emitting the aggregate every `period` seconds.
  ## BasicStats Arguments:

  ## Configures which basic stats to push as fields
  stats = ["count","min","max","mean","stdev","s2"]
  stats = ["count","min","max","mean","stdev","s2","sum"]
```

- stats
  - If not specified, all stats are aggregated and pushed as fields
  - If not specified, then `count`, `min`, `max`, `mean`, `stdev`, and `s2` are aggregated and pushed as fields. `sum` is not aggregated by default to maintain backwards compatibility.
  - If empty array, no stats are aggregated

### Measurements & Fields:
@@ -35,6 +35,7 @@ emitting the aggregate every `period` seconds.
- field1_max
- field1_min
- field1_mean
- field1_sum
- field1_s2 (variance)
- field1_stdev (standard deviation)

@@ -48,8 +49,8 @@ No tags are applied by this aggregator.
$ telegraf --config telegraf.conf --quiet
system,host=tars load1=1 1475583980000000000
system,host=tars load1=1 1475583990000000000
system,host=tars load1_count=2,load1_max=1,load1_min=1,load1_mean=1,load1_s2=0,load1_stdev=0 1475584010000000000
system,host=tars load1_count=2,load1_max=1,load1_min=1,load1_mean=1,load1_sum=2,load1_s2=0,load1_stdev=0 1475584010000000000
system,host=tars load1=1 1475584020000000000
system,host=tars load1=3 1475584030000000000
system,host=tars load1_count=2,load1_max=3,load1_min=1,load1_mean=2,load1_s2=2,load1_stdev=1.414162 1475584010000000000
system,host=tars load1_count=2,load1_max=3,load1_min=1,load1_mean=2,load1_sum=4,load1_s2=2,load1_stdev=1.414162 1475584010000000000
```
@@ -22,6 +22,7 @@ type configuredStats struct {
    mean     bool
    variance bool
    stdev    bool
    sum      bool
}

func NewBasicStats() *BasicStats {
@@ -40,6 +41,7 @@ type basicstats struct {
    count float64
    min   float64
    max   float64
    sum   float64
    mean  float64
    M2    float64 //intermedia value for variance/stdev
}
@@ -77,6 +79,7 @@ func (m *BasicStats) Add(in telegraf.Metric) {
    min:  fv,
    max:  fv,
    mean: fv,
    sum:  fv,
    M2:   0.0,
}
}
@@ -92,6 +95,7 @@ func (m *BasicStats) Add(in telegraf.Metric) {
    min:  fv,
    max:  fv,
    mean: fv,
    sum:  fv,
    M2:   0.0,
}
continue
@@ -119,6 +123,8 @@ func (m *BasicStats) Add(in telegraf.Metric) {
} else if fv > tmp.max {
    tmp.max = fv
}
//sum compute
tmp.sum += fv
//store final data
m.cache[id].fields[k] = tmp
}
@@ -146,6 +152,9 @@ func (m *BasicStats) Push(acc telegraf.Accumulator) {
if config.mean {
    fields[k+"_mean"] = v.mean
}
if config.sum {
    fields[k+"_sum"] = v.sum
}

//v.count always >=1
if v.count > 1 {
@@ -187,6 +196,8 @@ func parseStats(names []string) *configuredStats {
    parsed.variance = true
case "stdev":
    parsed.stdev = true
case "sum":
    parsed.sum = true

default:
    log.Printf("W! Unrecognized basic stat '%s', ignoring", name)
@@ -206,6 +217,7 @@ func defaultStats() *configuredStats {
defaults.mean = true
defaults.variance = true
defaults.stdev = true
defaults.sum = false

return defaults
}
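The `M2` field is the intermediate value of a one-pass running-variance scheme (Welford's algorithm is the usual choice, and is consistent with the comment above, though this hunk does not show the plugin's own update code): each sample adjusts the mean and the sum of squared deviations in a single pass, so `s2` falls out as `M2/(count-1)` without storing samples. A standalone sketch of that update rule:

```go
package main

import (
    "fmt"
    "math"
)

// welford accumulates mean and M2 (the sum of squared deviations from the
// running mean) in one pass over the data.
type welford struct {
    count, mean, m2 float64
}

func (w *welford) add(x float64) {
    w.count++
    delta := x - w.mean
    w.mean += delta / w.count
    w.m2 += delta * (x - w.mean)
}

func main() {
    // The two load1 samples from the README example above: 1 and 3.
    var w welford
    for _, x := range []float64{1, 3} {
        w.add(x)
    }
    s2 := w.m2 / (w.count - 1)                    // sample variance
    fmt.Println(w.mean, s2, math.Sqrt(s2))        // 2 2 1.4142...
}
```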
@@ -7,6 +7,7 @@ import (

    "github.com/influxdata/telegraf/metric"
    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"
)

var m1, _ = metric.New("m1",
@@ -250,6 +251,83 @@ func TestBasicStatsWithOnlyMean(t *testing.T) {
    acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test only aggregating sum
func TestBasicStatsWithOnlySum(t *testing.T) {

    aggregator := NewBasicStats()
    aggregator.Stats = []string{"sum"}

    aggregator.Add(m1)
    aggregator.Add(m2)

    acc := testutil.Accumulator{}
    aggregator.Push(&acc)

    expectedFields := map[string]interface{}{
        "a_sum": float64(2),
        "b_sum": float64(4),
        "c_sum": float64(6),
        "d_sum": float64(8),
        "e_sum": float64(200),
    }
    expectedTags := map[string]string{
        "foo": "bar",
    }
    acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Verify that sum doesn't suffer from floating point errors. Early
// implementations of sum were calulated from mean and count, which
// e.g. summed "1, 1, 5, 1" as "7.999999..." instead of 8.
func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) {

    var sum1, _ = metric.New("m1",
        map[string]string{},
        map[string]interface{}{
            "a": int64(1),
        },
        time.Now(),
    )
    var sum2, _ = metric.New("m1",
        map[string]string{},
        map[string]interface{}{
            "a": int64(1),
        },
        time.Now(),
    )
    var sum3, _ = metric.New("m1",
        map[string]string{},
        map[string]interface{}{
            "a": int64(5),
        },
        time.Now(),
    )
    var sum4, _ = metric.New("m1",
        map[string]string{},
        map[string]interface{}{
            "a": int64(1),
        },
        time.Now(),
    )

    aggregator := NewBasicStats()
    aggregator.Stats = []string{"sum"}

    aggregator.Add(sum1)
    aggregator.Add(sum2)
    aggregator.Add(sum3)
    aggregator.Add(sum4)

    acc := testutil.Accumulator{}
    aggregator.Push(&acc)

    expectedFields := map[string]interface{}{
        "a_sum": float64(8),
    }
    expectedTags := map[string]string{}
    acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test only aggregating variance
func TestBasicStatsWithOnlyVariance(t *testing.T) {

@@ -328,6 +406,57 @@ func TestBasicStatsWithMinAndMax(t *testing.T) {
    acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test aggregating with all stats
func TestBasicStatsWithAllStats(t *testing.T) {
    acc := testutil.Accumulator{}
    minmax := NewBasicStats()
    minmax.Stats = []string{"count", "min", "max", "mean", "stdev", "s2", "sum"}

    minmax.Add(m1)
    minmax.Add(m2)
    minmax.Push(&acc)

    expectedFields := map[string]interface{}{
        "a_count": float64(2), //a
        "a_max":   float64(1),
        "a_min":   float64(1),
        "a_mean":  float64(1),
        "a_stdev": float64(0),
        "a_s2":    float64(0),
        "a_sum":   float64(2),
        "b_count": float64(2), //b
        "b_max":   float64(3),
        "b_min":   float64(1),
        "b_mean":  float64(2),
        "b_s2":    float64(2),
        "b_sum":   float64(4),
        "b_stdev": math.Sqrt(2),
        "c_count": float64(2), //c
        "c_max":   float64(4),
        "c_min":   float64(2),
        "c_mean":  float64(3),
        "c_s2":    float64(2),
        "c_stdev": math.Sqrt(2),
        "c_sum":   float64(6),
        "d_count": float64(2), //d
        "d_max":   float64(6),
        "d_min":   float64(2),
        "d_mean":  float64(4),
        "d_s2":    float64(8),
        "d_stdev": math.Sqrt(8),
        "d_sum":   float64(8),
        "e_count": float64(1), //e
        "e_max":   float64(200),
        "e_min":   float64(200),
        "e_mean":  float64(200),
        "e_sum":   float64(200),
    }
    expectedTags := map[string]string{
        "foo": "bar",
    }
    acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test that if an empty array is passed, no points are pushed
func TestBasicStatsWithNoStats(t *testing.T) {

@@ -357,3 +486,26 @@ func TestBasicStatsWithUnknownStat(t *testing.T) {

    acc.AssertDoesNotContainMeasurement(t, "m1")
}

// Test that if Stats isn't supplied, then we only do count, min, max, mean,
// stdev, and s2. We purposely exclude sum for backwards compatability,
// otherwise user's working systems will suddenly (and surprisingly) start
// capturing sum without their input.
func TestBasicStatsWithDefaultStats(t *testing.T) {

    aggregator := NewBasicStats()

    aggregator.Add(m1)
    aggregator.Add(m2)

    acc := testutil.Accumulator{}
    aggregator.Push(&acc)

    assert.True(t, acc.HasField("m1", "a_count"))
    assert.True(t, acc.HasField("m1", "a_min"))
    assert.True(t, acc.HasField("m1", "a_max"))
    assert.True(t, acc.HasField("m1", "a_mean"))
    assert.True(t, acc.HasField("m1", "a_stdev"))
    assert.True(t, acc.HasField("m1", "a_s2"))
    assert.False(t, acc.HasField("m1", "a_sum"))
}
@@ -29,6 +29,7 @@ import (
    _ "github.com/influxdata/telegraf/plugins/inputs/graylog"
    _ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
    _ "github.com/influxdata/telegraf/plugins/inputs/hddtemp"
    _ "github.com/influxdata/telegraf/plugins/inputs/http"
    _ "github.com/influxdata/telegraf/plugins/inputs/http_listener"
    _ "github.com/influxdata/telegraf/plugins/inputs/http_response"
    _ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
@@ -145,23 +145,25 @@ func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error {
    go a.process(msgs, acc)

    go func() {
        err := <-a.conn.NotifyClose(make(chan *amqp.Error))
        if err == nil {
            return
        }

        log.Printf("I! AMQP consumer connection closed: %s; trying to reconnect", err)
        for {
            msgs, err := a.connect(amqpConf)
            if err != nil {
                log.Printf("E! AMQP connection failed: %s", err)
                time.Sleep(10 * time.Second)
                continue
            err := <-a.conn.NotifyClose(make(chan *amqp.Error))
            if err == nil {
                break
            }

            a.wg.Add(1)
            go a.process(msgs, acc)
            break
            log.Printf("I! AMQP consumer connection closed: %s; trying to reconnect", err)
            for {
                msgs, err := a.connect(amqpConf)
                if err != nil {
                    log.Printf("E! AMQP connection failed: %s", err)
                    time.Sleep(10 * time.Second)
                    continue
                }

                a.wg.Add(1)
                go a.process(msgs, acc)
                break
            }
        }
    }()
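The rendered diff interleaves the old and new bodies of the goroutine, so the shape of the change is easier to see in isolation: the wait on the close notification now sits inside an outer loop, so the consumer reconnects after every abnormal close and exits only on a clean (nil) close. A self-contained sketch of that control flow with a stubbed connection; all names here are hypothetical, the plugin itself uses `*amqp.Connection`:

```go
package main

import (
    "errors"
    "fmt"
    "time"
)

// stubConn simulates amqp's NotifyClose channel: it reports one failure,
// then a clean close (nil), which ends the loop.
type stubConn struct{ closes chan error }

func (c *stubConn) NotifyClose() <-chan error { return c.closes }

func main() {
    conn := &stubConn{closes: make(chan error, 2)}
    conn.closes <- errors.New("broker went away")
    conn.closes <- nil

    // Outer loop: keep reconnecting after every abnormal close; a nil
    // close means orderly shutdown, so stop.
    for {
        err := <-conn.NotifyClose()
        if err == nil {
            break
        }
        fmt.Printf("connection closed: %v; reconnecting\n", err)
        // Inner loop: retry the connection until it succeeds (the plugin
        // sleeps 10s between attempts; shortened here).
        for {
            time.Sleep(10 * time.Millisecond)
            fmt.Println("reconnected, consuming again")
            break
        }
    }
}
```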
@@ -1,43 +1,54 @@
# Telegraf Input Plugin: Consul
# Consul Input Plugin

This plugin will collect statistics about all health checks registered in the Consul. It uses [Consul API](https://www.consul.io/docs/agent/http/health.html#health_state)
to query the data. It will not report the [telemetry](https://www.consul.io/docs/agent/telemetry.html) but Consul can report those stats already using StatsD protocol if needed.
This plugin will collect statistics about all health checks registered in the
Consul. It uses [Consul API](https://www.consul.io/docs/agent/http/health.html#health_state)
to query the data. It will not report the
[telemetry](https://www.consul.io/docs/agent/telemetry.html) but Consul can
report those stats already using StatsD protocol if needed.

## Configuration:
### Configuration:

```
```toml
# Gather health check statuses from services registered in Consul
[[inputs.consul]]
  ## Most of these values defaults to the one configured on a Consul's agent level.
  ## Optional Consul server address (default: "")
  # address = ""
  ## Optional URI scheme for the Consul server (default: "")
  # scheme = ""
  ## Optional ACL token used in every request (default: "")
  ## Consul server address
  # address = "localhost"

  ## URI scheme for the Consul server, one of "http", "https"
  # scheme = "http"

  ## ACL token used in every request
  # token = ""
  ## Optional username used for request HTTP Basic Authentication (default: "")

  ## HTTP Basic Authentication username and password.
  # username = ""
  ## Optional password used for HTTP Basic Authentication (default: "")
  # password = ""
  ## Optional data centre to query the health checks from (default: "")

  ## Data centre to query the health checks from
  # datacentre = ""

  ## SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## If false, skip chain & host verification
  # insecure_skip_verify = true
```

## Measurements:
### Metrics:

### Consul:
Tags:
- node: on which node check/service is registered on
- service_name: name of the service (this is the service name not the service ID)
- check_id

Fields:
- check_name
- service_id
- status
- passing
- critical
- warning
- consul_health_checks
  - tags:
    - node (node that check/service is registred on)
    - service_name
    - check_id
  - fields:
    - check_name
    - service_id
    - status
    - passing (integer)
    - critical (integer)
    - warning (integer)

`passing`, `critical`, and `warning` are integer representations of the health
check state. A value of `1` represents that the status was the state of the
@@ -46,8 +57,6 @@ the health check at this sample.
## Example output

```
$ telegraf --config ./telegraf.conf --input-filter consul --test
* Plugin: consul, Collection 1
> consul_health_checks,host=wolfpit,node=consul-server-node,check_id="serfHealth" check_name="Serf Health Status",service_id="",status="passing",passing=1i,critical=0i,warning=0i 1464698464486439902
> consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com,check_id="service:www-example-com.test01" check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical",passing=0i,critical=1i,warning=0i 1464698464486519036
consul_health_checks,host=wolfpit,node=consul-server-node,check_id="serfHealth" check_name="Serf Health Status",service_id="",status="passing",passing=1i,critical=0i,warning=0i 1464698464486439902
consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com,check_id="service:www-example-com.test01" check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical",passing=0i,critical=1i,warning=0i 1464698464486519036
```
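The plugin reads the same data the Consul health-state endpoint serves. A minimal sketch using the official client; the `github.com/hashicorp/consul/api` package is an assumption here, since this diff does not show the plugin's imports:

```go
package main

import (
    "fmt"

    "github.com/hashicorp/consul/api"
)

func main() {
    // DefaultConfig targets localhost:8500 over HTTP, matching the
    // plugin's defaults shown above.
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        panic(err)
    }
    // "any" returns checks in every state: passing, warning, critical.
    checks, _, err := client.Health().State("any", nil)
    if err != nil {
        panic(err)
    }
    for _, c := range checks {
        fmt.Printf("node=%s service=%s check=%s status=%s\n",
            c.Node, c.ServiceName, c.CheckID, c.Status)
    }
}
```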
@@ -31,19 +31,28 @@ type Consul struct {
}

var sampleConfig = `
  ## Most of these values defaults to the one configured on a Consul's agent level.
  ## Optional Consul server address (default: "localhost")
  ## Consul server address
  # address = "localhost"
  ## Optional URI scheme for the Consul server (default: "http")

  ## URI scheme for the Consul server, one of "http", "https"
  # scheme = "http"
  ## Optional ACL token used in every request (default: "")

  ## ACL token used in every request
  # token = ""
  ## Optional username used for request HTTP Basic Authentication (default: "")

  ## HTTP Basic Authentication username and password.
  # username = ""
  ## Optional password used for HTTP Basic Authentication (default: "")
  # password = ""
  ## Optional data centre to query the health checks from (default: "")

  ## Data centre to query the health checks from
  # datacentre = ""

  ## SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## If false, skip chain & host verification
  # insecure_skip_verify = true
`

func (c *Consul) Description() string {
@@ -8,11 +8,18 @@ Depending on the work load of your DC/OS cluster, this plugin can quickly
create a high number of series which, when unchecked, can cause high load on
your database.

- Use [measurement filtering](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#measurement-filtering) liberally to exclude unneeded metrics as well as the node, container, and app inclue/exclude options.
- Write to a database with an appropriate [retention policy](https://docs.influxdata.com/influxdb/v1.3/concepts/glossary/#retention-policy-rp).
- Limit the number of series allowed in your database using the `max-series-per-database` and `max-values-per-tag` settings.
- Consider enabling the [TSI](https://docs.influxdata.com/influxdb/v1.3/about_the_project/releasenotes-changelog/#release-notes-8) engine.
- Monitor your [series cardinality](https://docs.influxdata.com/influxdb/v1.3/troubleshooting/frequently-asked-questions/#how-can-i-query-for-series-cardinality).
- Use the
  [measurement filtering](https://docs.influxdata.com/telegraf/latest/administration/configuration/#measurement-filtering)
  options to exclude unneeded tags.
- Write to a database with an appropriate
  [retention policy](https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/).
- Limit series cardinality in your database using the
  [`max-series-per-database`](https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000) and
  [`max-values-per-tag`](https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000) settings.
- Consider using the
  [Time Series Index](https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/).
- Monitor your databases
  [series cardinality](https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality).

### Configuration:
```toml
@@ -31,6 +31,7 @@ type Client interface {
}

type APIError struct {
    URL         string
    StatusCode  int
    Title       string
    Description string
@@ -105,9 +106,9 @@ type claims struct {

func (e APIError) Error() string {
    if e.Description != "" {
        return fmt.Sprintf("%s: %s", e.Title, e.Description)
        return fmt.Sprintf("[%s] %s: %s", e.URL, e.Title, e.Description)
    }
    return e.Title
    return fmt.Sprintf("[%s] %s", e.URL, e.Title)
}

func NewClusterClient(
@@ -156,7 +157,8 @@ func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthTok
        return nil, err
    }

    req, err := http.NewRequest("POST", c.url("/acs/api/v1/auth/login"), bytes.NewBuffer(octets))
    loc := c.url("/acs/api/v1/auth/login")
    req, err := http.NewRequest("POST", loc, bytes.NewBuffer(octets))
    if err != nil {
        return nil, err
    }
@@ -189,6 +191,7 @@ func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthTok
        err = dec.Decode(loginError)
        if err != nil {
            err := &APIError{
                URL:        loc,
                StatusCode: resp.StatusCode,
                Title:      resp.Status,
            }
@@ -196,6 +199,7 @@ func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthTok
    }

    err = &APIError{
        URL:         loc,
        StatusCode:  resp.StatusCode,
        Title:       loginError.Title,
        Description: loginError.Description,
@@ -301,6 +305,7 @@ func (c *ClusterClient) doGet(ctx context.Context, url string, v interface{}) er

    if resp.StatusCode < 200 || resp.StatusCode >= 300 {
        return &APIError{
            URL:        url,
            StatusCode: resp.StatusCode,
            Title:      resp.Status,
        }
@@ -315,7 +320,7 @@ func (c *ClusterClient) doGet(ctx context.Context, url string, v interface{}) er
}

func (c *ClusterClient) url(path string) string {
    url := c.clusterURL
    url := *c.clusterURL
    url.Path = path
    return url.String()
}
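
The `url` helper change above is easy to miss: `c.clusterURL` is a `*url.URL`, and dereferencing it copies the struct by value, so setting `Path` no longer mutates the shared base URL. A minimal standalone sketch of the same idea (the host name here is illustrative, not from the diff):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	base, _ := url.Parse("https://dcos.example.com")

	// Copy the struct by value; mutating the copy leaves `base` intact.
	u := *base
	u.Path = "/acs/api/v1/auth/login"

	fmt.Println(u.String())    // https://dcos.example.com/acs/api/v1/auth/login
	fmt.Println(base.String()) // https://dcos.example.com
}
```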

@@ -31,6 +31,9 @@ P0a+YZUeHNRqT2pPN9lMTAZGGi3CtcF2XScbLNEBeXge
)

func TestLogin(t *testing.T) {
    ts := httptest.NewServer(http.NotFoundHandler())
    defer ts.Close()

    var tests = []struct {
        name         string
        responseCode int
@@ -40,16 +43,21 @@ func TestLogin(t *testing.T) {
    }{
        {
            name:          "Login successful",
            responseCode:  200,
            responseCode:  http.StatusOK,
            responseBody:  `{"token": "XXX.YYY.ZZZ"}`,
            expectedError: nil,
            expectedToken: "XXX.YYY.ZZZ",
        },
        {
            name:          "Unauthorized Error",
            responseCode:  http.StatusUnauthorized,
            responseBody:  `{"title": "x", "description": "y"}`,
            expectedError: &APIError{http.StatusUnauthorized, "x", "y"},
            name:          "Unauthorized Error",
            responseCode:  http.StatusUnauthorized,
            responseBody:  `{"title": "x", "description": "y"}`,
            expectedError: &APIError{
                URL:         ts.URL + "/acs/api/v1/auth/login",
                StatusCode:  http.StatusUnauthorized,
                Title:       "x",
                Description: "y",
            },
            expectedToken: "",
        },
    }
@@ -59,11 +67,11 @@ func TestLogin(t *testing.T) {

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                w.WriteHeader(tt.responseCode)
                fmt.Fprintln(w, tt.responseBody)
            })
            ts := httptest.NewServer(handler)

            u, err := url.Parse(ts.URL)
            require.NoError(t, err)

@@ -82,13 +90,14 @@ func TestLogin(t *testing.T) {
            } else {
                require.Nil(t, auth)
            }

            ts.Close()
        })
    }
}

func TestGetSummary(t *testing.T) {
    ts := httptest.NewServer(http.NotFoundHandler())
    defer ts.Close()

    var tests = []struct {
        name          string
        responseCode  int
@@ -98,7 +107,7 @@ func TestGetSummary(t *testing.T) {
    }{
        {
            name:          "No nodes",
            responseCode:  200,
            responseCode:  http.StatusOK,
            responseBody:  `{"cluster": "a", "slaves": []}`,
            expectedValue: &Summary{Cluster: "a", Slaves: []Slave{}},
            expectedError: nil,
@@ -108,11 +117,15 @@ func TestGetSummary(t *testing.T) {
            responseCode:  http.StatusUnauthorized,
            responseBody:  `<html></html>`,
            expectedValue: nil,
            expectedError: &APIError{StatusCode: http.StatusUnauthorized, Title: "401 Unauthorized"},
            expectedError: &APIError{
                URL:        ts.URL + "/mesos/master/state-summary",
                StatusCode: http.StatusUnauthorized,
                Title:      "401 Unauthorized",
            },
        },
        {
            name:          "Has nodes",
            responseCode:  200,
            responseCode:  http.StatusOK,
            responseBody:  `{"cluster": "a", "slaves": [{"id": "a"}, {"id": "b"}]}`,
            expectedValue: &Summary{
                Cluster: "a",
@@ -127,12 +140,12 @@ func TestGetSummary(t *testing.T) {

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                // check the path
                w.WriteHeader(tt.responseCode)
                fmt.Fprintln(w, tt.responseBody)
            })
            ts := httptest.NewServer(handler)

            u, err := url.Parse(ts.URL)
            require.NoError(t, err)

@@ -142,14 +155,15 @@ func TestGetSummary(t *testing.T) {

            require.Equal(t, tt.expectedError, err)
            require.Equal(t, tt.expectedValue, summary)

            ts.Close()
        })
    }
}

func TestGetNodeMetrics(t *testing.T) {
    ts := httptest.NewServer(http.NotFoundHandler())
    defer ts.Close()

    var tests = []struct {
        name          string
        responseCode  int
@@ -159,7 +173,7 @@ func TestGetNodeMetrics(t *testing.T) {
    }{
        {
            name:          "Empty Body",
            responseCode:  200,
            responseCode:  http.StatusOK,
            responseBody:  `{}`,
            expectedValue: &Metrics{},
            expectedError: nil,
@@ -168,12 +182,12 @@ func TestGetNodeMetrics(t *testing.T) {

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                // check the path
                w.WriteHeader(tt.responseCode)
                fmt.Fprintln(w, tt.responseBody)
            })
            ts := httptest.NewServer(handler)

            u, err := url.Parse(ts.URL)
            require.NoError(t, err)

@@ -183,14 +197,15 @@ func TestGetNodeMetrics(t *testing.T) {

            require.Equal(t, tt.expectedError, err)
            require.Equal(t, tt.expectedValue, m)

            ts.Close()
        })
    }
}

func TestGetContainerMetrics(t *testing.T) {
    ts := httptest.NewServer(http.NotFoundHandler())
    defer ts.Close()

    var tests = []struct {
        name          string
        responseCode  int
@@ -199,8 +214,8 @@ func TestGetContainerMetrics(t *testing.T) {
        expectedError error
    }{
        {
            name:          "204 No Contents",
            responseCode:  204,
            name:          "204 No Content",
            responseCode:  http.StatusNoContent,
            responseBody:  ``,
            expectedValue: &Metrics{},
            expectedError: nil,
@@ -209,12 +224,12 @@ func TestGetContainerMetrics(t *testing.T) {

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                // check the path
                w.WriteHeader(tt.responseCode)
                fmt.Fprintln(w, tt.responseBody)
            })
            ts := httptest.NewServer(handler)

            u, err := url.Parse(ts.URL)
            require.NoError(t, err)

@@ -224,8 +239,6 @@ func TestGetContainerMetrics(t *testing.T) {

            require.Equal(t, tt.expectedError, err)
            require.Equal(t, tt.expectedValue, m)

            ts.Close()
        })
    }
}
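
The test refactor above trades a fresh `httptest.NewServer` per case for one shared server whose handler is swapped inside each subtest, so `ts.URL` is already known when the expected `APIError` (which now embeds the URL) is built. A stripped-down sketch of that pattern, using only the standard library:

```go
package example

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"
)

func TestSwappedHandler(t *testing.T) {
	// One server for every subtest; its URL is stable up front.
	ts := httptest.NewServer(http.NotFoundHandler())
	defer ts.Close()

	cases := []struct{ name, body string }{
		{"ok", `{"token": "abc"}`},
		{"error", `{"title": "x"}`},
	}
	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
			// Swap the handler instead of starting a new server.
			ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				fmt.Fprintln(w, tt.body)
			})
			resp, err := http.Get(ts.URL)
			if err != nil {
				t.Fatal(err)
			}
			resp.Body.Close()
		})
	}
}
```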

@@ -31,6 +31,11 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/)
  container_name_include = []
  container_name_exclude = []

  ## Container states to include and exclude. Globs accepted.
  ## When empty only containers in the "running" state will be captured.
  # container_state_include = []
  # container_state_exclude = []

  ## Timeout for docker list, info, and stats commands
  timeout = "5s"

@@ -15,6 +15,7 @@ import (
    "time"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/api/types/swarm"
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/filter"
@@ -25,7 +26,7 @@ import (
// Docker object
type Docker struct {
    Endpoint       string
    ContainerNames []string
    ContainerNames []string // deprecated in 1.4; use container_name_include

    GatherServices bool `toml:"gather_services"`

@@ -39,6 +40,9 @@ type Docker struct {
    ContainerInclude []string `toml:"container_name_include"`
    ContainerExclude []string `toml:"container_name_exclude"`

    ContainerStateInclude []string `toml:"container_state_include"`
    ContainerStateExclude []string `toml:"container_state_exclude"`

    SSLCA   string `toml:"ssl_ca"`
    SSLCert string `toml:"ssl_cert"`
    SSLKey  string `toml:"ssl_key"`
@@ -53,6 +57,7 @@ type Docker struct {
    filtersCreated  bool
    labelFilter     filter.Filter
    containerFilter filter.Filter
    stateFilter     filter.Filter
}

// KB, MB, GB, TB, PB...human friendly
@@ -67,7 +72,8 @@ const (
)

var (
    sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`)
    sizeRegex       = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`)
    containerStates = []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"}
)

var sampleConfig = `
@@ -87,6 +93,11 @@ var sampleConfig = `
  container_name_include = []
  container_name_exclude = []

  ## Container states to include and exclude. Globs accepted.
  ## When empty only containers in the "running" state will be captured.
  # container_state_include = []
  # container_state_exclude = []

  ## Timeout for docker list, info, and stats commands
  timeout = "5s"

@@ -148,6 +159,10 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
        if err != nil {
            return err
        }
        err = d.createContainerStateFilters()
        if err != nil {
            return err
        }
        d.filtersCreated = true
    }

@@ -164,8 +179,22 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
        }
    }

    filterArgs := filters.NewArgs()
    for _, state := range containerStates {
        if d.stateFilter.Match(state) {
            filterArgs.Add("status", state)
        }
    }

    // All container states were excluded
    if filterArgs.Len() == 0 {
        return nil
    }

    // List containers
    opts := types.ContainerListOptions{}
    opts := types.ContainerListOptions{
        Filters: filterArgs,
    }
    ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
    defer cancel()
    containers, err := d.client.ContainerList(ctx, opts)
@@ -768,6 +797,18 @@ func (d *Docker) createLabelFilters() error {
    return nil
}

func (d *Docker) createContainerStateFilters() error {
    if len(d.ContainerStateInclude) == 0 && len(d.ContainerStateExclude) == 0 {
        d.ContainerStateInclude = []string{"running"}
    }
    filter, err := filter.NewIncludeExcludeFilter(d.ContainerStateInclude, d.ContainerStateExclude)
    if err != nil {
        return err
    }
    d.stateFilter = filter
    return nil
}

func init() {
    inputs.Add("docker", func() telegraf.Input {
        return &Docker{
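
`createContainerStateFilters` above defaults the include list to `running` when nothing is configured, then builds an include/exclude filter that the gather loop matches each known container state against. A hedged usage sketch of the same filter package (assuming the `Match` semantics shown in the diff and in `TestContainerStateFilter` below):

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	// Mirrors the default: with no user configuration, only "running" passes.
	f, err := filter.NewIncludeExcludeFilter([]string{"running"}, nil)
	if err != nil {
		panic(err)
	}

	states := []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"}
	for _, s := range states {
		fmt.Printf("%-10s -> %v\n", s, f.Match(s)) // only "running" prints true
	}
}
```

Globs work the same way: an include list of `["r*"]` would pass `restarting`, `running`, and `removing`, exactly as the test table below expects.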

@@ -3,6 +3,7 @@ package docker
import (
    "context"
    "crypto/tls"
    "sort"
    "testing"

    "github.com/influxdata/telegraf/testutil"
@@ -711,3 +712,85 @@ func TestDockerGatherSwarmInfo(t *testing.T) {
        },
    )
}

func TestContainerStateFilter(t *testing.T) {
    var tests = []struct {
        name     string
        include  []string
        exclude  []string
        expected map[string][]string
    }{
        {
            name: "default",
            expected: map[string][]string{
                "status": []string{"running"},
            },
        },
        {
            name:    "include running",
            include: []string{"running"},
            expected: map[string][]string{
                "status": []string{"running"},
            },
        },
        {
            name:    "include glob",
            include: []string{"r*"},
            expected: map[string][]string{
                "status": []string{"restarting", "running", "removing"},
            },
        },
        {
            name:    "include all",
            include: []string{"*"},
            expected: map[string][]string{
                "status": []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"},
            },
        },
        {
            name:    "exclude all",
            exclude: []string{"*"},
            expected: map[string][]string{
                "status": []string{},
            },
        },
        {
            name:    "exclude exited",
            include: []string{"*"},
            exclude: []string{"exited"},
            expected: map[string][]string{
                "status": []string{"created", "restarting", "running", "removing", "paused", "dead"},
            },
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            var acc testutil.Accumulator

            newClientFunc := func(host string, tlsConfig *tls.Config) (Client, error) {
                client := baseClient
                client.ContainerListF = func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
                    for k, v := range tt.expected {
                        actual := options.Filters.Get(k)
                        sort.Strings(actual)
                        sort.Strings(v)
                        require.Equal(t, v, actual)
                    }

                    return nil, nil
                }
                return &client, nil
            }

            d := Docker{
                newClient:             newClientFunc,
                ContainerStateInclude: tt.include,
                ContainerStateExclude: tt.exclude,
            }

            err := d.Gather(&acc)
            require.NoError(t, err)
        })
    }
}

@@ -35,7 +35,7 @@ or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/curre

  ## node_stats is a list of sub-stats that you want to have gathered. Valid options
  ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
  ## "breakers". By default, all stats are gathered.
  ## "breaker". By default, all stats are gathered.
  # node_stats = ["jvm", "http"]

  ## Optional SSL Config

@@ -105,7 +105,7 @@ const sampleConfig = `

  ## node_stats is a list of sub-stats that you want to have gathered. Valid options
  ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
  ## "breakers". By default, all stats are gathered.
  ## "breaker". By default, all stats are gathered.
  # node_stats = ["jvm", "http"]

  ## Optional SSL Config

@@ -41,6 +41,8 @@ const sampleConfig = `
  data_format = "influx"
`

const MaxStderrBytes = 512

type Exec struct {
    Commands []string
    Command  string
@@ -96,15 +98,41 @@ func (c CommandRunner) Run(

    cmd := exec.Command(split_cmd[0], split_cmd[1:]...)

    var out bytes.Buffer
    var (
        out    bytes.Buffer
        stderr bytes.Buffer
    )
    cmd.Stdout = &out
    cmd.Stderr = &stderr

    if err := internal.RunTimeout(cmd, e.Timeout.Duration); err != nil {
        switch e.parser.(type) {
        case *nagios.NagiosParser:
            AddNagiosState(err, acc)
        default:
            return nil, fmt.Errorf("exec: %s for command '%s'", err, command)
            var errMessage = ""
            if stderr.Len() > 0 {
                stderr = removeCarriageReturns(stderr)
                // Limit the number of bytes.
                didTruncate := false
                if stderr.Len() > MaxStderrBytes {
                    stderr.Truncate(MaxStderrBytes)
                    didTruncate = true
                }
                if i := bytes.IndexByte(stderr.Bytes(), '\n'); i > 0 {
                    // Only show truncation if the newline wasn't the last character.
                    if i < stderr.Len()-1 {
                        didTruncate = true
                    }
                    stderr.Truncate(i)
                }
                if didTruncate {
                    stderr.WriteString("...")
                }

                errMessage = fmt.Sprintf(": %s", stderr.String())
            }
            return nil, fmt.Errorf("exec: %s for command '%s'%s", err, command, errMessage)
        }
    } else {
        switch e.parser.(type) {
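
The stderr handling added above caps error output at `MaxStderrBytes` (512) and keeps only the first line, appending `...` whenever anything was dropped. A self-contained sketch of the same truncation rules (stdlib only; the diff's `removeCarriageReturns` helper is elided here, so carriage returns are assumed already stripped):

```go
package main

import (
	"bytes"
	"fmt"
)

const maxStderrBytes = 512

// truncateStderr applies the same rules as the exec plugin change:
// cap at maxStderrBytes, keep only the first line, and mark any
// truncation with a trailing "...".
func truncateStderr(stderr bytes.Buffer) string {
	didTruncate := false
	if stderr.Len() > maxStderrBytes {
		stderr.Truncate(maxStderrBytes)
		didTruncate = true
	}
	if i := bytes.IndexByte(stderr.Bytes(), '\n'); i > 0 {
		// Only flag truncation if the newline wasn't the last character.
		if i < stderr.Len()-1 {
			didTruncate = true
		}
		stderr.Truncate(i)
	}
	if didTruncate {
		stderr.WriteString("...")
	}
	return stderr.String()
}

func main() {
	var b bytes.Buffer
	b.WriteString("first line of error\nsecond line is dropped\n")
	fmt.Println(truncateStderr(b)) // first line of error...
}
```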

@@ -144,83 +144,6 @@ func TestCommandError(t *testing.T) {
    assert.Equal(t, acc.NFields(), 0, "No new points should have been added")
}

func TestLineProtocolParse(t *testing.T) {
    parser, _ := parsers.NewInfluxParser()
    e := &Exec{
        runner:   newRunnerMock([]byte(lineProtocol), nil),
        Commands: []string{"line-protocol"},
        parser:   parser,
    }

    var acc testutil.Accumulator
    require.NoError(t, acc.GatherError(e.Gather))

    fields := map[string]interface{}{
        "usage_idle": float64(99),
        "usage_busy": float64(1),
    }
    tags := map[string]string{
        "host":       "foo",
        "datacenter": "us-east",
    }
    acc.AssertContainsTaggedFields(t, "cpu", fields, tags)
}

func TestLineProtocolEmptyParse(t *testing.T) {
    parser, _ := parsers.NewInfluxParser()
    e := &Exec{
        runner:   newRunnerMock([]byte(lineProtocolEmpty), nil),
        Commands: []string{"line-protocol"},
        parser:   parser,
    }

    var acc testutil.Accumulator
    err := e.Gather(&acc)
    require.NoError(t, err)
}

func TestLineProtocolShortParse(t *testing.T) {
    parser, _ := parsers.NewInfluxParser()
    e := &Exec{
        runner:   newRunnerMock([]byte(lineProtocolShort), nil),
        Commands: []string{"line-protocol"},
        parser:   parser,
    }

    var acc testutil.Accumulator
    err := acc.GatherError(e.Gather)
    require.Error(t, err)
    assert.Contains(t, err.Error(), "buffer too short", "A buffer too short error was expected")
}

func TestLineProtocolParseMultiple(t *testing.T) {
    parser, _ := parsers.NewInfluxParser()
    e := &Exec{
        runner:   newRunnerMock([]byte(lineProtocolMulti), nil),
        Commands: []string{"line-protocol"},
        parser:   parser,
    }

    var acc testutil.Accumulator
    err := acc.GatherError(e.Gather)
    require.NoError(t, err)

    fields := map[string]interface{}{
        "usage_idle": float64(99),
        "usage_busy": float64(1),
    }
    tags := map[string]string{
        "host":       "foo",
        "datacenter": "us-east",
    }
    cpuTags := []string{"cpu0", "cpu1", "cpu2", "cpu3", "cpu4", "cpu5", "cpu6"}

    for _, cpu := range cpuTags {
        tags["cpu"] = cpu
        acc.AssertContainsTaggedFields(t, "cpu", fields, tags)
    }
}

func TestExecCommandWithGlob(t *testing.T) {
    parser, _ := parsers.NewValueParser("metric", "string", nil)
    e := NewExec()

plugins/inputs/http/README.md (new file, 52 lines)
@@ -0,0 +1,52 @@
# HTTP Input Plugin

The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The endpoint should have metrics formatted in one of the supported [input data formats](../../../docs/DATA_FORMATS_INPUT.md). Each data format has its own unique set of configuration options which can be added to the input configuration.

### Configuration:

```toml
# Read formatted metrics from one or more HTTP endpoints
[[inputs.http]]
  ## One or more URLs from which to read formatted metrics
  urls = [
    "http://localhost/metrics"
  ]

  ## HTTP method
  # method = "GET"

  ## Optional HTTP headers
  # headers = {"X-Special-Header" = "Special-Value"}

  ## Optional HTTP Basic Auth Credentials
  # username = "username"
  # password = "pa$$word"

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## Amount of time allowed to complete the HTTP request
  # timeout = "5s"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  # data_format = "influx"
```

### Metrics:

The metrics collected by this input plugin will depend on the configured `data_format` and the payload returned by the HTTP endpoint(s).

The default values below are added if the input format does not specify a value:

- http
  - tags:
    - url

plugins/inputs/http/http.go (new file, 204 lines)
@@ -0,0 +1,204 @@
package http

import (
    "errors"
    "fmt"
    "io/ioutil"
    "net/http"
    "strings"
    "sync"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf/plugins/parsers"
)

type HTTP struct {
    URLs   []string `toml:"urls"`
    Method string

    Headers map[string]string

    // HTTP Basic Auth Credentials
    Username string
    Password string

    // Path to CA file
    SSLCA string `toml:"ssl_ca"`
    // Path to host cert file
    SSLCert string `toml:"ssl_cert"`
    // Path to cert key file
    SSLKey string `toml:"ssl_key"`
    // Use SSL but skip chain & host verification
    InsecureSkipVerify bool

    Timeout internal.Duration

    client *http.Client

    // The parser will automatically be set by Telegraf core code because
    // this plugin implements the ParserInput interface (i.e. the SetParser method)
    parser parsers.Parser
}

var sampleConfig = `
  ## One or more URLs from which to read formatted metrics
  urls = [
    "http://localhost/metrics"
  ]

  ## HTTP method
  # method = "GET"

  ## Optional HTTP headers
  # headers = {"X-Special-Header" = "Special-Value"}

  ## Optional HTTP Basic Auth Credentials
  # username = "username"
  # password = "pa$$word"

  ## Tag all metrics with the url
  # tag_url = true

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## Amount of time allowed to complete the HTTP request
  # timeout = "5s"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  # data_format = "influx"
`

// SampleConfig returns the default configuration of the Input
func (*HTTP) SampleConfig() string {
    return sampleConfig
}

// Description returns a one-sentence description on the Input
func (*HTTP) Description() string {
    return "Read formatted metrics from one or more HTTP endpoints"
}

// Gather takes in an accumulator and adds the metrics that the Input
// gathers. This is called every "interval"
func (h *HTTP) Gather(acc telegraf.Accumulator) error {
    if h.parser == nil {
        return errors.New("Parser is not set")
    }

    if h.client == nil {
        tlsCfg, err := internal.GetTLSConfig(
            h.SSLCert, h.SSLKey, h.SSLCA, h.InsecureSkipVerify)
        if err != nil {
            return err
        }
        h.client = &http.Client{
            Transport: &http.Transport{
                TLSClientConfig: tlsCfg,
                Proxy:           http.ProxyFromEnvironment,
            },
            Timeout: h.Timeout.Duration,
        }
    }

    var wg sync.WaitGroup
    for _, u := range h.URLs {
        wg.Add(1)
        go func(url string) {
            defer wg.Done()
            if err := h.gatherURL(acc, url); err != nil {
                acc.AddError(fmt.Errorf("[url=%s]: %s", url, err))
            }
        }(u)
    }

    wg.Wait()

    return nil
}

// SetParser takes the data_format from the config and finds the right parser for that format
func (h *HTTP) SetParser(parser parsers.Parser) {
    h.parser = parser
}

// Gathers data from a particular URL
// Parameters:
//     acc    : The telegraf Accumulator to use
//     url    : endpoint to send request to
//
// Returns:
//     error: Any error that may have occurred
func (h *HTTP) gatherURL(
    acc telegraf.Accumulator,
    url string,
) error {
    request, err := http.NewRequest(h.Method, url, nil)
    if err != nil {
        return err
    }

    for k, v := range h.Headers {
        if strings.ToLower(k) == "host" {
            request.Host = v
        } else {
            request.Header.Add(k, v)
        }
    }

    if h.Username != "" || h.Password != "" {
        request.SetBasicAuth(h.Username, h.Password)
    }

    resp, err := h.client.Do(request)
    if err != nil {
        return err
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return fmt.Errorf("Received status code %d (%s), expected %d (%s)",
            resp.StatusCode,
            http.StatusText(resp.StatusCode),
            http.StatusOK,
            http.StatusText(http.StatusOK))
    }

    b, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return err
    }

    metrics, err := h.parser.Parse(b)
    if err != nil {
        return err
    }

    for _, metric := range metrics {
        if !metric.HasTag("url") {
            metric.AddTag("url", url)
        }
        acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time())
    }

    return nil
}

func init() {
    inputs.Add("http", func() telegraf.Input {
        return &HTTP{
            Timeout: internal.Duration{Duration: time.Second * 5},
            Method:  "GET",
        }
    })
}

plugins/inputs/http/http_test.go (new file, 139 lines)
@@ -0,0 +1,139 @@
package http_test

import (
    "net/http"
    "net/http/httptest"
    "testing"

    plugin "github.com/influxdata/telegraf/plugins/inputs/http"
    "github.com/influxdata/telegraf/plugins/parsers"
    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/require"
)

func TestHTTPwithJSONFormat(t *testing.T) {
    fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.URL.Path == "/endpoint" {
            _, _ = w.Write([]byte(simpleJSON))
        } else {
            w.WriteHeader(http.StatusNotFound)
        }
    }))
    defer fakeServer.Close()

    url := fakeServer.URL + "/endpoint"
    plugin := &plugin.HTTP{
        URLs: []string{url},
    }
    metricName := "metricName"
    p, _ := parsers.NewJSONParser(metricName, nil, nil)
    plugin.SetParser(p)

    var acc testutil.Accumulator
    require.NoError(t, acc.GatherError(plugin.Gather))

    require.Len(t, acc.Metrics, 1)

    // basic check to see if we got the right field, value and tag
    var metric = acc.Metrics[0]
    require.Equal(t, metric.Measurement, metricName)
    require.Len(t, acc.Metrics[0].Fields, 1)
    require.Equal(t, acc.Metrics[0].Fields["a"], 1.2)
    require.Equal(t, acc.Metrics[0].Tags["url"], url)
}

func TestHTTPHeaders(t *testing.T) {
    header := "X-Special-Header"
    headerValue := "Special-Value"
    fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.URL.Path == "/endpoint" {
            if r.Header.Get(header) == headerValue {
                _, _ = w.Write([]byte(simpleJSON))
            } else {
                w.WriteHeader(http.StatusForbidden)
            }
        } else {
            w.WriteHeader(http.StatusNotFound)
        }
    }))
    defer fakeServer.Close()

    url := fakeServer.URL + "/endpoint"
    plugin := &plugin.HTTP{
        URLs:    []string{url},
        Headers: map[string]string{header: headerValue},
    }
    metricName := "metricName"
    p, _ := parsers.NewJSONParser(metricName, nil, nil)
    plugin.SetParser(p)

    var acc testutil.Accumulator
    require.NoError(t, acc.GatherError(plugin.Gather))
}

func TestInvalidStatusCode(t *testing.T) {
    fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(http.StatusNotFound)
    }))
    defer fakeServer.Close()

    url := fakeServer.URL + "/endpoint"
    plugin := &plugin.HTTP{
        URLs: []string{url},
    }

    metricName := "metricName"
    p, _ := parsers.NewJSONParser(metricName, nil, nil)
    plugin.SetParser(p)

    var acc testutil.Accumulator
    require.Error(t, acc.GatherError(plugin.Gather))
}

func TestMethod(t *testing.T) {
    fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.Method == "POST" {
            w.WriteHeader(http.StatusOK)
        } else {
            w.WriteHeader(http.StatusNotFound)
        }
    }))
    defer fakeServer.Close()

    plugin := &plugin.HTTP{
        URLs:   []string{fakeServer.URL},
        Method: "POST",
    }

    metricName := "metricName"
    p, _ := parsers.NewJSONParser(metricName, nil, nil)
    plugin.SetParser(p)

    var acc testutil.Accumulator
    require.NoError(t, acc.GatherError(plugin.Gather))
}

func TestParserNotSet(t *testing.T) {
    fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.URL.Path == "/endpoint" {
            _, _ = w.Write([]byte(simpleJSON))
        } else {
            w.WriteHeader(http.StatusNotFound)
        }
    }))
    defer fakeServer.Close()

    url := fakeServer.URL + "/endpoint"
    plugin := &plugin.HTTP{
        URLs: []string{url},
    }

    var acc testutil.Accumulator
    require.Error(t, acc.GatherError(plugin.Gather))
}

const simpleJSON = `
{
    "a": 1.2
}
`

@@ -12,6 +12,8 @@ Enable TLS by specifying the file names of a service TLS certificate and key.

Enable mutually authenticated TLS and authorize client connections by a signing certificate authority by including a list of allowed CA certificate file names in `tls_allowed_cacerts`.

Enable basic HTTP authentication of clients by specifying a username and password to check for. These credentials will be received from the client _as plain text_ if TLS is not configured.

See: [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx).

**Example:**
@@ -39,4 +41,8 @@ This is a sample configuration for the plugin.

  ## MTLS
  tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]

  ## Basic authentication
  basic_username = "foobar"
  basic_password = "barfoo"
```
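
With basic auth enabled, any client writing line protocol to the listener has to send the credentials. A minimal sketch of such a client using only the standard library (the address, endpoint, and credentials are the sample values from the configuration above):

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Matches the sample configuration: listener on :8186 with basic auth.
	body := strings.NewReader("cpu,host=server01 usage_idle=99 1522000000000000000\n")
	req, err := http.NewRequest("POST", "http://localhost:8186/write", body)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("foobar", "barfoo")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The listener replies 204 No Content on a successful write.
	fmt.Println(resp.Status)
}
```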

@@ -3,6 +3,7 @@ package http_listener
import (
    "bytes"
    "compress/gzip"
    "crypto/subtle"
    "crypto/tls"
    "crypto/x509"
    "io"
@@ -32,6 +33,8 @@ const (
    DEFAULT_MAX_LINE_SIZE = 64 * 1024
)

type TimeFunc func() time.Time

type HTTPListener struct {
    ServiceAddress string
    ReadTimeout    internal.Duration
@@ -44,14 +47,20 @@ type HTTPListener struct {
    TlsCert string
    TlsKey  string

    BasicUsername string
    BasicPassword string

    TimeFunc

    mu sync.Mutex
    wg sync.WaitGroup

    listener net.Listener

    parser influx.InfluxParser
    acc    telegraf.Accumulator
    pool   *pool
    handler *influx.MetricHandler
    parser  *influx.Parser
    acc     telegraf.Accumulator
    pool    *pool

    BytesRecv      selfstat.Stat
    RequestsServed selfstat.Stat
@@ -64,6 +73,7 @@ type HTTPListener struct {
    PingsRecv       selfstat.Stat
    NotFoundsServed selfstat.Stat
    BuffersCreated  selfstat.Stat
    AuthFailures    selfstat.Stat
}

const sampleConfig = `
@@ -90,6 +100,11 @@ const sampleConfig = `
  ## Add service certificate and key
  tls_cert = "/etc/telegraf/cert.pem"
  tls_key = "/etc/telegraf/key.pem"

  ## Optional username and password to accept for HTTP basic authentication.
  ## You probably want to make sure you have TLS configured above for this.
  # basic_username = "foobar"
  # basic_password = "barfoo"
`

func (h *HTTPListener) SampleConfig() string {
@@ -124,6 +139,7 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
    h.PingsRecv = selfstat.Register("http_listener", "pings_received", tags)
    h.NotFoundsServed = selfstat.Register("http_listener", "not_founds_served", tags)
    h.BuffersCreated = selfstat.Register("http_listener", "buffers_created", tags)
    h.AuthFailures = selfstat.Register("http_listener", "auth_failures", tags)

    if h.MaxBodySize == 0 {
        h.MaxBodySize = DEFAULT_MAX_BODY_SIZE
@@ -165,6 +181,9 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
    h.listener = listener
    h.Port = listener.Addr().(*net.TCPAddr).Port

    h.handler = influx.NewMetricHandler()
    h.parser = influx.NewParser(h.handler)

    h.wg.Add(1)
    go func() {
        defer h.wg.Done()
@@ -194,25 +213,29 @@ func (h *HTTPListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
    case "/write":
        h.WritesRecv.Incr(1)
        defer h.WritesServed.Incr(1)
        h.serveWrite(res, req)
        h.AuthenticateIfSet(h.serveWrite, res, req)
    case "/query":
        h.QueriesRecv.Incr(1)
        defer h.QueriesServed.Incr(1)
        // Deliver a dummy response to the query endpoint, as some InfluxDB
        // clients test endpoint availability with a query
        res.Header().Set("Content-Type", "application/json")
        res.Header().Set("X-Influxdb-Version", "1.0")
        res.WriteHeader(http.StatusOK)
        res.Write([]byte("{\"results\":[]}"))
        h.AuthenticateIfSet(func(res http.ResponseWriter, req *http.Request) {
            res.Header().Set("Content-Type", "application/json")
            res.Header().Set("X-Influxdb-Version", "1.0")
            res.WriteHeader(http.StatusOK)
            res.Write([]byte("{\"results\":[]}"))
        }, res, req)
    case "/ping":
        h.PingsRecv.Incr(1)
        defer h.PingsServed.Incr(1)
        // respond to ping requests
        res.WriteHeader(http.StatusNoContent)
        h.AuthenticateIfSet(func(res http.ResponseWriter, req *http.Request) {
            res.WriteHeader(http.StatusNoContent)
        }, res, req)
    default:
        defer h.NotFoundsServed.Incr(1)
        // Don't know how to respond to calls to other endpoints
        http.NotFound(res, req)
        h.AuthenticateIfSet(http.NotFound, res, req)
    }
}

@@ -222,7 +245,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
        tooLarge(res)
        return
    }
    now := time.Now()
    now := h.TimeFunc()

    precision := req.URL.Query().Get("precision")

@@ -321,7 +344,12 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
}

func (h *HTTPListener) parse(b []byte, t time.Time, precision string) error {
    metrics, err := h.parser.ParseWithDefaultTimePrecision(b, t, precision)
    h.handler.SetTimePrecision(getPrecisionMultiplier(precision))
    h.handler.SetTimeFunc(func() time.Time { return t })
    metrics, err := h.parser.Parse(b)
    if err != nil {
        return err
    }

    for _, m := range metrics {
        h.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
@@ -376,10 +404,45 @@ func (h *HTTPListener) getTLSConfig() *tls.Config {
    return tlsConf
}

func (h *HTTPListener) AuthenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) {
    if h.BasicUsername != "" && h.BasicPassword != "" {
        reqUsername, reqPassword, ok := req.BasicAuth()
        if !ok ||
            subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.BasicUsername)) != 1 ||
            subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.BasicPassword)) != 1 {

            h.AuthFailures.Incr(1)
            http.Error(res, "Unauthorized.", http.StatusUnauthorized)
            return
        }
        handler(res, req)
    } else {
        handler(res, req)
    }
}

func getPrecisionMultiplier(precision string) time.Duration {
    d := time.Nanosecond
    switch precision {
    case "u":
        d = time.Microsecond
    case "ms":
        d = time.Millisecond
    case "s":
        d = time.Second
    case "m":
        d = time.Minute
    case "h":
        d = time.Hour
    }
    return d
}

func init() {
    inputs.Add("http_listener", func() telegraf.Input {
        return &HTTPListener{
            ServiceAddress: ":8186",
            TimeFunc:       time.Now,
        }
    })
}
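
The new `AuthenticateIfSet` wrapper compares credentials with `crypto/subtle` so that comparison time does not depend on how many leading bytes match, which closes the timing side channel a plain `==` would open. A standalone sketch of the same check:

```go
package main

import (
	"crypto/subtle"
	"fmt"
)

// equalConstantTime reports whether got matches want without
// short-circuiting on the first differing byte.
func equalConstantTime(got, want string) bool {
	return subtle.ConstantTimeCompare([]byte(got), []byte(want)) == 1
}

func main() {
	fmt.Println(equalConstantTime("barfoo", "barfoo")) // true
	fmt.Println(equalConstantTime("barfoX", "barfoo")) // false
}
```

Note that `ConstantTimeCompare` still returns immediately when the lengths differ; it only guarantees constant time for equal-length inputs, which is sufficient here.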
File diff suppressed because one or more lines are too long
@@ -1,6 +1,6 @@
# Example Input Plugin
# HTTP Response Input Plugin

This input plugin will test HTTP/HTTPS connections.
This input plugin checks HTTP/HTTPS connections.

### Configuration:

@@ -10,6 +10,9 @@ This input plugin will test HTTP/HTTPS connections.
  ## Server address (default http://localhost)
  # address = "http://localhost"

  ## Set http_proxy (telegraf uses the system-wide proxy settings if it is not set)
  # http_proxy = "http://localhost:8888"

  ## Set response_timeout (default 5 seconds)
  # response_timeout = "5s"

@@ -41,21 +44,38 @@ This input plugin will test HTTP/HTTPS connections.
  # Host = "github.com"
```

### Measurements & Fields:
### Metrics:

- http_response
  - tags:
    - server (target URL)
    - method (request method)
    - status_code (response status code)
    - result ([see below](#result--result_code))
  - fields:
    - response_time (float, seconds)
    - http_response_code (int) # The code received
    - result_type (string) # success, timeout, response_string_mismatch, connection_failed
    - http_response_code (int, response status code)
    - result_type (string, deprecated in 1.6: use `result` tag and `result_code` field)
    - result_code (int, [see below](#result--result_code))

### Tags:
#### `result` / `result_code`

After polling the target server, the plugin records the outcome of the operation in the `result` tag, and adds a numeric field called `result_code` corresponding to that tag value.

This tag is used to expose network and plugin errors. HTTP errors are considered a successful connection.

|Tag value                |Corresponding field value|Description|
|-------------------------|-------------------------|-----------|
|success                  | 0 |The HTTP request completed, even if the HTTP code represents an error|
|response_string_mismatch | 1 |The option `response_string_match` was used, and the body of the response didn't match the regex|
|body_read_error          | 2 |The option `response_string_match` was used, but the plugin wasn't able to read the body of the response. Responses with empty bodies (like 3xx, HEAD, etc.) will trigger this error|
|connection_failed        | 3 |Catch-all for any network error not specifically handled by the plugin|
|timeout                  | 4 |The plugin timed out while awaiting the HTTP connection to complete|
|dns_error                | 5 |There was a DNS error while attempting to connect to the host|

- All measurements have the following tags:
  - server
  - method

### Example Output:

```
http_response,method=GET,server=http://www.github.com http_response_code=200i,response_time=6.223266528 1459419354977857955
http_response,method=GET,server=http://www.github.com,status_code=200,result=success http_response_code=200i,response_time=6.223266528,result_type="success",result_code=0i 1459419354977857955
```

@@ -2,6 +2,7 @@ package http_response

import (
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "log"
@@ -9,6 +10,7 @@ import (
    "net/http"
    "net/url"
    "regexp"
    "strconv"
    "strings"
    "time"

@@ -20,6 +22,7 @@ import (
// HTTPResponse struct
type HTTPResponse struct {
    Address         string
    HTTPProxy       string `toml:"http_proxy"`
    Body            string
    Method          string
    ResponseTimeout internal.Duration
@@ -49,6 +52,9 @@ var sampleConfig = `
  ## Server address (default http://localhost)
  # address = "http://localhost"

  ## Set http_proxy (telegraf uses the system-wide proxy settings if it is not set)
  # http_proxy = "http://localhost:8888"

  ## Set response_timeout (default 5 seconds)
  # response_timeout = "5s"

@@ -88,6 +94,22 @@ func (h *HTTPResponse) SampleConfig() string {
// ErrRedirectAttempted indicates that a redirect occurred
var ErrRedirectAttempted = errors.New("redirect")

// Set the proxy. A configured proxy overrides the system-wide proxy.
func getProxyFunc(http_proxy string) func(*http.Request) (*url.URL, error) {
    if http_proxy == "" {
        return http.ProxyFromEnvironment
    }
    proxyURL, err := url.Parse(http_proxy)
    if err != nil {
        return func(_ *http.Request) (*url.URL, error) {
            return nil, errors.New("bad proxy: " + err.Error())
        }
    }
    return func(r *http.Request) (*url.URL, error) {
        return proxyURL, nil
    }
}

// CreateHttpClient creates an http client which will timeout at the specified
// timeout period and can follow redirects if specified
func (h *HTTPResponse) createHttpClient() (*http.Client, error) {
@@ -98,7 +120,7 @@ func (h *HTTPResponse) createHttpClient() (*http.Client, error) {
    }
    client := &http.Client{
        Transport: &http.Transport{
            Proxy:             http.ProxyFromEnvironment,
            Proxy:             getProxyFunc(h.HTTPProxy),
            DisableKeepAlives: true,
            TLSClientConfig:   tlsCfg,
        },
@@ -113,10 +135,54 @@ func (h *HTTPResponse) createHttpClient() (*http.Client, error) {
    return client, nil
}

func setResult(result_string string, fields map[string]interface{}, tags map[string]string) {
    result_codes := map[string]int{
        "success":                  0,
        "response_string_mismatch": 1,
        "body_read_error":          2,
        "connection_failed":        3,
        "timeout":                  4,
        "dns_error":                5,
    }

    tags["result"] = result_string
    fields["result_type"] = result_string
    fields["result_code"] = result_codes[result_string]
}

func setError(err error, fields map[string]interface{}, tags map[string]string) error {
    if timeoutError, ok := err.(net.Error); ok && timeoutError.Timeout() {
        setResult("timeout", fields, tags)
        return timeoutError
    }

    urlErr, isUrlErr := err.(*url.Error)
    if !isUrlErr {
        return nil
    }

    opErr, isNetErr := (urlErr.Err).(*net.OpError)
    if isNetErr {
        switch e := (opErr.Err).(type) {
        case (*net.DNSError):
            setResult("dns_error", fields, tags)
            return e
        case (*net.ParseError):
            // Parse error has to do with parsing of IP addresses, so we
            // group it with address errors
            setResult("address_error", fields, tags)
            return e
        }
    }

    return nil
}

// HTTPGather gathers all fields and returns any errors it encounters
func (h *HTTPResponse) httpGather() (map[string]interface{}, error) {
    // Prepare fields
func (h *HTTPResponse) httpGather() (map[string]interface{}, map[string]string, error) {
    // Prepare fields and tags
    fields := make(map[string]interface{})
    tags := map[string]string{"server": h.Address, "method": h.Method}

    var body io.Reader
    if h.Body != "" {
@@ -124,7 +190,7 @@ func (h *HTTPResponse) httpGather() (map[string]interface{}, error) {
    }
    request, err := http.NewRequest(h.Method, h.Address, body)
    if err != nil {
        return nil, err
        return nil, nil, err
    }

    for key, val := range h.Headers {
@@ -137,68 +203,87 @@ func (h *HTTPResponse) httpGather() (map[string]interface{}, error) {
    // Start Timer
    start := time.Now()
    resp, err := h.client.Do(request)
    response_time := time.Since(start).Seconds()

    // If an error is returned, it means we are dealing with a network error, as
    // HTTP error codes do not generate errors in the net/http library
    if err != nil {
        if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
            fields["result_type"] = "timeout"
            return fields, nil
        // Log error
        log.Printf("D! Network error while polling %s: %s", h.Address, err.Error())

        // Get error details
        netErr := setError(err, fields, tags)

        // If we recognize the returned error, get out
        if netErr != nil {
            return fields, tags, nil
        }
        fields["result_type"] = "connection_failed"
        if h.FollowRedirects {
            return fields, nil
        }
        if urlError, ok := err.(*url.Error); ok &&
            urlError.Err == ErrRedirectAttempted {

        // Any error not recognized by `setError` is considered a "connection_failed"
        setResult("connection_failed", fields, tags)

        // If the error is a redirect we continue processing and log the HTTP code
        urlError, isUrlError := err.(*url.Error)
        if !h.FollowRedirects && isUrlError && urlError.Err == ErrRedirectAttempted {
            err = nil
        } else {
            return fields, nil
            // If the error isn't a timeout or a redirect stop
            // processing the request
            return fields, tags, nil
        }
    }

    if _, ok := fields["response_time"]; !ok {
        fields["response_time"] = response_time
    }

    // This function closes the response body, as
    // required by the net/http library
    defer func() {
        io.Copy(ioutil.Discard, resp.Body)
        resp.Body.Close()
    }()

    fields["response_time"] = time.Since(start).Seconds()
    // Record the HTTP response code
    tags["status_code"] = strconv.Itoa(resp.StatusCode)
    fields["http_response_code"] = resp.StatusCode

    // Check the response for a regex match.
    if h.ResponseStringMatch != "" {

        // Compile once and reuse
        if h.compiledStringMatch == nil {
            h.compiledStringMatch = regexp.MustCompile(h.ResponseStringMatch)
            if err != nil {
                log.Printf("E! Failed to compile regular expression %s : %s", h.ResponseStringMatch, err)
                fields["result_type"] = "response_string_mismatch"
                return fields, nil
            }
        }

        bodyBytes, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            log.Printf("E! Failed to read body of HTTP Response : %s", err)
            fields["result_type"] = "response_string_mismatch"
            log.Printf("D! Failed to read body of HTTP Response : %s", err)
            setResult("body_read_error", fields, tags)
            fields["response_string_match"] = 0
            return fields, nil
            return fields, tags, nil
        }

        if h.compiledStringMatch.Match(bodyBytes) {
            fields["result_type"] = "success"
            setResult("success", fields, tags)
            fields["response_string_match"] = 1
        } else {
            fields["result_type"] = "response_string_mismatch"
            setResult("response_string_mismatch", fields, tags)
            fields["response_string_match"] = 0
        }
    } else {
        fields["result_type"] = "success"
        setResult("success", fields, tags)
    }

    return fields, nil
    return fields, tags, nil
}

// Gather gets all metric fields and tags and returns any errors it encounters
func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
    // Compile the body regex if it exists
    if h.compiledStringMatch == nil {
        var err error
        h.compiledStringMatch, err = regexp.Compile(h.ResponseStringMatch)
        if err != nil {
            return fmt.Errorf("Failed to compile regular expression %s : %s", h.ResponseStringMatch, err)
        }
    }

    // Set default values
    if h.ResponseTimeout.Duration < time.Second {
        h.ResponseTimeout.Duration = time.Second * 5
@@ -217,9 +302,10 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
    if addr.Scheme != "http" && addr.Scheme != "https" {
        return errors.New("Only http and https are supported")
    }

    // Prepare data
    tags := map[string]string{"server": h.Address, "method": h.Method}
    var fields map[string]interface{}
    var tags map[string]string

    if h.client == nil {
        client, err := h.createHttpClient()
@@ -230,10 +316,11 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
    }

    // Gather data
    fields, err = h.httpGather()
    fields, tags, err = h.httpGather()
    if err != nil {
        return err
    }

    // Add metrics
    acc.AddFields("http_response", fields, tags)
    return nil
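
The `getProxyFunc` helper above returns a `Proxy` callback for `http.Transport`: an empty setting falls back to the environment, a malformed URL is surfaced lazily on the first request, and otherwise every request goes through the fixed proxy. A hedged sketch of wiring a fixed proxy into a client with the standard library (`proxyAddr` is an illustrative value matching the sample config):

```go
package main

import (
	"net/http"
	"net/url"
	"time"
)

func main() {
	proxyAddr := "http://localhost:8888" // illustrative, matches the sample config

	proxyURL, err := url.Parse(proxyAddr)
	if err != nil {
		panic(err)
	}

	client := &http.Client{
		Transport: &http.Transport{
			// Fixed proxy; use http.ProxyFromEnvironment instead to fall
			// back to the system-wide settings, as the plugin does when
			// http_proxy is unset.
			Proxy:             http.ProxyURL(proxyURL),
			DisableKeepAlives: true,
		},
		Timeout: 5 * time.Second,
	}
	_ = client // issue requests with client.Get / client.Do as usual
}
```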
|
||||
|
||||
@@ -15,6 +15,68 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Receives a list with fields that are expected to be absent
|
||||
func checkAbsentFields(t *testing.T, fields []string, acc *testutil.Accumulator) {
|
||||
for _, field := range fields {
|
||||
ok := acc.HasField("http_response", field)
|
||||
require.False(t, ok)
|
||||
}
|
||||
}
|
||||
|
||||
// Receives a list with tags that are expected to be absent
|
||||
func checkAbsentTags(t *testing.T, tags []string, acc *testutil.Accumulator) {
|
||||
for _, tag := range tags {
|
||||
ok := acc.HasTag("http_response", tag)
|
||||
require.False(t, ok)
|
||||
}
|
||||
}
|
||||
|
||||
// Receives a dictionary and with expected fields and their values. If a value is nil, it will only check
|
||||
// that the field exists, but not its contents
|
||||
func checkFields(t *testing.T, fields map[string]interface{}, acc *testutil.Accumulator) {
|
||||
for key, field := range fields {
|
||||
switch v := field.(type) {
|
||||
case int:
|
||||
value, ok := acc.IntField("http_response", key)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, field, value)
|
||||
case float64:
|
||||
value, ok := acc.FloatField("http_response", key)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, field, value)
|
||||
case string:
|
||||
value, ok := acc.StringField("http_response", key)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, field, value)
|
||||
case nil:
|
||||
ok := acc.HasField("http_response", key)
|
||||
require.True(t, ok)
|
||||
default:
|
||||
t.Log("Unsupported type for field: ", v)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Receives a dictionary and with expected tags and their values. If a value is nil, it will only check
|
||||
// that the tag exists, but not its contents
|
||||
func checkTags(t *testing.T, tags map[string]interface{}, acc *testutil.Accumulator) {
|
||||
for key, tag := range tags {
|
||||
switch v := tag.(type) {
|
||||
case string:
|
||||
ok := acc.HasTag("http_response", key)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, tag, acc.TagValue("http_response", key))
|
||||
case nil:
|
||||
ok := acc.HasTag("http_response", key)
|
||||
require.True(t, ok)
|
||||
default:
|
||||
t.Log("Unsupported type for tag: ", v)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func setUpTestMux() http.Handler {
    mux := http.NewServeMux()
    mux.HandleFunc("/redirect", func(w http.ResponseWriter, req *http.Request) {

@@ -56,6 +118,24 @@ func setUpTestMux() http.Handler {
    return mux
}

func checkOutput(t *testing.T, acc *testutil.Accumulator, presentFields map[string]interface{}, presentTags map[string]interface{}, absentFields []string, absentTags []string) {
    if presentFields != nil {
        checkFields(t, presentFields, acc)
    }

    if presentTags != nil {
        checkTags(t, presentTags, acc)
    }

    if absentFields != nil {
        checkAbsentFields(t, absentFields, acc)
    }

    if absentTags != nil {
        checkAbsentTags(t, absentTags, acc)
    }
}

func TestHeaders(t *testing.T) {
    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        cHeader := r.Header.Get("Content-Type")

@@ -78,9 +158,20 @@ func TestHeaders(t *testing.T) {
    err := h.Gather(&acc)
    require.NoError(t, err)

    value, ok := acc.IntField("http_response", "http_response_code")
    require.True(t, ok)
    require.Equal(t, http.StatusOK, value)
    expectedFields := map[string]interface{}{
        "http_response_code": http.StatusOK,
        "result_type":        "success",
        "result_code":        0,
        "response_time":      nil,
    }
    expectedTags := map[string]interface{}{
        "server":      nil,
        "method":      "GET",
        "status_code": "200",
        "result":      "success",
    }
    absentFields := []string{"response_string_match"}
    checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
}

func TestFields(t *testing.T) {

@@ -103,12 +194,20 @@ func TestFields(t *testing.T) {
    err := h.Gather(&acc)
    require.NoError(t, err)

    value, ok := acc.IntField("http_response", "http_response_code")
    require.True(t, ok)
    require.Equal(t, http.StatusOK, value)
    response_value, ok := acc.StringField("http_response", "result_type")
    require.True(t, ok)
    require.Equal(t, "success", response_value)
    expectedFields := map[string]interface{}{
        "http_response_code": http.StatusOK,
        "result_type":        "success",
        "result_code":        0,
        "response_time":      nil,
    }
    expectedTags := map[string]interface{}{
        "server":      nil,
        "method":      "GET",
        "status_code": "200",
        "result":      "success",
    }
    absentFields := []string{"response_string_match"}
    checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
}

func TestRedirects(t *testing.T) {

@@ -130,9 +229,20 @@ func TestRedirects(t *testing.T) {
    err := h.Gather(&acc)
    require.NoError(t, err)

    value, ok := acc.IntField("http_response", "http_response_code")
    require.True(t, ok)
    require.Equal(t, http.StatusOK, value)
    expectedFields := map[string]interface{}{
        "http_response_code": http.StatusOK,
        "result_type":        "success",
        "result_code":        0,
        "response_time":      nil,
    }
    expectedTags := map[string]interface{}{
        "server":      nil,
        "method":      "GET",
        "status_code": "200",
        "result":      "success",
    }
    absentFields := []string{"response_string_match"}
    checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)

    h = &HTTPResponse{
        Address: ts.URL + "/badredirect",

@@ -148,11 +258,21 @@ func TestRedirects(t *testing.T) {
    err = h.Gather(&acc)
    require.NoError(t, err)

    value, ok = acc.IntField("http_response", "http_response_code")
    require.False(t, ok)
    response_value, ok := acc.StringField("http_response", "result_type")
    require.True(t, ok)
    require.Equal(t, "connection_failed", response_value)
    expectedFields = map[string]interface{}{
        "result_type": "connection_failed",
        "result_code": 3,
    }
    expectedTags = map[string]interface{}{
        "server": nil,
        "method": "GET",
        "result": "connection_failed",
    }
    absentFields = []string{"http_response_code", "response_time", "response_string_match"}
    absentTags := []string{"status_code"}
    checkOutput(t, &acc, expectedFields, expectedTags, nil, nil)

    expectedFields = map[string]interface{}{"result_type": "connection_failed"}
    checkOutput(t, &acc, expectedFields, expectedTags, absentFields, absentTags)
}

func TestMethod(t *testing.T) {

@@ -174,9 +294,20 @@ func TestMethod(t *testing.T) {
    err := h.Gather(&acc)
    require.NoError(t, err)

    value, ok := acc.IntField("http_response", "http_response_code")
    require.True(t, ok)
    require.Equal(t, http.StatusOK, value)
    expectedFields := map[string]interface{}{
        "http_response_code": http.StatusOK,
        "result_type":        "success",
        "result_code":        0,
        "response_time":      nil,
    }
    expectedTags := map[string]interface{}{
        "server":      nil,
        "method":      "POST",
        "status_code": "200",
        "result":      "success",
    }
    absentFields := []string{"response_string_match"}
    checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)

    h = &HTTPResponse{
        Address: ts.URL + "/mustbepostmethod",

@@ -192,9 +323,20 @@ func TestMethod(t *testing.T) {
    err = h.Gather(&acc)
    require.NoError(t, err)

    value, ok = acc.IntField("http_response", "http_response_code")
    require.True(t, ok)
    require.Equal(t, http.StatusMethodNotAllowed, value)
    expectedFields = map[string]interface{}{
        "http_response_code": http.StatusMethodNotAllowed,
        "result_type":        "success",
        "result_code":        0,
        "response_time":      nil,
    }
    expectedTags = map[string]interface{}{
        "server":      nil,
        "method":      "GET",
        "status_code": "405",
        "result":      "success",
    }
    absentFields = []string{"response_string_match"}
    checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)

    // check that lowercase methods work correctly
    h = &HTTPResponse{

@@ -211,9 +353,20 @@ func TestMethod(t *testing.T) {
    err = h.Gather(&acc)
    require.NoError(t, err)

    value, ok = acc.IntField("http_response", "http_response_code")
    require.True(t, ok)
    require.Equal(t, http.StatusMethodNotAllowed, value)
    expectedFields = map[string]interface{}{
        "http_response_code": http.StatusMethodNotAllowed,
        "result_type":        "success",
        "result_code":        0,
        "response_time":      nil,
    }
    expectedTags = map[string]interface{}{
        "server":      nil,
        "method":      "head",
        "status_code": "405",
        "result":      "success",
    }
    absentFields = []string{"response_string_match"}
    checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
}
func TestBody(t *testing.T) {

@@ -235,9 +388,20 @@ func TestBody(t *testing.T) {
    err := h.Gather(&acc)
    require.NoError(t, err)

    value, ok := acc.IntField("http_response", "http_response_code")
    require.True(t, ok)
    require.Equal(t, http.StatusOK, value)
    expectedFields := map[string]interface{}{
        "http_response_code": http.StatusOK,
        "result_type":        "success",
        "result_code":        0,
        "response_time":      nil,
    }
    expectedTags := map[string]interface{}{
        "server":      nil,
        "method":      "GET",
        "status_code": "200",
        "result":      "success",
    }
    absentFields := []string{"response_string_match"}
    checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)

    h = &HTTPResponse{
        Address: ts.URL + "/musthaveabody",

@@ -252,9 +416,19 @@ func TestBody(t *testing.T) {
    err = h.Gather(&acc)
    require.NoError(t, err)

    value, ok = acc.IntField("http_response", "http_response_code")
    require.True(t, ok)
    require.Equal(t, http.StatusBadRequest, value)
    expectedFields = map[string]interface{}{
        "http_response_code": http.StatusBadRequest,
        "result_type":        "success",
        "result_code":        0,
    }
    expectedTags = map[string]interface{}{
        "server":      nil,
        "method":      "GET",
        "status_code": "400",
        "result":      "success",
    }
    absentFields = []string{"response_string_match"}
    checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
}

func TestStringMatch(t *testing.T) {

@@ -277,17 +451,20 @@ func TestStringMatch(t *testing.T) {
    err := h.Gather(&acc)
    require.NoError(t, err)

    value, ok := acc.IntField("http_response", "http_response_code")
    require.True(t, ok)
    require.Equal(t, http.StatusOK, value)
    value, ok = acc.IntField("http_response", "response_string_match")
    require.True(t, ok)
    require.Equal(t, 1, value)
    response_value, ok := acc.StringField("http_response", "result_type")
    require.True(t, ok)
    require.Equal(t, "success", response_value)
    _, ok = acc.FloatField("http_response", "response_time")
    require.True(t, ok)
    expectedFields := map[string]interface{}{
        "http_response_code":    http.StatusOK,
        "response_string_match": 1,
        "result_type":           "success",
        "result_code":           0,
        "response_time":         nil,
    }
    expectedTags := map[string]interface{}{
        "server":      nil,
        "method":      "GET",
        "status_code": "200",
        "result":      "success",
    }
    checkOutput(t, &acc, expectedFields, expectedTags, nil, nil)
}

func TestStringMatchJson(t *testing.T) {

@@ -310,17 +487,20 @@ func TestStringMatchJson(t *testing.T) {
    err := h.Gather(&acc)
    require.NoError(t, err)

    value, ok := acc.IntField("http_response", "http_response_code")
    require.True(t, ok)
    require.Equal(t, http.StatusOK, value)
    value, ok = acc.IntField("http_response", "response_string_match")
    require.True(t, ok)
    require.Equal(t, 1, value)
    response_value, ok := acc.StringField("http_response", "result_type")
    require.True(t, ok)
    require.Equal(t, "success", response_value)
    _, ok = acc.FloatField("http_response", "response_time")
    require.True(t, ok)
    expectedFields := map[string]interface{}{
        "http_response_code":    http.StatusOK,
        "response_string_match": 1,
        "result_type":           "success",
        "result_code":           0,
        "response_time":         nil,
    }
    expectedTags := map[string]interface{}{
        "server":      nil,
        "method":      "GET",
        "status_code": "200",
        "result":      "success",
    }
    checkOutput(t, &acc, expectedFields, expectedTags, nil, nil)
}

func TestStringMatchFail(t *testing.T) {

@@ -344,17 +524,20 @@ func TestStringMatchFail(t *testing.T) {
    err := h.Gather(&acc)
    require.NoError(t, err)

    value, ok := acc.IntField("http_response", "http_response_code")
    require.True(t, ok)
    require.Equal(t, http.StatusOK, value)
    value, ok = acc.IntField("http_response", "response_string_match")
    require.True(t, ok)
    require.Equal(t, 0, value)
    response_value, ok := acc.StringField("http_response", "result_type")
    require.True(t, ok)
    require.Equal(t, "response_string_mismatch", response_value)
    _, ok = acc.FloatField("http_response", "response_time")
    require.True(t, ok)
    expectedFields := map[string]interface{}{
        "http_response_code":    http.StatusOK,
        "response_string_match": 0,
        "result_type":           "response_string_mismatch",
        "result_code":           1,
        "response_time":         nil,
    }
    expectedTags := map[string]interface{}{
        "server":      nil,
        "method":      "GET",
        "status_code": "200",
        "result":      "response_string_mismatch",
    }
    checkOutput(t, &acc, expectedFields, expectedTags, nil, nil)
}

func TestTimeout(t *testing.T) {

@@ -380,11 +563,126 @@ func TestTimeout(t *testing.T) {
    err := h.Gather(&acc)
    require.NoError(t, err)

    _, ok := acc.IntField("http_response", "http_response_code")
    require.False(t, ok)
    response_value, ok := acc.StringField("http_response", "result_type")
    require.True(t, ok)
    require.Equal(t, "timeout", response_value)
    _, ok = acc.FloatField("http_response", "response_time")
    require.False(t, ok)
    expectedFields := map[string]interface{}{
        "result_type": "timeout",
        "result_code": 4,
    }
    expectedTags := map[string]interface{}{
        "server": nil,
        "method": "GET",
        "result": "timeout",
    }
    absentFields := []string{"http_response_code", "response_time", "response_string_match"}
    absentTags := []string{"status_code"}
    checkOutput(t, &acc, expectedFields, expectedTags, absentFields, absentTags)
}
func TestPluginErrors(t *testing.T) {
    mux := setUpTestMux()
    ts := httptest.NewServer(mux)
    defer ts.Close()

    // Bad regex test. Should return an error and gather nothing
    h := &HTTPResponse{
        Address:             ts.URL + "/good",
        Body:                "{ 'test': 'data'}",
        Method:              "GET",
        ResponseStringMatch: "bad regex:[[",
        ResponseTimeout:     internal.Duration{Duration: time.Second * 20},
        Headers: map[string]string{
            "Content-Type": "application/json",
        },
        FollowRedirects: true,
    }

    var acc testutil.Accumulator
    err := h.Gather(&acc)
    require.Error(t, err)

    absentFields := []string{"http_response_code", "response_time", "response_string_match", "result_type", "result_code"}
    absentTags := []string{"status_code", "result", "server", "method"}
    checkOutput(t, &acc, nil, nil, absentFields, absentTags)

    // Attempt to read empty body test
    h = &HTTPResponse{
        Address:             ts.URL + "/redirect",
        Body:                "",
        Method:              "GET",
        ResponseStringMatch: ".*",
        ResponseTimeout:     internal.Duration{Duration: time.Second * 20},
        FollowRedirects:     false,
    }

    acc = testutil.Accumulator{}
    err = h.Gather(&acc)
    require.NoError(t, err)

    expectedFields := map[string]interface{}{
        "http_response_code":    http.StatusMovedPermanently,
        "response_string_match": 0,
        "result_type":           "body_read_error",
        "result_code":           2,
        "response_time":         nil,
    }
    expectedTags := map[string]interface{}{
        "server":      nil,
        "method":      "GET",
        "status_code": "301",
        "result":      "body_read_error",
    }
    checkOutput(t, &acc, expectedFields, expectedTags, nil, nil)
}

func TestNetworkErrors(t *testing.T) {
    // DNS error
    h := &HTTPResponse{
        Address:         "https://nonexistent.nonexistent", // Any non-resolvable URL works here
        Body:            "",
        Method:          "GET",
        ResponseTimeout: internal.Duration{Duration: time.Second * 20},
        FollowRedirects: false,
    }

    var acc testutil.Accumulator
    err := h.Gather(&acc)
    require.NoError(t, err)

    expectedFields := map[string]interface{}{
        "result_type": "dns_error",
        "result_code": 5,
    }
    expectedTags := map[string]interface{}{
        "server": nil,
        "method": "GET",
        "result": "dns_error",
    }
    absentFields := []string{"http_response_code", "response_time", "response_string_match"}
    absentTags := []string{"status_code"}
    checkOutput(t, &acc, expectedFields, expectedTags, absentFields, absentTags)

    // Connection failed
    h = &HTTPResponse{
        Address:         "https://127.127.127.127", // Any non-routable IP works here
        Body:            "",
        Method:          "GET",
        ResponseTimeout: internal.Duration{Duration: time.Second * 20},
        FollowRedirects: false,
    }

    acc = testutil.Accumulator{}
    err = h.Gather(&acc)
    require.NoError(t, err)

    expectedFields = map[string]interface{}{
        "result_type": "connection_failed",
        "result_code": 3,
    }
    expectedTags = map[string]interface{}{
        "server": nil,
        "method": "GET",
        "result": "connection_failed",
    }
    absentFields = []string{"http_response_code", "response_time", "response_string_match"}
    absentTags = []string{"status_code"}
    checkOutput(t, &acc, expectedFields, expectedTags, absentFields, absentTags)
}
@@ -2,6 +2,8 @@

The httpjson plugin collects data from HTTP URLs which respond with JSON. It flattens the JSON and finds all numeric values, treating them as floats.

Deprecated (1.6): use the [http](../http) input.
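For a like-for-like replacement, a minimal sketch of the `http` input configuration might look as follows; the `urls` and `data_format` option names are assumptions based on that plugin's documentation, not part of this diff:

```toml
# Hypothetical migration sketch from httpjson to the http input.
[[inputs.http]]
  ## Endpoints formerly listed in httpjson's "servers" option
  urls = ["http://localhost:8080/stats"]
  ## Parse the response body as JSON, as httpjson did
  data_format = "json"
```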
### Configuration:

```toml

@@ -14,6 +14,13 @@ The Kapacitor plugin will collect metrics from the given Kapacitor instances.

  ## Time limit for http requests
  timeout = "5s"

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
```

### Measurements & Fields

@@ -21,6 +21,15 @@ type Kapacitor struct {

    Timeout internal.Duration

    // Path to CA file
    SSLCA string `toml:"ssl_ca"`
    // Path to host cert file
    SSLCert string `toml:"ssl_cert"`
    // Path to cert key file
    SSLKey string `toml:"ssl_key"`
    // Use SSL but skip chain & host verification
    InsecureSkipVerify bool

    client *http.Client
}

@@ -38,12 +47,23 @@ func (*Kapacitor) SampleConfig() string {

  ## Time limit for http requests
  timeout = "5s"

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
`
}
func (k *Kapacitor) Gather(acc telegraf.Accumulator) error {
    if k.client == nil {
        k.client = &http.Client{Timeout: k.Timeout.Duration}
        client, err := k.createHttpClient()
        if err != nil {
            return err
        }
        k.client = client
    }

    var wg sync.WaitGroup

@@ -61,6 +81,23 @@ func (k *Kapacitor) Gather(acc telegraf.Accumulator) error {
    return nil
}

func (k *Kapacitor) createHttpClient() (*http.Client, error) {
    tlsCfg, err := internal.GetTLSConfig(
        k.SSLCert, k.SSLKey, k.SSLCA, k.InsecureSkipVerify)
    if err != nil {
        return nil, err
    }

    client := &http.Client{
        Transport: &http.Transport{
            TLSClientConfig: tlsCfg,
        },
        Timeout: k.Timeout.Duration,
    }

    return client, nil
}

type object struct {
    Name   string                 `json:"name"`
    Values map[string]interface{} `json:"values"`

@@ -326,6 +326,10 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
        }
    }

    if len(fields) == 0 {
        return nil, fmt.Errorf("logparser_grok: must have one or more fields")
    }

    return metric.New(p.Measurement, tags, fields, p.tsModder.tsMod(timestamp))
}
@@ -799,7 +799,7 @@ func TestTimezoneEmptyCompileFileAndParse(t *testing.T) {
        },
        metricA.Fields())
    assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
    assert.Equal(t, int64(1465040505000000000), metricA.UnixNano())
    assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())

    metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`)
    require.NotNil(t, metricB)

@@ -812,7 +812,7 @@ func TestTimezoneEmptyCompileFileAndParse(t *testing.T) {
        },
        metricB.Fields())
    assert.Equal(t, map[string]string{}, metricB.Tags())
    assert.Equal(t, int64(1465044105000000000), metricB.UnixNano())
    assert.Equal(t, int64(1465044105000000000), metricB.Time().UnixNano())
}

func TestTimezoneMalformedCompileFileAndParse(t *testing.T) {

@@ -835,7 +835,7 @@ func TestTimezoneMalformedCompileFileAndParse(t *testing.T) {
        },
        metricA.Fields())
    assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
    assert.Equal(t, int64(1465040505000000000), metricA.UnixNano())
    assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())

    metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`)
    require.NotNil(t, metricB)

@@ -848,7 +848,7 @@ func TestTimezoneMalformedCompileFileAndParse(t *testing.T) {
        },
        metricB.Fields())
    assert.Equal(t, map[string]string{}, metricB.Tags())
    assert.Equal(t, int64(1465044105000000000), metricB.UnixNano())
    assert.Equal(t, int64(1465044105000000000), metricB.Time().UnixNano())
}

func TestTimezoneEuropeCompileFileAndParse(t *testing.T) {

@@ -871,7 +871,7 @@ func TestTimezoneEuropeCompileFileAndParse(t *testing.T) {
        },
        metricA.Fields())
    assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
    assert.Equal(t, int64(1465040505000000000), metricA.UnixNano())
    assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())

    metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`)
    require.NotNil(t, metricB)

@@ -884,7 +884,7 @@ func TestTimezoneEuropeCompileFileAndParse(t *testing.T) {
        },
        metricB.Fields())
    assert.Equal(t, map[string]string{}, metricB.Tags())
    assert.Equal(t, int64(1465036905000000000), metricB.UnixNano())
    assert.Equal(t, int64(1465036905000000000), metricB.Time().UnixNano())
}

func TestTimezoneAmericasCompileFileAndParse(t *testing.T) {

@@ -907,7 +907,7 @@ func TestTimezoneAmericasCompileFileAndParse(t *testing.T) {
        },
        metricA.Fields())
    assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
    assert.Equal(t, int64(1465040505000000000), metricA.UnixNano())
    assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())

    metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`)
    require.NotNil(t, metricB)

@@ -920,7 +920,7 @@ func TestTimezoneAmericasCompileFileAndParse(t *testing.T) {
        },
        metricB.Fields())
    assert.Equal(t, map[string]string{}, metricB.Tags())
    assert.Equal(t, int64(1465058505000000000), metricB.UnixNano())
    assert.Equal(t, int64(1465058505000000000), metricB.Time().UnixNano())
}

func TestTimezoneLocalCompileFileAndParse(t *testing.T) {

@@ -943,7 +943,7 @@ func TestTimezoneLocalCompileFileAndParse(t *testing.T) {
        },
        metricA.Fields())
    assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
    assert.Equal(t, int64(1465040505000000000), metricA.UnixNano())
    assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())

    metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`)
    require.NotNil(t, metricB)

@@ -956,5 +956,5 @@ func TestTimezoneLocalCompileFileAndParse(t *testing.T) {
        },
        metricB.Fields())
    assert.Equal(t, map[string]string{}, metricB.Tags())
    assert.Equal(t, time.Date(2016, time.June, 4, 12, 41, 45, 0, time.Local).UnixNano(), metricB.UnixNano())
    assert.Equal(t, time.Date(2016, time.June, 4, 12, 41, 45, 0, time.Local).UnixNano(), metricB.Time().UnixNano())
}
@@ -11,7 +11,7 @@ For more information, please check the [Mesos Observability Metrics](http://meso
  ## Timeout, in ms.
  timeout = 100
  ## A list of Mesos masters.
  masters = ["localhost:5050"]
  masters = ["http://localhost:5050"]
  ## Master metrics groups to be collected, by default, all enabled.
  master_collections = [
    "resources",

@@ -35,6 +35,13 @@ For more information, please check the [Mesos Observability Metrics](http://meso
  #   "tasks",
  #   "messages",
  # ]

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
```
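The two adjacent `masters` lines in the sample above are the before and after of this change. A cleaned-up configuration keeps only the URL form; as the `parseURL` change further down shows, bare `host:port` entries are still accepted but logged with a warning:

```toml
[[inputs.mesos]]
  ## Pre-1.6 form, still accepted but now logged with a warning:
  # masters = ["localhost:5050"]
  ## Preferred form since this change, an explicit URL:
  masters = ["http://localhost:5050"]
```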
By default this plugin is not configured to gather metrics from mesos. Since a mesos cluster can be deployed in numerous ways it does not provide any default

@@ -235,7 +242,8 @@ Mesos slave metric groups
### Tags:

- All master/slave measurements have the following tags:
  - server
  - server (network location of server: `host:port`)
  - url (URL origin of server: `scheme://host:port`)
  - role (master/slave)

- All master measurements have the extra tags:

@@ -7,11 +7,14 @@ import (
    "log"
    "net"
    "net/http"
    "net/url"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/plugins/inputs"
    jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
)

@@ -30,6 +33,20 @@ type Mesos struct {
    Slaves    []string
    SlaveCols []string `toml:"slave_collections"`
    //SlaveTasks bool

    // Path to CA file
    SSLCA string `toml:"ssl_ca"`
    // Path to host cert file
    SSLCert string `toml:"ssl_cert"`
    // Path to cert key file
    SSLKey string `toml:"ssl_key"`
    // Use SSL but skip chain & host verification
    InsecureSkipVerify bool

    initialized bool
    client      *http.Client
    masterURLs  []*url.URL
    slaveURLs   []*url.URL
}

var allMetrics = map[Role][]string{

@@ -41,7 +58,7 @@ var sampleConfig = `
  ## Timeout, in ms.
  timeout = 100
  ## A list of Mesos masters.
  masters = ["localhost:5050"]
  masters = ["http://localhost:5050"]
  ## Master metrics groups to be collected, by default, all enabled.
  master_collections = [
    "resources",

@@ -65,6 +82,13 @@ var sampleConfig = `
  #   "tasks",
  #   "messages",
  # ]

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
`

// SampleConfig returns a sample configuration block

@@ -77,7 +101,28 @@ func (m *Mesos) Description() string {
    return "Telegraf plugin for gathering metrics from N Mesos masters"
}

func (m *Mesos) SetDefaults() {
func parseURL(s string, role Role) (*url.URL, error) {
    if !strings.HasPrefix(s, "http://") && !strings.HasPrefix(s, "https://") {
        host, port, err := net.SplitHostPort(s)
        // no port specified
        if err != nil {
            host = s
            switch role {
            case MASTER:
                port = "5050"
            case SLAVE:
                port = "5051"
            }
        }

        s = "http://" + host + ":" + port
        log.Printf("W! [inputs.mesos] Using %q as connection URL; please update your configuration to use an URL", s)
    }

    return url.Parse(s)
}
func (m *Mesos) initialize() error {
    if len(m.MasterCols) == 0 {
        m.MasterCols = allMetrics[MASTER]
    }

@@ -87,33 +132,71 @@ func (m *Mesos) SetDefaults() {
    }

    if m.Timeout == 0 {
        log.Println("I! [mesos] Missing timeout value, setting default value (100ms)")
        log.Println("I! [inputs.mesos] Missing timeout value, setting default value (100ms)")
        m.Timeout = 100
    }

    rawQuery := "timeout=" + strconv.Itoa(m.Timeout) + "ms"

    m.masterURLs = make([]*url.URL, 0, len(m.Masters))
    for _, master := range m.Masters {
        u, err := parseURL(master, MASTER)
        if err != nil {
            return err
        }

        u.RawQuery = rawQuery
        m.masterURLs = append(m.masterURLs, u)
    }

    m.slaveURLs = make([]*url.URL, 0, len(m.Slaves))
    for _, slave := range m.Slaves {
        u, err := parseURL(slave, SLAVE)
        if err != nil {
            return err
        }

        u.RawQuery = rawQuery
        m.slaveURLs = append(m.slaveURLs, u)
    }

    client, err := m.createHttpClient()
    if err != nil {
        return err
    }
    m.client = client

    return nil
}

// Gather() metrics from given list of Mesos Masters
func (m *Mesos) Gather(acc telegraf.Accumulator) error {
    var wg sync.WaitGroup

    m.SetDefaults()

    for _, v := range m.Masters {
        wg.Add(1)
        go func(c string) {
            acc.AddError(m.gatherMainMetrics(c, ":5050", MASTER, acc))
            wg.Done()
            return
        }(v)
    if !m.initialized {
        err := m.initialize()
        if err != nil {
            return err
        }
        m.initialized = true
    }

    for _, v := range m.Slaves {
    var wg sync.WaitGroup

    for _, master := range m.masterURLs {
        wg.Add(1)
        go func(c string) {
            acc.AddError(m.gatherMainMetrics(c, ":5051", SLAVE, acc))
        go func(master *url.URL) {
            acc.AddError(m.gatherMainMetrics(master, MASTER, acc))
            wg.Done()
            return
        }(v)
        }(master)
    }

    for _, slave := range m.slaveURLs {
        wg.Add(1)
        go func(slave *url.URL) {
            acc.AddError(m.gatherMainMetrics(slave, SLAVE, acc))
            wg.Done()
            return
        }(slave)

        // if !m.SlaveTasks {
        // 	continue

@@ -121,7 +204,7 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error {

        // wg.Add(1)
        // go func(c string) {
        // 	acc.AddError(m.gatherSlaveTaskMetrics(c, ":5051", acc))
        // 	acc.AddError(m.gatherSlaveTaskMetrics(slave, acc))
        // 	wg.Done()
        // 	return
        // }(v)

@@ -132,6 +215,24 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error {
    return nil
}

func (m *Mesos) createHttpClient() (*http.Client, error) {
    tlsCfg, err := internal.GetTLSConfig(
        m.SSLCert, m.SSLKey, m.SSLCA, m.InsecureSkipVerify)
    if err != nil {
        return nil, err
    }

    client := &http.Client{
        Transport: &http.Transport{
            Proxy:           http.ProxyFromEnvironment,
            TLSClientConfig: tlsCfg,
        },
        Timeout: 4 * time.Second,
    }

    return client, nil
}

// metricsDiff() returns set names for removal
func metricsDiff(role Role, w []string) []string {
    b := []string{}

@@ -393,15 +494,6 @@ func (m *Mesos) filterMetrics(role Role, metrics *map[string]interface{}) {
    }
}

var tr = &http.Transport{
    ResponseHeaderTimeout: time.Duration(3 * time.Second),
}

var client = &http.Client{
    Transport: tr,
    Timeout:   time.Duration(4 * time.Second),
}
// TaskStats struct for JSON API output /monitor/statistics
type TaskStats struct {
    ExecutorID string `json:"executor_id"`

@@ -409,22 +501,15 @@ type TaskStats struct {
    Statistics map[string]interface{} `json:"statistics"`
}

func (m *Mesos) gatherSlaveTaskMetrics(address string, defaultPort string, acc telegraf.Accumulator) error {
func (m *Mesos) gatherSlaveTaskMetrics(u *url.URL, acc telegraf.Accumulator) error {
    var metrics []TaskStats

    host, _, err := net.SplitHostPort(address)
    if err != nil {
        host = address
        address = address + defaultPort
    }

    tags := map[string]string{
        "server": host,
        "server": u.Hostname(),
        "url":    urlTag(u),
    }

    ts := strconv.Itoa(m.Timeout) + "ms"

    resp, err := client.Get("http://" + address + "/monitor/statistics?timeout=" + ts)
    resp, err := m.client.Get(withPath(u, "/monitor/statistics").String())

    if err != nil {
        return err

@@ -459,24 +544,31 @@ func (m *Mesos) gatherSlaveTaskMetrics(address string, defaultPort string, acc t
    return nil
}

func withPath(u *url.URL, path string) *url.URL {
    c := *u
    c.Path = path
    return &c
}

func urlTag(u *url.URL) string {
    c := *u
    c.Path = ""
    c.User = nil
    c.RawQuery = ""
    return c.String()
}

// This should not belong to the object
func (m *Mesos) gatherMainMetrics(a string, defaultPort string, role Role, acc telegraf.Accumulator) error {
func (m *Mesos) gatherMainMetrics(u *url.URL, role Role, acc telegraf.Accumulator) error {
    var jsonOut map[string]interface{}

    host, _, err := net.SplitHostPort(a)
    if err != nil {
        host = a
        a = a + defaultPort
    }

    tags := map[string]string{
        "server": host,
        "server": u.Hostname(),
        "url":    urlTag(u),
        "role":   string(role),
    }

    ts := strconv.Itoa(m.Timeout) + "ms"

    resp, err := client.Get("http://" + a + "/metrics/snapshot?timeout=" + ts)
    resp, err := m.client.Get(withPath(u, "/metrics/snapshot").String())

    if err != nil {
        return err

@@ -6,10 +6,12 @@ import (
    "math/rand"
    "net/http"
    "net/http/httptest"
    "net/url"
    "os"
    "testing"

    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/require"
)

var masterMetrics map[string]interface{}

@@ -378,3 +380,19 @@ func TestSlaveFilter(t *testing.T) {
        }
    }
}

func TestWithPathDoesNotModify(t *testing.T) {
    u, err := url.Parse("http://localhost:5051")
    require.NoError(t, err)
    v := withPath(u, "/xyzzy")
    require.Equal(t, u.String(), "http://localhost:5051")
    require.Equal(t, v.String(), "http://localhost:5051/xyzzy")
}

func TestURLTagDoesNotModify(t *testing.T) {
    u, err := url.Parse("http://a:b@localhost:5051?timeout=1ms")
    require.NoError(t, err)
    v := urlTag(u)
    require.Equal(t, u.String(), "http://a:b@localhost:5051?timeout=1ms")
    require.Equal(t, v, "http://localhost:5051")
}
@@ -49,6 +49,10 @@ and create a single measurement containing values e.g.
* resident_megabytes
* updates_per_sec
* vsize_megabytes
* total_in_use
* total_available
* total_created
* total_refreshing
* ttl_deletes_per_sec
* ttl_passes_per_sec
* repl_lag

@@ -66,6 +66,13 @@ var DefaultClusterStats = map[string]string{
    "jumbo_chunks": "JumboChunksCount",
}

var DefaultShardStats = map[string]string{
    "total_in_use":     "TotalInUse",
    "total_available":  "TotalAvailable",
    "total_created":    "TotalCreated",
    "total_refreshing": "TotalRefreshing",
}

var MmapStats = map[string]string{
    "mapped_megabytes":     "Mapped",
    "non-mapped_megabytes": "NonMapped",

@@ -127,7 +134,8 @@ func (d *MongodbData) AddDefaultStats() {
        d.addStat(statLine, DefaultReplStats)
    }
    d.addStat(statLine, DefaultClusterStats)
    if d.StatLine.StorageEngine == "mmapv1" {
    d.addStat(statLine, DefaultShardStats)
    if d.StatLine.StorageEngine == "mmapv1" || d.StatLine.StorageEngine == "rocksdb" {
        d.addStat(statLine, MmapStats)
    } else if d.StatLine.StorageEngine == "wiredTiger" {
        for key, value := range WiredTigerStats {

@@ -99,6 +99,27 @@ func TestAddWiredTigerStats(t *testing.T) {
    }
}

func TestAddShardStats(t *testing.T) {
    d := NewMongodbData(
        &StatLine{
            TotalInUse:      0,
            TotalAvailable:  0,
            TotalCreated:    0,
            TotalRefreshing: 0,
        },
        tags,
    )

    var acc testutil.Accumulator

    d.AddDefaultStats()
    d.flush(&acc)

    for key, _ := range DefaultShardStats {
        assert.True(t, acc.HasInt64Field("mongodb", key))
    }
}

func TestStateTag(t *testing.T) {
    d := NewMongodbData(
        &StatLine{

@@ -147,6 +168,10 @@ func TestStateTag(t *testing.T) {
        "ttl_deletes_per_sec": int64(0),
        "ttl_passes_per_sec":  int64(0),
        "jumbo_chunks":        int64(0),
        "total_in_use":        int64(0),
        "total_available":     int64(0),
        "total_created":       int64(0),
        "total_refreshing":    int64(0),
    }
    acc.AssertContainsTaggedFields(t, "mongodb", fields, stateTags)
}
@@ -55,8 +55,18 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
        JumboChunksCount: int64(jumbo_chunks),
    }

    result_db_stats := &DbStats{}
    resultShards := &ShardStats{}
    err = s.Session.DB("admin").Run(bson.D{
        {
            Name:  "shardConnPoolStats",
            Value: 1,
        },
    }, &resultShards)
    if err != nil {
        log.Println("E! Error getting database shard stats (" + err.Error() + ")")
    }

    result_db_stats := &DbStats{}
    if gatherDbStats == true {
        names := []string{}
        names, err = s.Session.DatabaseNames()

@@ -88,6 +98,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
        ReplSetStatus: result_repl,
        ClusterStatus: result_cluster,
        DbStats:       result_db_stats,
        ShardStats:    resultShards,
    }

    defer func() {

@@ -34,6 +34,7 @@ type MongoStatus struct {
    ReplSetStatus *ReplSetStatus
    ClusterStatus *ClusterStatus
    DbStats       *DbStats
    ShardStats    *ShardStats
}

type ServerStatus struct {

@@ -116,6 +117,14 @@ type WiredTiger struct {
    Cache CacheStats `bson:"cache"`
}

// ShardStats stores information from shardConnPoolStats.
type ShardStats struct {
    TotalInUse      int64 `bson:"totalInUse"`
    TotalAvailable  int64 `bson:"totalAvailable"`
    TotalCreated    int64 `bson:"totalCreated"`
    TotalRefreshing int64 `bson:"totalRefreshing"`
}

type ConcurrentTransactions struct {
    Write ConcurrentTransStats `bson:"write"`
    Read  ConcurrentTransStats `bson:"read"`

@@ -450,6 +459,9 @@ type StatLine struct {

    // DB stats field
    DbStatsLines []DbStatLine

    // Shard stats
    TotalInUse, TotalAvailable, TotalCreated, TotalRefreshing int64
}

type DbStatLine struct {

@@ -783,5 +795,12 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
        returnVal.DbStatsLines = append(returnVal.DbStatsLines, *dbStatLine)
    }

    // Set shard stats
    newShardStats := *newMongo.ShardStats
    returnVal.TotalInUse = newShardStats.TotalInUse
    returnVal.TotalAvailable = newShardStats.TotalAvailable
    returnVal.TotalCreated = newShardStats.TotalCreated
    returnVal.TotalRefreshing = newShardStats.TotalRefreshing

    return returnVal
}
@@ -1,4 +1,4 @@
# MySQL Input plugin
# MySQL Input Plugin

This plugin gathers statistics from a MySQL server

@@ -18,9 +18,9 @@ This plugin gathers the statistic data from MySQL server
* File events statistics
* Table schema statistics

## Configuration
### Configuration

```
```toml
# Read metrics from one or many mysql servers
[[inputs.mysql]]
  ## specify servers via a url matching:

@@ -81,14 +81,97 @@ This plugin gathers the statistic data from MySQL server
  #
  ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
  interval_slow = "30m"

  ## Optional SSL Config (will be used if tls=custom parameter specified in server uri)
  ssl_ca = "/etc/telegraf/ca.pem"
  ssl_cert = "/etc/telegraf/cert.pem"
  ssl_key = "/etc/telegraf/key.pem"
```

## Measurements & Fields
#### Metric Version

When `metric_version = 2`, a variety of field type issues are corrected as well
as naming inconsistencies. If you have existing data on the original version,
enabling this feature will cause a `field type error` when it is inserted into
InfluxDB due to the change of types. For this reason, you should keep
`metric_version` unset until you are ready to migrate to the new format.

If preserving your old data is not required you may wish to drop conflicting
measurements:
```
DROP SERIES from mysql
DROP SERIES from mysql_variables
DROP SERIES from mysql_innodb
```

Otherwise, migration can be performed using the following steps:

1. Duplicate your `mysql` plugin configuration and add a `name_suffix` and
`metric_version = 2`, this will result in collection using both the old and new
style concurrently:
```toml
[[inputs.mysql]]
  servers = ["tcp(127.0.0.1:3306)/"]

[[inputs.mysql]]
  name_suffix = "_v2"
  metric_version = 2

  servers = ["tcp(127.0.0.1:3306)/"]
```

2. Upgrade all affected Telegraf clients to version >=1.6.

New measurements will be created with the `name_suffix`, for example:
- `mysql_v2`
- `mysql_variables_v2`

3. Update charts, alerts, and other supporting code to the new format.
4. You can now remove the old `mysql` plugin configuration and remove old
measurements.

If you wish to remove the `name_suffix` you may use Kapacitor to copy the
historical data to the default name. Do this only after retiring the old
measurement name.

1. Use the technique described above to write to multiple locations:
```toml
[[inputs.mysql]]
  servers = ["tcp(127.0.0.1:3306)/"]
  metric_version = 2

[[inputs.mysql]]
  name_suffix = "_v2"
  metric_version = 2

  servers = ["tcp(127.0.0.1:3306)/"]
```
2. Create a TICKScript to copy the historical data:
```
dbrp "telegraf"."autogen"

batch
    |query('''
        SELECT * FROM "telegraf"."autogen"."mysql_v2"
    ''')
        .period(5m)
        .every(5m)
    |influxDBOut()
        .database('telegraf')
        .retentionPolicy('autogen')
        .measurement('mysql')
```
3. Define a task for your script:
```sh
kapacitor define copy-measurement -tick copy-measurement.task
```
4. Run the task over the data you would like to migrate:
```sh
kapacitor replay-live batch -start 2018-03-30T20:00:00Z -stop 2018-04-01T12:00:00Z -rec-time -task copy-measurement
```
5. Verify copied data and repeat for other measurements.

### Metrics:
* Global statuses - all numeric and boolean values of `SHOW GLOBAL STATUS`
* Global variables - all numeric and boolean values of `SHOW GLOBAL VARIABLES`
* Slave status - metrics from `SHOW SLAVE STATUS`; the metrics are gathered when
@@ -13,6 +13,7 @@ import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf/plugins/inputs/mysql/v1"

    "github.com/go-sql-driver/mysql"
)

@@ -40,6 +41,7 @@ type Mysql struct {
    SSLCA         string `toml:"ssl_ca"`
    SSLCert       string `toml:"ssl_cert"`
    SSLKey        string `toml:"ssl_key"`
    MetricVersion int    `toml:"metric_version"`
}

var sampleConfig = `

@@ -52,6 +54,20 @@ var sampleConfig = `
  #
  ## If no servers are specified, then localhost is used as the host.
  servers = ["tcp(127.0.0.1:3306)/"]

  ## Selects the metric output format.
  ##
  ## This option exists to maintain backwards compatibility; if you have
  ## existing metrics do not set or change this value until you are ready to
  ## migrate to the new format.
  ##
  ## If you do not have existing metrics from this plugin, set to the latest
  ## version.
  ##
  ## Telegraf >=1.6: metric_version = 2
  ##           <1.6: metric_version = 1 (or unset)
  metric_version = 2

  ## the limits for metrics from perf_events_statements
  perf_events_statements_digest_text_limit  = 120
  perf_events_statements_limit              = 250

@@ -541,7 +557,7 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, serv string, acc telegraf.Accu
        fields[key] = string(val)
        tags[key] = string(val)
    }
    if value, ok := parseValue(val); ok {
    if value, ok := m.parseValue(val); ok {
        fields[key] = value
    }
    // Send 20 fields at a time

@@ -593,7 +609,7 @@ func (m *Mysql) gatherSlaveStatuses(db *sql.DB, serv string, acc telegraf.Accumu
    // range over columns, and try to parse values
    for i, col := range cols {
        col = strings.ToLower(col)
        if value, ok := parseValue(*vals[i].(*sql.RawBytes)); ok {
        if value, ok := m.parseValue(*vals[i].(*sql.RawBytes)); ok {
            fields["slave_"+col] = value
        }
    }
@@ -662,10 +678,75 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum
            return err
        }

        key = strings.ToLower(key)
        if m.MetricVersion < 2 {
            var found bool
            for _, mapped := range v1.Mappings {
                if strings.HasPrefix(key, mapped.OnServer) {
                    // convert numeric values to integer
                    i, _ := strconv.Atoi(string(val))
                    fields[mapped.InExport+key[len(mapped.OnServer):]] = i
                    found = true
                }
            }
            // Send 20 fields at a time
            if len(fields) >= 20 {
                acc.AddFields("mysql", fields, tags)
                fields = make(map[string]interface{})
            }
            if found {
                continue
            }

        if value, ok := parseValue(val); ok {
            fields[key] = value
            // search for specific values
            switch key {
            case "Queries":
                i, err := strconv.ParseInt(string(val), 10, 64)
                if err != nil {
                    acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err))
                } else {
                    fields["queries"] = i
                }
            case "Questions":
                i, err := strconv.ParseInt(string(val), 10, 64)
                if err != nil {
                    acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err))
                } else {
                    fields["questions"] = i
                }
            case "Slow_queries":
                i, err := strconv.ParseInt(string(val), 10, 64)
                if err != nil {
                    acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err))
                } else {
                    fields["slow_queries"] = i
                }
            case "Connections":
                i, err := strconv.ParseInt(string(val), 10, 64)
                if err != nil {
                    acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err))
                } else {
                    fields["connections"] = i
                }
            case "Syncs":
                i, err := strconv.ParseInt(string(val), 10, 64)
                if err != nil {
                    acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err))
                } else {
                    fields["syncs"] = i
                }
            case "Uptime":
                i, err := strconv.ParseInt(string(val), 10, 64)
                if err != nil {
                    acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err))
                } else {
                    fields["uptime"] = i
                }
            }
        } else {
            key = strings.ToLower(key)
            if value, ok := m.parseValue(val); ok {
                fields[key] = value
            }
        }

        // Send 20 fields at a time

@@ -820,7 +901,11 @@ func (m *Mysql) GatherProcessListStatuses(db *sql.DB, serv string, acc telegraf.
    for s, c := range stateCounts {
        fields[newNamespace("threads", s)] = c
    }
    acc.AddFields("mysql_process_list", fields, tags)
    if m.MetricVersion < 2 {
        acc.AddFields("mysql_info_schema", fields, tags)
    } else {
        acc.AddFields("mysql_process_list", fields, tags)
    }
    return nil
}

@@ -1033,7 +1118,11 @@ func (m *Mysql) gatherInfoSchemaAutoIncStatuses(db *sql.DB, serv string, acc tel
        fields["auto_increment_column"] = incValue
        fields["auto_increment_column_max"] = maxInt

        acc.AddFields("mysql_table_schema", fields, tags)
        if m.MetricVersion < 2 {
            acc.AddFields("mysql_info_schema", fields, tags)
        } else {
            acc.AddFields("mysql_table_schema", fields, tags)
        }
    }
    return nil
}

@@ -1059,7 +1148,7 @@ func (m *Mysql) gatherInnoDBMetrics(db *sql.DB, serv string, acc telegraf.Accumu
            return err
        }
        key = strings.ToLower(key)
        if value, ok := parseValue(val); ok {
        if value, ok := m.parseValue(val); ok {
            fields[key] = value
        }
        // Send 20 fields at a time
@@ -1430,17 +1519,37 @@ func (m *Mysql) gatherTableSchema(db *sql.DB, serv string, acc telegraf.Accumula
        tags["schema"] = tableSchema
        tags["table"] = tableName

        acc.AddFields("mysql_table_schema",
            map[string]interface{}{"rows": tableRows}, tags)
        if m.MetricVersion < 2 {
            acc.AddFields(newNamespace("info_schema", "table_rows"),
                map[string]interface{}{"value": tableRows}, tags)

        acc.AddFields("mysql_table_schema",
            map[string]interface{}{"data_length": dataLength}, tags)
            dlTags := copyTags(tags)
            dlTags["component"] = "data_length"
            acc.AddFields(newNamespace("info_schema", "table_size", "data_length"),
                map[string]interface{}{"value": dataLength}, dlTags)

        acc.AddFields("mysql_table_schema",
            map[string]interface{}{"index_length": indexLength}, tags)
            ilTags := copyTags(tags)
            ilTags["component"] = "index_length"
            acc.AddFields(newNamespace("info_schema", "table_size", "index_length"),
                map[string]interface{}{"value": indexLength}, ilTags)

        acc.AddFields("mysql_table_schema",
            map[string]interface{}{"data_free": dataFree}, tags)
            dfTags := copyTags(tags)
            dfTags["component"] = "data_free"
            acc.AddFields(newNamespace("info_schema", "table_size", "data_free"),
                map[string]interface{}{"value": dataFree}, dfTags)
        } else {
            acc.AddFields("mysql_table_schema",
                map[string]interface{}{"rows": tableRows}, tags)

            acc.AddFields("mysql_table_schema",
                map[string]interface{}{"data_length": dataLength}, tags)

            acc.AddFields("mysql_table_schema",
                map[string]interface{}{"index_length": indexLength}, tags)

            acc.AddFields("mysql_table_schema",
                map[string]interface{}{"data_free": dataFree}, tags)
        }

        versionTags := copyTags(tags)
        versionTags["type"] = tableType

@@ -1448,13 +1557,26 @@ func (m *Mysql) gatherTableSchema(db *sql.DB, serv string, acc telegraf.Accumula
        versionTags["row_format"] = rowFormat
        versionTags["create_options"] = createOptions

        acc.AddFields("mysql_table_schema_version",
            map[string]interface{}{"table_version": version}, versionTags)
        if m.MetricVersion < 2 {
            acc.AddFields(newNamespace("info_schema", "table_version"),
                map[string]interface{}{"value": version}, versionTags)
        } else {
            acc.AddFields("mysql_table_schema_version",
                map[string]interface{}{"table_version": version}, versionTags)
        }
    }
    }
    return nil
}

func (m *Mysql) parseValue(value sql.RawBytes) (interface{}, bool) {
    if m.MetricVersion < 2 {
        return v1.ParseValue(value)
    } else {
        return parseValue(value)
    }
}

// parseValue can be used to convert values such as "ON","OFF","Yes","No" to 0,1
func parseValue(value sql.RawBytes) (interface{}, bool) {
    if bytes.EqualFold(value, []byte("YES")) || bytes.Compare(value, []byte("ON")) == 0 {

plugins/inputs/mysql/v1/mysql.go (new file, 195 lines)
@@ -0,0 +1,195 @@
package v1

import (
    "bytes"
    "database/sql"
    "strconv"
)

type Mapping struct {
    OnServer string
    InExport string
}

var Mappings = []*Mapping{
    {OnServer: "Aborted_", InExport: "aborted_"},
    {OnServer: "Bytes_", InExport: "bytes_"},
    {OnServer: "Com_", InExport: "commands_"},
    {OnServer: "Created_", InExport: "created_"},
    {OnServer: "Handler_", InExport: "handler_"},
    {OnServer: "Innodb_", InExport: "innodb_"},
    {OnServer: "Key_", InExport: "key_"},
    {OnServer: "Open_", InExport: "open_"},
    {OnServer: "Opened_", InExport: "opened_"},
    {OnServer: "Qcache_", InExport: "qcache_"},
    {OnServer: "Table_", InExport: "table_"},
    {OnServer: "Tokudb_", InExport: "tokudb_"},
    {OnServer: "Threads_", InExport: "threads_"},
    {OnServer: "Access_", InExport: "access_"},
    {OnServer: "Aria__", InExport: "aria_"},
    {OnServer: "Binlog__", InExport: "binlog_"},
    {OnServer: "Busy_", InExport: "busy_"},
    {OnServer: "Connection_", InExport: "connection_"},
    {OnServer: "Delayed_", InExport: "delayed_"},
    {OnServer: "Empty_", InExport: "empty_"},
    {OnServer: "Executed_", InExport: "executed_"},
    {OnServer: "Executed_", InExport: "executed_"},
    {OnServer: "Feature_", InExport: "feature_"},
    {OnServer: "Flush_", InExport: "flush_"},
    {OnServer: "Last_", InExport: "last_"},
    {OnServer: "Master_", InExport: "master_"},
    {OnServer: "Max_", InExport: "max_"},
    {OnServer: "Memory_", InExport: "memory_"},
    {OnServer: "Not_", InExport: "not_"},
    {OnServer: "Performance_", InExport: "performance_"},
    {OnServer: "Prepared_", InExport: "prepared_"},
    {OnServer: "Rows_", InExport: "rows_"},
    {OnServer: "Rpl_", InExport: "rpl_"},
    {OnServer: "Select_", InExport: "select_"},
    {OnServer: "Slave_", InExport: "slave_"},
    {OnServer: "Slow_", InExport: "slow_"},
    {OnServer: "Sort_", InExport: "sort_"},
    {OnServer: "Subquery_", InExport: "subquery_"},
    {OnServer: "Tc_", InExport: "tc_"},
    {OnServer: "Threadpool_", InExport: "threadpool_"},
    {OnServer: "wsrep_", InExport: "wsrep_"},
    {OnServer: "Uptime_", InExport: "uptime_"},
}

func ParseValue(value sql.RawBytes) (float64, bool) {
    if bytes.Compare(value, []byte("Yes")) == 0 || bytes.Compare(value, []byte("ON")) == 0 {
        return 1, true
    }

    if bytes.Compare(value, []byte("No")) == 0 || bytes.Compare(value, []byte("OFF")) == 0 {
        return 0, true
    }
    n, err := strconv.ParseFloat(string(value), 64)
    return n, err == nil
}
@@ -59,7 +59,7 @@ func TestRunParserInvalidMsg(t *testing.T) {
    in <- natsMsg(invalidMsg)

    acc.WaitError(1)
    assert.Contains(t, acc.Errors[0].Error(), "E! subject: telegraf, error: metric parsing error")
    assert.Contains(t, acc.Errors[0].Error(), "E! subject: telegraf, error: metric parse error")
    assert.EqualValues(t, 0, acc.NMetrics())
}

@@ -1,57 +1,78 @@
# Telegraf plugin: passenger
# Passenger Input Plugin

Get phusion passenger stat using their command line utility
`passenger-status`
Gather [Phusion Passenger](https://www.phusionpassenger.com/) metrics using the `passenger-status` command line utility.

# Measurements
**Series Cardinality Warning**

Meta:
Depending on your environment, the `passenger_process` measurement of this
plugin can quickly create a high number of series which, left unchecked, can
cause high load on your database. You can use the following techniques to
manage your series cardinality:

- tags:
- Use the
  [measurement filtering](https://docs.influxdata.com/telegraf/latest/administration/configuration/#measurement-filtering)
  options to exclude unneeded tags. In some environments, you may wish to use
  `tagexclude` to remove the `pid` and `process_group_id` tags (see the sketch
  after this list).
- Write to a database with an appropriate
  [retention policy](https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/).
- Limit series cardinality in your database using the
  [`max-series-per-database`](https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000) and
  [`max-values-per-tag`](https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000) settings.
- Consider using the
  [Time Series Index](https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/).
- Monitor your database's
  [series cardinality](https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality).
|
||||
* name
|
||||
* passenger_version
|
||||
* pid
|
||||
* code_revision
|
||||
### Configuration
|
||||
|
||||
Measurement names:
|
||||
```toml
|
||||
# Read metrics of passenger using passenger-status
|
||||
[[inputs.passenger]]
|
||||
## Path of passenger-status.
|
||||
##
|
||||
## Plugin gather metric via parsing XML output of passenger-status
|
||||
## More information about the tool:
|
||||
## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
|
||||
##
|
||||
## If no path is specified, then the plugin simply execute passenger-status
|
||||
## hopefully it can be found in your PATH
|
||||
command = "passenger-status -v --show=xml"
|
||||
```
|
||||
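Tying the two sections together, the `tagexclude` option mentioned in the cardinality warning can be applied directly to this plugin's configuration; a sketch (placement of `tagexclude` follows standard Telegraf measurement filtering):

```toml
[[inputs.passenger]]
  command = "passenger-status -v --show=xml"
  ## Drop the two highest-cardinality tags before they reach the database.
  tagexclude = ["pid", "process_group_id"]
```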

- passenger:
#### Permissions:

* Tags: `passenger_version`
* Fields:
Telegraf must have permission to execute the `passenger-status` command. On most systems, Telegraf runs as the `telegraf` user.

### Metrics:

- passenger
  - tags:
    - passenger_version
  - fields:
    - process_count
    - max
    - capacity_used
    - get_wait_list_size
    - max
    - capacity_used
    - get_wait_list_size

- passenger_supergroup:

* Tags: `name`
* Fields:

  - get_wait_list_size
  - capacity_used

- passenger_group:

* Tags:
- passenger_supergroup
  - tags:
    - name
  - fields:
    - get_wait_list_size
    - capacity_used

- passenger_group
  - tags:
    - name
    - app_root
    - app_type

* Fields:

  - fields:
    - get_wait_list_size
    - capacity_used
    - processes_being_spawned

- passenger_process:

* Tags:

- passenger_process
  - tags:
    - group_name
    - app_root
    - supergroup_name
@@ -59,9 +80,7 @@ Measurement names:
    - code_revision
    - life_status
    - process_group_id

* Field:

  - fields:
    - concurrency
    - sessions
    - busyness
@@ -79,60 +98,11 @@ Measurement names:
    - real_memory
    - vmsize

# Example output

Using this configuration:

### Example Output:
```
[[inputs.passenger]]
  # Path of passenger-status.
  #
  # The plugin gathers metrics by parsing the XML output of passenger-status.
  # More information about the tool:
  # https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
  #
  #
  # If no path is specified, the plugin simply executes passenger-status,
  # hoping it can be found in your PATH.
  command = "passenger-status -v --show=xml"
passenger,passenger_version=5.0.17 capacity_used=23i,get_wait_list_size=0i,max=23i,process_count=23i 1452984112799414257
passenger_supergroup,name=/var/app/current/public capacity_used=23i,get_wait_list_size=0i 1452984112799496977
passenger_group,app_root=/var/app/current,app_type=rack,name=/var/app/current/public capacity_used=23i,get_wait_list_size=0i,processes_being_spawned=0i 1452984112799527021
passenger_process,app_root=/var/app/current,code_revision=899ac7f,group_name=/var/app/current/public,life_status=ALIVE,pid=11553,process_group_id=13608,supergroup_name=/var/app/current/public busyness=0i,concurrency=1i,cpu=58i,last_used=1452747071764940i,private_dirty=314900i,processed=951i,pss=319391i,real_memory=314900i,rss=418548i,sessions=0i,spawn_end_time=1452746845013365i,spawn_start_time=1452746844946982i,spawner_creation_time=1452746835922747i,swap=0i,uptime=226i,vmsize=1563580i 1452984112799571490
passenger_process,app_root=/var/app/current,code_revision=899ac7f,group_name=/var/app/current/public,life_status=ALIVE,pid=11563,process_group_id=13608,supergroup_name=/var/app/current/public busyness=2147483647i,concurrency=1i,cpu=47i,last_used=1452747071709179i,private_dirty=309240i,processed=756i,pss=314036i,real_memory=309240i,rss=418296i,sessions=1i,spawn_end_time=1452746845172460i,spawn_start_time=1452746845136882i,spawner_creation_time=1452746835922747i,swap=0i,uptime=226i,vmsize=1563608i 1452984112799638581
```

When run with:

```
./telegraf --config telegraf.conf --input-filter passenger --test
```

It produces:

```
> passenger,passenger_version=5.0.17 capacity_used=23i,get_wait_list_size=0i,max=23i,process_count=23i 1452984112799414257
> passenger_supergroup,name=/var/app/current/public capacity_used=23i,get_wait_list_size=0i 1452984112799496977
> passenger_group,app_root=/var/app/current,app_type=rack,name=/var/app/current/public capacity_used=23i,get_wait_list_size=0i,processes_being_spawned=0i 1452984112799527021
> passenger_process,app_root=/var/app/current,code_revision=899ac7f,group_name=/var/app/current/public,life_status=ALIVE,pid=11553,process_group_id=13608,supergroup_name=/var/app/current/public busyness=0i,concurrency=1i,cpu=58i,last_used=1452747071764940i,private_dirty=314900i,processed=951i,pss=319391i,real_memory=314900i,rss=418548i,sessions=0i,spawn_end_time=1452746845013365i,spawn_start_time=1452746844946982i,spawner_creation_time=1452746835922747i,swap=0i,uptime=226i,vmsize=1563580i 1452984112799571490
> passenger_process,app_root=/var/app/current,code_revision=899ac7f,group_name=/var/app/current/public,life_status=ALIVE,pid=11563,process_group_id=13608,supergroup_name=/var/app/current/public busyness=2147483647i,concurrency=1i,cpu=47i,last_used=1452747071709179i,private_dirty=309240i,processed=756i,pss=314036i,real_memory=309240i,rss=418296i,sessions=1i,spawn_end_time=1452746845172460i,spawn_start_time=1452746845136882i,spawner_creation_time=1452746835922747i,swap=0i,uptime=226i,vmsize=1563608i 1452984112799638581
```

# Note

You have to ensure that you can run the `passenger-status` command as the
telegraf user. Depending on how you install and configure passenger, this
may be an issue for you. If you are using passenger standalone, or compiled
it yourself, this is straightforward. However, if you are using the gem with
`rvm`, it may be harder to get right.

For example, with `rvm` you can use this command:

```
~/.rvm/bin/rvm default do passenger-status -v --show=xml
```

You can use `&&` and `;` in the shell command to run more complicated
commands in order to get the passenger-status output, such as loading the
rvm shell and sourcing the path:
```
command = "source .rvm/scripts/rvm && passenger-status -v --show=xml"
```

In any case, just ensure that you can run the command as the `telegraf` user
and that it produces XML output.

@@ -102,7 +102,7 @@ func (p *process) getUptime() int64 {
			uptime += value * (24 * 60 * 60)
		}
	case strings.HasSuffix(v, "h"):
		iValue := strings.TrimSuffix(v, "y")
		iValue := strings.TrimSuffix(v, "h")
		value, err := strconv.ParseInt(iValue, 10, 64)
		if err == nil {
			uptime += value * (60 * 60)
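The one-character fix above matters: trimming `"y"` instead of `"h"` left the digits unparseable, so the hours component was silently dropped. The updated test fixtures below (`2d 5h 3m 46s` = 191026 seconds) guard against exactly that. The parsing idea in isolation (a sketch, not the plugin's exact code):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseUptime converts passenger-status uptime strings like "2d 5h 3m 46s"
// into seconds, using the same suffix-trimming approach as the plugin.
func parseUptime(uptime string) int64 {
	var seconds int64
	multipliers := map[string]int64{"d": 24 * 60 * 60, "h": 60 * 60, "m": 60, "s": 1}
	for _, part := range strings.Fields(uptime) {
		for suffix, mult := range multipliers {
			if strings.HasSuffix(part, suffix) {
				// Trim the *matching* suffix; trimming the wrong letter
				// leaves the digits unparseable and the component dropped.
				if v, err := strconv.ParseInt(strings.TrimSuffix(part, suffix), 10, 64); err == nil {
					seconds += v * mult
				}
				break
			}
		}
	}
	return seconds
}

func main() {
	fmt.Println(parseUptime("2d 5h 3m 46s")) // 191026
}
```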
@@ -126,7 +126,7 @@ func TestPassengerGenerateMetric(t *testing.T) {
		"spawn_start_time": int64(1452746844946982),
		"spawn_end_time":   int64(1452746845013365),
		"last_used":        int64(1452747071764940),
		"uptime":           int64(226), // in seconds of 3m 46s
		"uptime":           int64(191026), // in seconds of 2d 5h 3m 46s
		"cpu":              int64(58),
		"rss":              int64(418548),
		"pss":              int64(319391),

@@ -219,7 +219,7 @@ var sampleStat = `
	<spawn_end_time>1452746845013365</spawn_end_time>
	<last_used>1452747071764940</last_used>
	<last_used_desc>0s ago</last_used_desc>
	<uptime>3m 46s</uptime>
	<uptime>2d 5h 3m 46s</uptime>
	<code_revision>899ac7f</code_revision>
	<life_status>ALIVE</life_status>
	<enabled>ENABLED</enabled>

@@ -263,7 +263,7 @@ var sampleStat = `
	<spawn_end_time>1452746845172460</spawn_end_time>
	<last_used>1452747071709179</last_used>
	<last_used_desc>0s ago</last_used_desc>
	<uptime>3m 46s</uptime>
	<uptime>2d 5h 3m 46s</uptime>
	<code_revision>899ac7f</code_revision>
	<life_status>ALIVE</life_status>
	<enabled>ENABLED</enabled>

@@ -1,65 +1,59 @@
# Telegraf plugin: phpfpm
# PHP-FPM Input Plugin

Get phpfpm stat using either HTTP status page or fpm socket.
Get phpfpm stats using either HTTP status page or fpm socket.

# Measurements

Meta:

- tags: `pool=poolname`

Measurement names:

- phpfpm

Measurement fields:

- accepted_conn
- listen_queue
- max_listen_queue
- listen_queue_len
- idle_processes
- active_processes
- total_processes
- max_active_processes
- max_children_reached
- slow_requests

# Example output

Using this configuration:
### Configuration:

```toml
# Read metrics of phpfpm, via HTTP status page or socket
[[inputs.phpfpm]]
  ## An array of addresses to gather stats about. Specify an ip or hostname
  ## with optional port and path
  ##
  ## Plugin can be configured in three modes (either can be used):
  ##   - http: the URL must start with http:// or https://, ie:
  ##       "http://localhost/status"
  ##       "http://192.168.130.1/status?full"
  ##
  ##   - unixsocket: path to fpm socket, ie:
  ##       "/var/run/php5-fpm.sock"
  ##     or using a custom fpm status path:
  ##       "/var/run/php5-fpm.sock:fpm-custom-status-path"
  ##
  ##   - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
  ##       "fcgi://10.0.0.12:9000/status"
  ##       "cgi://10.0.10.12:9001/status"
  ##
  ## Example of gathering from both a local socket and a remote host:
  ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
  urls = ["http://localhost/status"]
```
[phpfpm]
# An array of addresses to gather stats about. Specify an ip or hostname
# with optional port and path, ie localhost, 10.10.3.33/server-status, etc.
#
# We can configure in three modes:
#   - unixsocket: the string is the path to the fpm socket, like
#     /var/run/php5-fpm.sock
#   - http: the URL has to start with http:// or https://
#   - fcgi: the URL has to start with fcgi:// or cgi://, and the socket port must be present
#
# If no servers are specified, then default to 127.0.0.1/server-status
urls = ["http://localhost/status", "10.0.0.12:/var/run/php5-fpm-www2.sock", "fcgi://10.0.0.12:9000/status"]
```

When run with:

```
./telegraf --config telegraf.conf --input-filter phpfpm --test
```

It produces:

```
* Plugin: phpfpm, Collection 1
> phpfpm,pool=www accepted_conn=13i,active_processes=2i,idle_processes=1i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083331187
> phpfpm,pool=www2 accepted_conn=12i,active_processes=1i,idle_processes=2i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083691422
> phpfpm,pool=www3 accepted_conn=11i,active_processes=1i,idle_processes=2i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083691658
```

## Note

When using `unixsocket`, you have to ensure that telegraf runs on the same
host and that the socket path is accessible to the telegraf user.

### Metrics:

- phpfpm
  - tags:
    - pool
    - url
  - fields:
    - accepted_conn
    - listen_queue
    - max_listen_queue
    - listen_queue_len
    - idle_processes
    - active_processes
    - total_processes
    - max_active_processes
    - max_children_reached
    - slow_requests

# Example Output

```
phpfpm,pool=www accepted_conn=13i,active_processes=2i,idle_processes=1i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083331187
phpfpm,pool=www2 accepted_conn=12i,active_processes=1i,idle_processes=2i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083691422
phpfpm,pool=www3 accepted_conn=11i,active_processes=1i,idle_processes=2i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083691658
```

@@ -148,11 +148,11 @@ func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error {
		return err
	}

	return g.gatherFcgi(fcgi, statusPath, acc)
	return g.gatherFcgi(fcgi, statusPath, acc, addr)
}

// Gather stat using fcgi protocol
func (g *phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc telegraf.Accumulator) error {
func (g *phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc telegraf.Accumulator, addr string) error {
	fpmOutput, fpmErr, err := fcgi.Request(map[string]string{
		"SCRIPT_NAME":     "/" + statusPath,
		"SCRIPT_FILENAME": statusPath,
@@ -164,7 +164,7 @@ func (g *phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc telegraf.Accumula
	}, "/"+statusPath)

	if len(fpmErr) == 0 && err == nil {
		importMetric(bytes.NewReader(fpmOutput), acc)
		importMetric(bytes.NewReader(fpmOutput), acc, addr)
		return nil
	} else {
		return fmt.Errorf("Unable parse phpfpm status. Error: %v %v", string(fpmErr), err)
@@ -192,12 +192,12 @@ func (g *phpfpm) gatherHttp(addr string, acc telegraf.Accumulator) error {
			addr, err)
	}

	importMetric(res.Body, acc)
	importMetric(res.Body, acc, addr)
	return nil
}

// Import stat data into Telegraf system
func importMetric(r io.Reader, acc telegraf.Accumulator) (poolStat, error) {
func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) (poolStat, error) {
	stats := make(poolStat)
	var currentPool string

@@ -240,6 +240,7 @@ func importMetric(r io.Reader, acc telegraf.Accumulator) (poolStat, error) {
	for pool := range stats {
		tags := map[string]string{
			"pool": pool,
			"url":  addr,
		}
		fields := make(map[string]interface{})
		for k, v := range stats[pool] {

@@ -40,6 +40,7 @@ func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) {

	tags := map[string]string{
		"pool": "www",
		"url":  ts.URL,
	}

	fields := map[string]interface{}{
@@ -80,6 +81,7 @@ func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) {

	tags := map[string]string{
		"pool": "www",
		"url":  r.Urls[0],
	}

	fields := map[string]interface{}{
@@ -124,6 +126,7 @@ func TestPhpFpmGeneratesMetrics_From_Socket(t *testing.T) {

	tags := map[string]string{
		"pool": "www",
		"url":  r.Urls[0],
	}

	fields := map[string]interface{}{
@@ -168,6 +171,7 @@ func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) {

	tags := map[string]string{
		"pool": "www",
		"url":  r.Urls[0],
	}

	fields := map[string]interface{}{
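With the new `url` tag, pools that share a name across servers no longer collapse into a single series; the emitted points now look like this (illustrative values):

```
phpfpm,pool=www,url=http://10.0.0.1/status accepted_conn=13i,active_processes=2i 1453011293083331187
phpfpm,pool=www,url=http://10.0.0.2/status accepted_conn=7i,active_processes=1i 1453011293083331187
```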
@@ -17,6 +17,8 @@ urls = ["www.google.com"] # required
  # ping_interval = 1.0
  ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
  # timeout = 1.0
  ## total-ping deadline, in s. 0 == no deadline (ping -w <DEADLINE>)
  # deadline = 10
  ## interface or source address to send ping from (ping -I <INTERFACE/SRC_ADDR>)
  ## on Darwin and Freebsd only source address possible: (ping -S <SRC_ADDR>)
  # interface = ""

@@ -34,6 +34,9 @@ type Ping struct {
	// Ping timeout, in seconds. 0 means no timeout (ping -W <TIMEOUT>)
	Timeout float64

	// Ping deadline, in seconds. 0 means no deadline. (ping -w <DEADLINE>)
	Deadline int

	// Interface or source address to send ping from (ping -I/-S <INTERFACE/SRC_ADDR>)
	Interface string

@@ -60,6 +63,8 @@ const sampleConfig = `
  # ping_interval = 1.0
  ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
  # timeout = 1.0
  ## total-ping deadline, in s. 0 == no deadline (ping -w <DEADLINE>)
  # deadline = 10
  ## interface or source address to send ping from (ping -I <INTERFACE/SRC_ADDR>)
  ## on Darwin and Freebsd only source address possible: (ping -S <SRC_ADDR>)
  # interface = ""
@@ -108,9 +113,9 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error {
	// Combine go err + stderr output
	out = strings.TrimSpace(out)
	if len(out) > 0 {
		acc.AddError(fmt.Errorf("%s, %s", out, err))
		acc.AddError(fmt.Errorf("host %s: %s, %s", u, out, err))
	} else {
		acc.AddError(err)
		acc.AddError(fmt.Errorf("host %s: %s", u, err))
	}
	acc.AddFields("ping", fields, tags)
	return
@@ -129,16 +134,16 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error {
	fields["packets_transmitted"] = trans
	fields["packets_received"] = rec
	fields["percent_packet_loss"] = loss
	if min > 0 {
	if min >= 0 {
		fields["minimum_response_ms"] = min
	}
	if avg > 0 {
	if avg >= 0 {
		fields["average_response_ms"] = avg
	}
	if max > 0 {
	if max >= 0 {
		fields["maximum_response_ms"] = max
	}
	if stddev > 0 {
	if stddev >= 0 {
		fields["standard_deviation_ms"] = stddev
	}
	acc.AddFields("ping", fields, tags)
@@ -179,6 +184,17 @@ func (p *Ping) args(url string) []string {
			args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', 1, 64))
		}
	}
	if p.Deadline > 0 {
		switch runtime.GOOS {
		case "darwin":
			args = append(args, "-t", strconv.Itoa(p.Deadline))
		case "linux":
			args = append(args, "-w", strconv.Itoa(p.Deadline))
		default:
			// Not sure the best option here, just assume GNU ping?
			args = append(args, "-w", strconv.Itoa(p.Deadline))
		}
	}
	if p.Interface != "" {
		switch runtime.GOOS {
		case "linux":
@@ -207,7 +223,7 @@ func (p *Ping) args(url string) []string {
// It returns (<transmitted packets>, <received packets>, <average response>)
func processPingOutput(out string) (int, int, float64, float64, float64, float64, error) {
	var trans, recv int
	var min, avg, max, stddev float64
	var min, avg, max, stddev float64 = -1.0, -1.0, -1.0, -1.0
	// Set this error to nil if we find a 'transmitted' line
	err := errors.New("Fatal error processing ping output")
	lines := strings.Split(out, "\n")
@@ -255,6 +271,7 @@ func init() {
			PingInterval: 1.0,
			Count:        1,
			Timeout:      1.0,
			Deadline:     10,
		}
	})
}
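Initializing the timings to -1 instead of 0 is what makes the `>= 0` checks above safe: 0 is a legitimate (rounded-down) round-trip time, while -1 can only mean "the parser never found a value". The pattern in isolation (a minimal sketch):

```go
package main

import "fmt"

// addTiming records a field only when the parser actually found a value.
// -1 is the "not present" sentinel because a real timing can be 0 ms.
func addTiming(fields map[string]interface{}, name string, v float64) {
	if v >= 0 {
		fields[name] = v
	}
}

func main() {
	fields := map[string]interface{}{}
	addTiming(fields, "minimum_response_ms", 0)  // kept: 0 is a valid timing
	addTiming(fields, "average_response_ms", -1) // dropped: nothing was parsed
	fmt.Println(fields)
}
```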
@@ -104,14 +104,22 @@ func TestArgs(t *testing.T) {
	case "darwin":
		expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W",
			"12000.0", "www.google.com"}
	case "freebsd":
		expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-t",
			"12.0", "www.google.com"}
	default:
		expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W",
			"12.0", "www.google.com"}
	}

	p.Deadline = 24
	actual = p.args("www.google.com")
	switch runtime.GOOS {
	case "darwin":
		expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W",
			"12000.0", "-t", "24", "www.google.com"}
	default:
		expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W",
			"12.0", "-w", "24", "www.google.com"}
	}

	sort.Strings(actual)
	sort.Strings(expected)
	assert.True(t, reflect.DeepEqual(expected, actual),
@@ -122,13 +130,10 @@ func TestArgs(t *testing.T) {
	switch runtime.GOOS {
	case "darwin":
		expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W",
			"12000.0", "-i", "1.2", "www.google.com"}
	case "freebsd":
		expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-t",
			"12.0", "-i", "1.2", "www.google.com"}
			"12000.0", "-t", "24", "-i", "1.2", "www.google.com"}
	default:
		expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W",
			"12.0", "-i", "1.2", "www.google.com"}
			"12.0", "-w", "24", "-i", "1.2", "www.google.com"}
	}
	sort.Strings(actual)
	sort.Strings(expected)
@@ -263,3 +268,26 @@ func TestFatalPingGather(t *testing.T) {
	assert.False(t, acc.HasMeasurement("maximum_response_ms"),
		"Fatal ping should not have packet measurements")
}

func TestErrorWithHostNamePingGather(t *testing.T) {
	params := []struct {
		out   string
		error error
	}{
		{"", errors.New("host www.amazon.com: So very bad")},
		{"so bad", errors.New("host www.amazon.com: so bad, So very bad")},
	}

	for _, param := range params {
		var acc testutil.Accumulator
		p := Ping{
			Urls: []string{"www.amazon.com"},
			pingHost: func(timeout float64, args ...string) (string, error) {
				return param.out, errors.New("So very bad")
			},
		}
		acc.GatherError(p.Gather)
		assert.True(t, len(acc.Errors) > 0)
		assert.Contains(t, acc.Errors, param.error)
	}
}

@@ -93,32 +93,32 @@ func processPingOutput(out string) (int, int, int, int, int, int, error) {

	// stats data should contain 4 members: entireExpression + ( Send, Receive, Lost )
	if len(stats) != 4 {
		return 0, 0, 0, 0, 0, 0, err
		return 0, 0, 0, -1, -1, -1, err
	}
	trans, err := strconv.Atoi(stats[1])
	if err != nil {
		return 0, 0, 0, 0, 0, 0, err
		return 0, 0, 0, -1, -1, -1, err
	}
	receivedPacket, err := strconv.Atoi(stats[2])
	if err != nil {
		return 0, 0, 0, 0, 0, 0, err
		return 0, 0, 0, -1, -1, -1, err
	}

	// aproxs data should contain 4 members: entireExpression + ( min, max, avg )
	if len(aproxs) != 4 {
		return trans, receivedReply, receivedPacket, 0, 0, 0, err
		return trans, receivedReply, receivedPacket, -1, -1, -1, err
	}
	min, err := strconv.Atoi(aproxs[1])
	if err != nil {
		return trans, receivedReply, receivedPacket, 0, 0, 0, err
		return trans, receivedReply, receivedPacket, -1, -1, -1, err
	}
	max, err := strconv.Atoi(aproxs[2])
	if err != nil {
		return trans, receivedReply, receivedPacket, 0, 0, 0, err
		return trans, receivedReply, receivedPacket, -1, -1, -1, err
	}
	avg, err := strconv.Atoi(aproxs[3])
	if err != nil {
		return 0, 0, 0, 0, 0, 0, err
		return 0, 0, 0, -1, -1, -1, err
	}

	return trans, receivedReply, receivedPacket, avg, min, max, err
@@ -201,13 +201,13 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error {
	fields["packets_received"] = receivePacket
	fields["percent_packet_loss"] = lossPackets
	fields["percent_reply_loss"] = lossReply
	if avg > 0 {
	if avg >= 0 {
		fields["average_response_ms"] = float64(avg)
	}
	if min > 0 {
	if min >= 0 {
		fields["minimum_response_ms"] = float64(min)
	}
	if max > 0 {
	if max >= 0 {
		fields["maximum_response_ms"] = float64(max)
	}
	acc.AddFields("ping", fields, tags)

@@ -111,9 +111,11 @@ func TestParseValidPrometheus(t *testing.T) {
		"gauge": float64(1),
	}, metrics[0].Fields())
	assert.Equal(t, map[string]string{
		"osVersion":     "CentOS Linux 7 (Core)",
		"dockerVersion": "1.8.2",
		"kernelVersion": "3.10.0-229.20.1.el7.x86_64",
		"osVersion":        "CentOS Linux 7 (Core)",
		"cadvisorRevision": "",
		"cadvisorVersion":  "",
		"dockerVersion":    "1.8.2",
		"kernelVersion":    "3.10.0-229.20.1.el7.x86_64",
	}, metrics[0].Tags())

	// Counter value

@@ -1,6 +1,7 @@
package smart

import (
	"bufio"
	"fmt"
	"os/exec"
	"path"
@@ -202,7 +203,10 @@ func gatherDisk(acc telegraf.Accumulator, usesudo, attributes bool, smartctl, no
	device_fields := make(map[string]interface{})
	device_fields["exit_status"] = exitStatus

	for _, line := range strings.Split(outStr, "\n") {
	scanner := bufio.NewScanner(strings.NewReader(outStr))

	for scanner.Scan() {
		line := scanner.Text()

		model := modelInInfo.FindStringSubmatch(line)
		if len(model) > 1 {
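`bufio.Scanner` walks the smartctl output line by line instead of materializing every line up front as `strings.Split` does; the two approaches side by side (a standalone sketch):

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	outStr := "line one\nline two\nline three"

	// Old approach: allocates a slice holding every line before iterating.
	_ = strings.Split(outStr, "\n")

	// New approach: the scanner yields one line at a time.
	scanner := bufio.NewScanner(strings.NewReader(outStr))
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
}
```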
@@ -85,7 +85,7 @@ Telegraf config:

[[inputs.snmp.table]]
  oid = "TEST::testTable"
  inherit_tags = "hostname"
  inherit_tags = [ "hostname" ]
```

Resulting output:

@@ -26,6 +26,8 @@ type streamSocketListener struct {
	net.Listener
	*SocketListener

	sockType string

	connections    map[string]net.Conn
	connectionsMtx sync.Mutex
}
@@ -42,6 +44,14 @@ func (ssl *streamSocketListener) listen() {
			break
		}

		if ssl.ReadBufferSize > 0 {
			if srb, ok := c.(setReadBufferer); ok {
				srb.SetReadBuffer(ssl.ReadBufferSize)
			} else {
				log.Printf("W! Unable to set read buffer on a %s socket", ssl.sockType)
			}
		}

		ssl.connectionsMtx.Lock()
		if ssl.MaxConnections > 0 && len(ssl.connections) >= ssl.MaxConnections {
			ssl.connectionsMtx.Unlock()
@@ -237,17 +247,10 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error {
		return err
	}

	if sl.ReadBufferSize > 0 {
		if srb, ok := l.(setReadBufferer); ok {
			srb.SetReadBuffer(sl.ReadBufferSize)
		} else {
			log.Printf("W! Unable to set read buffer on a %s socket", spl[0])
		}
	}

	ssl := &streamSocketListener{
		Listener:       l,
		SocketListener: sl,
		sockType:       spl[0],
	}

	sl.Closer = ssl
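Moving the `SetReadBuffer` call from the listener to each accepted connection is why the type assertion runs per connection: not every `net.Conn` supports read-buffer tuning. A standalone sketch of the pattern (the interface shape is inferred from its use here and is an assumption):

```go
package main

import (
	"fmt"
	"net"
)

// Assumed shape of the plugin's setReadBufferer interface, inferred from use.
type setReadBufferer interface {
	SetReadBuffer(bytes int) error
}

func tune(c net.Conn, size int) {
	// *net.TCPConn, *net.UDPConn, and *net.UnixConn implement SetReadBuffer;
	// an arbitrary net.Conn may not.
	if srb, ok := c.(setReadBufferer); ok {
		srb.SetReadBuffer(size)
	} else {
		fmt.Println("connection type does not support read buffer tuning")
	}
}

func main() {
	c1, c2 := net.Pipe() // in-memory conn: no OS read buffer to tune
	defer c1.Close()
	defer c2.Close()
	tune(c1, 1024) // falls through to the log branch
}
```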
@@ -1,6 +1,8 @@
package socket_listener

import (
	"bytes"
	"log"
	"net"
	"os"
	"testing"
@@ -11,9 +13,24 @@ import (
	"github.com/stretchr/testify/require"
)

// testEmptyLog is a helper function to ensure no data is written to log.
// Should be called at the start of the test, and returns a function which should run at the end.
func testEmptyLog(t *testing.T) func() {
	buf := bytes.NewBuffer(nil)
	log.SetOutput(buf)

	return func() {
		log.SetOutput(os.Stderr)
		assert.Empty(t, string(buf.Bytes()), "log not empty")
	}
}

func TestSocketListener_tcp(t *testing.T) {
	defer testEmptyLog(t)()

	sl := newSocketListener()
	sl.ServiceAddress = "tcp://127.0.0.1:0"
	sl.ReadBufferSize = 1024

	acc := &testutil.Accumulator{}
	err := sl.Start(acc)
@@ -27,8 +44,11 @@ func TestSocketListener_tcp(t *testing.T) {
}

func TestSocketListener_udp(t *testing.T) {
	defer testEmptyLog(t)()

	sl := newSocketListener()
	sl.ServiceAddress = "udp://127.0.0.1:0"
	sl.ReadBufferSize = 1024

	acc := &testutil.Accumulator{}
	err := sl.Start(acc)
@@ -42,9 +62,12 @@ func TestSocketListener_udp(t *testing.T) {
}

func TestSocketListener_unix(t *testing.T) {
	defer testEmptyLog(t)()

	os.Create("/tmp/telegraf_test.sock")
	sl := newSocketListener()
	sl.ServiceAddress = "unix:///tmp/telegraf_test.sock"
	sl.ReadBufferSize = 1024

	acc := &testutil.Accumulator{}
	err := sl.Start(acc)
@@ -58,9 +81,12 @@ func TestSocketListener_unix(t *testing.T) {
}

func TestSocketListener_unixgram(t *testing.T) {
	defer testEmptyLog(t)()

	os.Create("/tmp/telegraf_test.sock")
	sl := newSocketListener()
	sl.ServiceAddress = "unixgram:///tmp/telegraf_test.sock"
	sl.ReadBufferSize = 1024

	acc := &testutil.Accumulator{}
	err := sl.Start(acc)

@@ -5,6 +5,8 @@ The [solr](http://lucene.apache.org/solr/) plugin collects stats via the

More about [performance statistics](https://cwiki.apache.org/confluence/display/solr/Performance+Statistics+Reference)

Tested from 3.5 to 6.*

### Configuration:

```
@@ -3,6 +3,7 @@ package solr
import (
	"encoding/json"
	"fmt"
	"math"
	"net/http"
	"strconv"
	"strings"
@@ -78,22 +79,7 @@ type Core struct {
// QueryHandler is an exported type that
// contains query handler metrics
type QueryHandler struct {
	Stats struct {
		One5minRateReqsPerSecond float64 `json:"15minRateReqsPerSecond"`
		FiveMinRateReqsPerSecond float64 `json:"5minRateReqsPerSecond"`
		Seven5thPcRequestTime    float64 `json:"75thPcRequestTime"`
		Nine5thPcRequestTime     float64 `json:"95thPcRequestTime"`
		Nine99thPcRequestTime    float64 `json:"999thPcRequestTime"`
		Nine9thPcRequestTime     float64 `json:"99thPcRequestTime"`
		AvgRequestsPerSecond     float64 `json:"avgRequestsPerSecond"`
		AvgTimePerRequest        float64 `json:"avgTimePerRequest"`
		Errors                   int64   `json:"errors"`
		HandlerStart             int64   `json:"handlerStart"`
		MedianRequestTime        float64 `json:"medianRequestTime"`
		Requests                 int64   `json:"requests"`
		Timeouts                 int64   `json:"timeouts"`
		TotalTime                float64 `json:"totalTime"`
	} `json:"stats"`
	Stats interface{} `json:"stats"`
}

// UpdateHandler is an exported type that
@@ -286,22 +272,22 @@ func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansDa
	}

	for name, metrics := range queryMetrics {
		coreFields := map[string]interface{}{
			"15min_rate_reqs_per_second": metrics.Stats.One5minRateReqsPerSecond,
			"5min_rate_reqs_per_second":  metrics.Stats.FiveMinRateReqsPerSecond,
			"75th_pc_request_time":       metrics.Stats.Seven5thPcRequestTime,
			"95th_pc_request_time":       metrics.Stats.Nine5thPcRequestTime,
			"999th_pc_request_time":      metrics.Stats.Nine99thPcRequestTime,
			"99th_pc_request_time":       metrics.Stats.Nine9thPcRequestTime,
			"avg_requests_per_second":    metrics.Stats.AvgRequestsPerSecond,
			"avg_time_per_request":       metrics.Stats.AvgTimePerRequest,
			"errors":                     metrics.Stats.Errors,
			"handler_start":              metrics.Stats.HandlerStart,
			"median_request_time":        metrics.Stats.MedianRequestTime,
			"requests":                   metrics.Stats.Requests,
			"timeouts":                   metrics.Stats.Timeouts,
			"total_time":                 metrics.Stats.TotalTime,
		var coreFields map[string]interface{}

		if metrics.Stats == nil {
			continue
		}

		switch v := metrics.Stats.(type) {
		case []interface{}:
			m := convertArrayToMap(v)
			coreFields = convertQueryHandlerMap(m)
		case map[string]interface{}:
			coreFields = convertQueryHandlerMap(v)
		default:
			continue
		}

		acc.AddFields(
			"solr_queryhandler",
			coreFields,
@@ -310,10 +296,44 @@ func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansDa
			"handler": name},
			time,
		)

	}
	return nil
}

func convertArrayToMap(values []interface{}) map[string]interface{} {
	var key string
	result := make(map[string]interface{})
	for i, item := range values {
		if i%2 == 0 {
			key = fmt.Sprintf("%v", item)
		} else {
			result[key] = item
		}
	}

	return result
}
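`convertArrayToMap` exists because Solr 3.x serializes some handler stats as a flat `[key, value, key, value, ...]` array (see the `/dataimport` fixture later in this changeset) rather than as an object. A quick illustration, assuming the function as defined above:

```go
package solr

import "fmt"

// demoConvertArrayToMap shows the flattening: even positions become keys,
// each followed by its value.
func demoConvertArrayToMap() {
	stats := []interface{}{
		"requests", 2442,
		"errors", 0,
		"avgTimePerRequest", 0.7158067,
	}
	m := convertArrayToMap(stats)
	fmt.Println(m["requests"], m["errors"], m["avgTimePerRequest"]) // 2442 0 0.7158067
}
```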

func convertQueryHandlerMap(value map[string]interface{}) map[string]interface{} {
	return map[string]interface{}{
		"15min_rate_reqs_per_second": getFloat(value["15minRateReqsPerSecond"]),
		"5min_rate_reqs_per_second":  getFloat(value["5minRateReqsPerSecond"]),
		"75th_pc_request_time":       getFloat(value["75thPcRequestTime"]),
		"95th_pc_request_time":       getFloat(value["95thPcRequestTime"]),
		"99th_pc_request_time":       getFloat(value["99thPcRequestTime"]),
		"999th_pc_request_time":      getFloat(value["999thPcRequestTime"]),
		"avg_requests_per_second":    getFloat(value["avgRequestsPerSecond"]),
		"avg_time_per_request":       getFloat(value["avgTimePerRequest"]),
		"errors":                     getInt(value["errors"]),
		"handler_start":              getInt(value["handlerStart"]),
		"median_request_time":        getFloat(value["medianRequestTime"]),
		"requests":                   getInt(value["requests"]),
		"timeouts":                   getInt(value["timeouts"]),
		"total_time":                 getFloat(value["totalTime"]),
	}
}

// Add update metrics section to accumulator
func addUpdateHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error {
	var updateMetrics map[string]UpdateHandler
@@ -366,13 +386,34 @@ func getFloat(unk interface{}) float64 {
	case float64:
		return i
	case string:
		f, _ := strconv.ParseFloat(i, 64)
		f, err := strconv.ParseFloat(i, 64)
		if err != nil || math.IsNaN(f) {
			return float64(0)
		}
		return f
	default:
		return float64(0)
	}
}

// Get int64 from interface
func getInt(unk interface{}) int64 {
	switch i := unk.(type) {
	case int64:
		return i
	case float64:
		return int64(i)
	case string:
		v, err := strconv.ParseInt(i, 10, 64)
		if err != nil {
			return int64(0)
		}
		return v
	default:
		return int64(0)
	}
}
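Idle Solr handlers report `avgTimePerRequest` as the literal string `"NaN"` (visible in the fixtures below), which is why `getFloat` now rejects NaN instead of writing it into a field; and since `encoding/json` decodes untyped numbers as `float64`, `getInt` needs its `float64` branch. A quick illustration (assuming the helpers above):

```go
package solr

import "fmt"

func demoConversionHelpers() {
	fmt.Println(getFloat("NaN"))       // 0 -- rejected rather than stored as NaN
	fmt.Println(getFloat("0.7158067")) // 0.7158067
	fmt.Println(getInt(float64(2442))) // 2442 -- JSON numbers arrive as float64
}
```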

// Add cache metrics section to accumulator
func addCacheMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error {
	if len(mBeansData.SolrMbeans) < 8 {

@@ -43,24 +43,37 @@ func TestGatherStats(t *testing.T) {
		map[string]string{"core": "main", "handler": "filterCache"})
}

func createMockServer() *httptest.Server {
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if strings.Contains(r.URL.Path, "/solr/admin/cores") {
			w.WriteHeader(http.StatusOK)
			fmt.Fprintln(w, statusResponse)
		} else if strings.Contains(r.URL.Path, "solr/main/admin") {
			w.WriteHeader(http.StatusOK)
			fmt.Fprintln(w, mBeansMainResponse)
		} else if strings.Contains(r.URL.Path, "solr/core1/admin") {
			w.WriteHeader(http.StatusOK)
			fmt.Fprintln(w, mBeansCore1Response)
		} else {
			w.WriteHeader(http.StatusNotFound)
			fmt.Fprintln(w, "nope")
		}
	}))
}
func TestSolr3GatherStats(t *testing.T) {
	ts := createMockSolr3Server()
	solr := NewSolr()
	solr.Servers = []string{ts.URL}
	var acc testutil.Accumulator
	require.NoError(t, solr.Gather(&acc))

	acc.AssertContainsTaggedFields(t, "solr_admin",
		solrAdminMainCoreStatusExpected,
		map[string]string{"core": "main"})

	acc.AssertContainsTaggedFields(t, "solr_admin",
		solrAdminCore1StatusExpected,
		map[string]string{"core": "core1"})

	acc.AssertContainsTaggedFields(t, "solr_core",
		solr3CoreExpected,
		map[string]string{"core": "main", "handler": "searcher"})

	acc.AssertContainsTaggedFields(t, "solr_queryhandler",
		solr3QueryHandlerExpected,
		map[string]string{"core": "main", "handler": "org.apache.solr.handler.component.SearchHandler"})

	acc.AssertContainsTaggedFields(t, "solr_updatehandler",
		solr3UpdateHandlerExpected,
		map[string]string{"core": "main", "handler": "updateHandler"})

	acc.AssertContainsTaggedFields(t, "solr_cache",
		solr3CacheExpected,
		map[string]string{"core": "main", "handler": "filterCache"})
}
func TestNoCoreDataHandling(t *testing.T) {
	ts := createMockNoCoreDataServer()
	solr := NewSolr()
@@ -83,6 +96,24 @@ func TestNoCoreDataHandling(t *testing.T) {

}

func createMockServer() *httptest.Server {
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if strings.Contains(r.URL.Path, "/solr/admin/cores") {
			w.WriteHeader(http.StatusOK)
			fmt.Fprintln(w, statusResponse)
		} else if strings.Contains(r.URL.Path, "solr/main/admin") {
			w.WriteHeader(http.StatusOK)
			fmt.Fprintln(w, mBeansMainResponse)
		} else if strings.Contains(r.URL.Path, "solr/core1/admin") {
			w.WriteHeader(http.StatusOK)
			fmt.Fprintln(w, mBeansCore1Response)
		} else {
			w.WriteHeader(http.StatusNotFound)
			fmt.Fprintln(w, "nope")
		}
	}))
}

func createMockNoCoreDataServer() *httptest.Server {
	var nodata string
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -101,3 +132,21 @@ func createMockNoCoreDataServer() *httptest.Server {
		}
	}))
}

func createMockSolr3Server() *httptest.Server {
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if strings.Contains(r.URL.Path, "/solr/admin/cores") {
			w.WriteHeader(http.StatusOK)
			fmt.Fprintln(w, statusResponse)
		} else if strings.Contains(r.URL.Path, "solr/main/admin") {
			w.WriteHeader(http.StatusOK)
			fmt.Fprintln(w, mBeansSolr3MainResponse)
		} else if strings.Contains(r.URL.Path, "solr/core1/admin") {
			w.WriteHeader(http.StatusOK)
			fmt.Fprintln(w, mBeansSolr3MainResponse)
		} else {
			w.WriteHeader(http.StatusNotFound)
			fmt.Fprintln(w, "nope")
		}
	}))
}

789 plugins/inputs/solr/testdata3_test.go Normal file
@@ -0,0 +1,789 @@
package solr

const mBeansSolr3MainResponse = `{
  "solr-mbeans": [
    "CORE",
    {
      "searcher": {
        "class": "org.apache.solr.search.SolrIndexSearcher",
        "version": "1.0",
        "description": "index searcher",
        "srcId": "$Id: SolrIndexSearcher.java 1201291 2011-11-12 18:02:03Z simonw $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java $",
        "docs": null,
        "stats": {
          "searcherName": "Searcher@4eea69e8 main",
          "caching": true,
          "numDocs": 117166,
          "maxDoc": 117305,
          "reader": "SolrIndexReader{this=2ee29b0,r=ReadOnlyDirectoryReader@2ee29b0,refCnt=1,segments=5}",
          "readerDir": "org.apache.lucene.store.MMapDirectory:org.apache.lucene.store.MMapDirectory@/usr/solrData/search/index lockFactory=org.apache.lucene.store.NativeFSLockFactory@178671d8",
          "indexVersion": 1491861981523,
          "openedAt": "2018-01-17T20:14:54.677Z",
          "registeredAt": "2018-01-17T20:14:54.679Z",
          "warmupTime": 1
        }
      },
      "core": {
        "class": "search",
        "version": "1.0",
        "description": "SolrCore",
        "srcId": "$Id: SolrCore.java 1190108 2011-10-28 01:13:25Z yonik $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/core/SolrCore.java $",
        "docs": null,
        "stats": {
          "coreName": "search",
          "startTime": "2018-01-16T06:15:53.152Z",
          "refCount": 2,
          "aliases": [
            "search"
          ]
        }
      },
      "Searcher@4eea69e8 main": {
        "class": "org.apache.solr.search.SolrIndexSearcher",
        "version": "1.0",
        "description": "index searcher",
        "srcId": "$Id: SolrIndexSearcher.java 1201291 2011-11-12 18:02:03Z simonw $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java $",
        "docs": null,
        "stats": {
          "searcherName": "Searcher@4eea69e8 main",
          "caching": true,
          "numDocs": 117166,
          "maxDoc": 117305,
          "reader": "SolrIndexReader{this=2ee29b0,r=ReadOnlyDirectoryReader@2ee29b0,refCnt=1,segments=5}",
          "readerDir": "org.apache.lucene.store.MMapDirectory:org.apache.lucene.store.MMapDirectory@/usr/solrData/search/index lockFactory=org.apache.lucene.store.NativeFSLockFactory@178671d8",
          "indexVersion": 1491861981523,
          "openedAt": "2018-01-17T20:14:54.677Z",
          "registeredAt": "2018-01-17T20:14:54.679Z",
          "warmupTime": 1
        }
      }
    },
    "QUERYHANDLER",
    {
      "/admin/system": {
        "class": "org.apache.solr.handler.admin.SystemInfoHandler",
        "version": "$Revision: 1067172 $",
        "description": "Get System Info",
        "srcId": "$Id: SystemInfoHandler.java 1067172 2011-02-04 12:50:14Z uschindler $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java $",
        "docs": null,
        "stats": {
          "handlerStart": 1516083353227,
          "requests": 0,
          "errors": 0,
          "timeouts": 0,
          "totalTime": 0,
          "avgTimePerRequest": "NaN",
          "avgRequestsPerSecond": 0
        }
      },
      "/admin/plugins": {
        "class": "org.apache.solr.handler.admin.PluginInfoHandler",
        "version": "$Revision: 1052938 $",
        "description": "Registry",
        "srcId": "$Id: PluginInfoHandler.java 1052938 2010-12-26 20:21:48Z rmuir $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/admin/PluginInfoHandler.java $",
        "docs": null,
        "stats": {
          "handlerStart": 1516083353227,
          "requests": 0,
          "errors": 0,
          "timeouts": 0,
          "totalTime": 0,
          "avgTimePerRequest": "NaN",
          "avgRequestsPerSecond": 0
        }
      },
      "/admin/file": {
        "class": "org.apache.solr.handler.admin.ShowFileRequestHandler",
        "version": "$Revision: 1146806 $",
        "description": "Admin Get File -- view config files directly",
        "srcId": "$Id: ShowFileRequestHandler.java 1146806 2011-07-14 17:01:37Z erick $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java $",
        "docs": null,
        "stats": {
          "handlerStart": 1516083353227,
          "requests": 0,
          "errors": 0,
          "timeouts": 0,
          "totalTime": 0,
          "avgTimePerRequest": "NaN",
          "avgRequestsPerSecond": 0
        }
      },
      "/update/javabin": {
        "class": "org.apache.solr.handler.BinaryUpdateRequestHandler",
        "version": "$Revision: 1165749 $",
        "description": "Add/Update multiple documents with javabin format",
        "srcId": "$Id: BinaryUpdateRequestHandler.java 1165749 2011-09-06 16:20:07Z janhoy $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/BinaryUpdateRequestHandler.java $",
        "docs": null,
        "stats": {
          "handlerStart": 1516083353158,
          "requests": 0,
          "errors": 0,
          "timeouts": 0,
          "totalTime": 0,
          "avgTimePerRequest": "NaN",
          "avgRequestsPerSecond": 0
        }
      },
      "/admin/luke": {
        "class": "org.apache.solr.handler.admin.LukeRequestHandler",
        "version": "$Revision: 1201265 $",
        "description": "Lucene Index Browser. Inspired and modeled after Luke: http://www.getopt.org/luke/",
        "srcId": "$Id: LukeRequestHandler.java 1201265 2011-11-12 14:09:28Z mikemccand $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java $",
        "docs": [
          "java.net.URL:http://wiki.apache.org/solr/LukeRequestHandler"
        ],
        "stats": {
          "handlerStart": 1516083353227,
          "requests": 0,
          "errors": 0,
          "timeouts": 0,
          "totalTime": 0,
          "avgTimePerRequest": "NaN",
          "avgRequestsPerSecond": 0
        }
      },
      "/dataimport": {
        "class": "org.apache.solr.handler.dataimport.DataImportHandler",
        "version": "1.0",
        "description": "Manage data import from databases to Solr",
        "srcId": "$Id: DataImportHandler.java 1171306 2011-09-15 22:43:33Z janhoy $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImportHandler.java $",
        "docs": null,
        "stats": [
          "Status",
          "IDLE",
          "Documents Processed",
          "java.util.concurrent.atomic.AtomicLong:1",
          "Requests made to DataSource",
          "java.util.concurrent.atomic.AtomicLong:2",
          "Rows Fetched",
          "java.util.concurrent.atomic.AtomicLong:2",
          "Documents Deleted",
          "java.util.concurrent.atomic.AtomicLong:0",
          "Documents Skipped",
          "java.util.concurrent.atomic.AtomicLong:0",
          "Total Documents Processed",
          "java.util.concurrent.atomic.AtomicLong:351705",
          "Total Requests made to DataSource",
          "java.util.concurrent.atomic.AtomicLong:1438",
          "Total Rows Fetched",
          "java.util.concurrent.atomic.AtomicLong:876393",
          "Total Documents Deleted",
          "java.util.concurrent.atomic.AtomicLong:0",
          "Total Documents Skipped",
          "java.util.concurrent.atomic.AtomicLong:0",
          "handlerStart",
          1516083353155,
          "requests",
          2442,
          "errors",
          0,
          "timeouts",
          0,
          "totalTime",
          1748,
          "avgTimePerRequest",
          0.7158067,
          "avgRequestsPerSecond",
          0.017792022
        ]
      },
      "/update": {
        "class": "org.apache.solr.handler.XmlUpdateRequestHandler",
        "version": "$Revision: 1165749 $",
        "description": "Add documents with XML",
        "srcId": "$Id: XmlUpdateRequestHandler.java 1165749 2011-09-06 16:20:07Z janhoy $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/XmlUpdateRequestHandler.java $",
        "docs": null,
        "stats": {
          "handlerStart": 1516083353157,
          "requests": 0,
          "errors": 0,
          "timeouts": 0,
          "totalTime": 0,
          "avgTimePerRequest": "NaN",
          "avgRequestsPerSecond": 0
        }
      },
      "/terms": {
        "class": "Lazy[solr.SearchHandler]",
        "version": "$Revision: 1086822 $",
        "description": "Lazy[solr.SearchHandler]",
        "srcId": "$Id: RequestHandlers.java 1086822 2011-03-30 02:23:07Z koji $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/core/RequestHandlers.java $",
        "docs": null,
        "stats": {
          "note": "not initialized yet"
        }
      },
      "org.apache.solr.handler.XmlUpdateRequestHandler": {
        "class": "org.apache.solr.handler.XmlUpdateRequestHandler",
        "version": "$Revision: 1165749 $",
        "description": "Add documents with XML",
        "srcId": "$Id: XmlUpdateRequestHandler.java 1165749 2011-09-06 16:20:07Z janhoy $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/XmlUpdateRequestHandler.java $",
        "docs": null,
        "stats": {
          "handlerStart": 1516083353157,
          "requests": 0,
          "errors": 0,
          "timeouts": 0,
          "totalTime": 0,
          "avgTimePerRequest": "NaN",
          "avgRequestsPerSecond": 0
        }
      },
      "org.apache.solr.handler.PingRequestHandler": {
        "class": "org.apache.solr.handler.PingRequestHandler",
        "version": "$Revision: 1142180 $",
        "description": "Reports application health to a load-balancer",
        "srcId": "$Id: PingRequestHandler.java 1142180 2011-07-02 09:04:29Z uschindler $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/PingRequestHandler.java $",
        "docs": null,
        "stats": {
          "handlerStart": 1516083353163,
          "requests": 0,
          "errors": 0,
          "timeouts": 0,
          "totalTime": 0,
          "avgTimePerRequest": "NaN",
          "avgRequestsPerSecond": 0
        }
      },
      "/admin/threads": {
        "class": "org.apache.solr.handler.admin.ThreadDumpHandler",
        "version": "$Revision: 1052938 $",
        "description": "Thread Dump",
        "srcId": "$Id: ThreadDumpHandler.java 1052938 2010-12-26 20:21:48Z rmuir $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/admin/ThreadDumpHandler.java $",
        "docs": null,
        "stats": {
          "handlerStart": 1516083353227,
          "requests": 0,
          "errors": 0,
          "timeouts": 0,
          "totalTime": 0,
          "avgTimePerRequest": "NaN",
          "avgRequestsPerSecond": 0
        }
      },
      "org.apache.solr.handler.BinaryUpdateRequestHandler": {
        "class": "org.apache.solr.handler.BinaryUpdateRequestHandler",
        "version": "$Revision: 1165749 $",
        "description": "Add/Update multiple documents with javabin format",
        "srcId": "$Id: BinaryUpdateRequestHandler.java 1165749 2011-09-06 16:20:07Z janhoy $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/BinaryUpdateRequestHandler.java $",
        "docs": null,
        "stats": {
          "handlerStart": 1516083353158,
          "requests": 0,
          "errors": 0,
          "timeouts": 0,
          "totalTime": 0,
          "avgTimePerRequest": "NaN",
          "avgRequestsPerSecond": 0
        }
      },
      "org.apache.solr.handler.dataimport.DataImportHandler": {
        "class": "org.apache.solr.handler.dataimport.DataImportHandler",
        "version": "1.0",
        "description": "Manage data import from databases to Solr",
        "srcId": "$Id: DataImportHandler.java 1171306 2011-09-15 22:43:33Z janhoy $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImportHandler.java $",
        "docs": null,
        "stats": [
          "Status",
          "IDLE",
          "Documents Processed",
          "java.util.concurrent.atomic.AtomicLong:1",
          "Requests made to DataSource",
          "java.util.concurrent.atomic.AtomicLong:2",
          "Rows Fetched",
          "java.util.concurrent.atomic.AtomicLong:2",
          "Documents Deleted",
          "java.util.concurrent.atomic.AtomicLong:0",
          "Documents Skipped",
          "java.util.concurrent.atomic.AtomicLong:0",
          "Total Documents Processed",
          "java.util.concurrent.atomic.AtomicLong:351705",
          "Total Requests made to DataSource",
          "java.util.concurrent.atomic.AtomicLong:1438",
          "Total Rows Fetched",
          "java.util.concurrent.atomic.AtomicLong:876393",
          "Total Documents Deleted",
          "java.util.concurrent.atomic.AtomicLong:0",
          "Total Documents Skipped",
          "java.util.concurrent.atomic.AtomicLong:0",
          "handlerStart",
          1516083353155,
          "requests",
          2442,
          "errors",
          0,
          "timeouts",
          0,
          "totalTime",
          1748,
          "avgTimePerRequest",
          0.7158067,
          "avgRequestsPerSecond",
          0.017792022
        ]
      },
      "/analysis/field": {
        "class": "Lazy[solr.FieldAnalysisRequestHandler]",
        "version": "$Revision: 1086822 $",
        "description": "Lazy[solr.FieldAnalysisRequestHandler]",
        "srcId": "$Id: RequestHandlers.java 1086822 2011-03-30 02:23:07Z koji $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/core/RequestHandlers.java $",
        "docs": null,
        "stats": {
          "note": "not initialized yet"
        }
      },
      "/browse": {
        "class": "org.apache.solr.handler.component.SearchHandler",
        "version": "$Revision: 1052938 $",
        "description": "Search using components: org.apache.solr.handler.component.QueryComponent,org.apache.solr.handler.component.FacetComponent,org.apache.solr.handler.component.MoreLikeThisComponent,org.apache.solr.handler.component.HighlightComponent,org.apache.solr.handler.component.StatsComponent,org.apache.solr.handler.component.SpellCheckComponent,org.apache.solr.handler.component.DebugComponent,",
        "srcId": "$Id: SearchHandler.java 1052938 2010-12-26 20:21:48Z rmuir $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java $",
        "docs": null,
        "stats": {
          "handlerStart": 1516083353156,
          "requests": 0,
          "errors": 0,
          "timeouts": 0,
          "totalTime": 0,
          "avgTimePerRequest": "NaN",
          "avgRequestsPerSecond": 0
        }
      },
      "/admin/ping": {
        "class": "org.apache.solr.handler.PingRequestHandler",
        "version": "$Revision: 1142180 $",
        "description": "Reports application health to a load-balancer",
        "srcId": "$Id: PingRequestHandler.java 1142180 2011-07-02 09:04:29Z uschindler $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/PingRequestHandler.java $",
        "docs": null,
        "stats": {
          "handlerStart": 1516083353163,
          "requests": 0,
          "errors": 0,
          "timeouts": 0,
          "totalTime": 0,
          "avgTimePerRequest": "NaN",
          "avgRequestsPerSecond": 0
        }
      },
      "/admin/mbeans": {
        "class": "org.apache.solr.handler.admin.SolrInfoMBeanHandler",
        "version": "$Revision: 1065312 $",
        "description": "Get Info (and statistics) about all registered SolrInfoMBeans",
        "srcId": "$Id: SolrInfoMBeanHandler.java 1065312 2011-01-30 16:08:25Z rmuir $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java $",
        "docs": null,
        "stats": {
          "handlerStart": 1516083353227,
          "requests": 1078,
          "errors": 0,
          "timeouts": 0,
          "totalTime": 547,
          "avgTimePerRequest": 0.50742114,
          "avgRequestsPerSecond": 0.00785414
        }
      },
      "/analysis/document": {
        "class": "Lazy[solr.DocumentAnalysisRequestHandler]",
        "version": "$Revision: 1086822 $",
        "description": "Lazy[solr.DocumentAnalysisRequestHandler]",
        "srcId": "$Id: RequestHandlers.java 1086822 2011-03-30 02:23:07Z koji $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/core/RequestHandlers.java $",
        "docs": null,
        "stats": {
          "note": "not initialized yet"
        }
      },
      "search": {
        "class": "org.apache.solr.handler.component.SearchHandler",
        "version": "$Revision: 1052938 $",
        "description": "Search using components: org.apache.solr.handler.component.QueryComponent,org.apache.solr.handler.component.FacetComponent,org.apache.solr.handler.component.MoreLikeThisComponent,org.apache.solr.handler.component.HighlightComponent,org.apache.solr.handler.component.StatsComponent,org.apache.solr.handler.component.DebugComponent,",
        "srcId": "$Id: SearchHandler.java 1052938 2010-12-26 20:21:48Z rmuir $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java $",
        "docs": null,
        "stats": {
          "handlerStart": 1516083353156,
          "requests": 0,
          "errors": 0,
          "timeouts": 0,
          "totalTime": 0,
          "avgTimePerRequest": "NaN",
          "avgRequestsPerSecond": 0
        }
      },
      "/update/csv": {
        "class": "Lazy[solr.CSVRequestHandler]",
        "version": "$Revision: 1086822 $",
        "description": "Lazy[solr.CSVRequestHandler]",
        "srcId": "$Id: RequestHandlers.java 1086822 2011-03-30 02:23:07Z koji $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/core/RequestHandlers.java $",
        "docs": null,
        "stats": {
          "note": "not initialized yet"
        }
      },
      "/update/json": {
        "class": "Lazy[solr.JsonUpdateRequestHandler]",
        "version": "$Revision: 1086822 $ :: $Revision: 1102081 $",
        "description": "Add documents with JSON",
        "srcId": "$Id: RequestHandlers.java 1086822 2011-03-30 02:23:07Z koji $ :: $Id: JsonUpdateRequestHandler.java 1102081 2011-05-11 20:37:04Z yonik $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/core/RequestHandlers.java $\n$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/JsonUpdateRequestHandler.java $",
        "docs": null,
        "stats": {
          "handlerStart": 1516103486630,
          "requests": 2530,
          "errors": 26,
          "timeouts": 0,
          "totalTime": 132438,
          "avgTimePerRequest": 52.347034,
          "avgRequestsPerSecond": 0.02160195
        }
      },
      "/admin/": {
        "class": "org.apache.solr.handler.admin.AdminHandlers",
        "version": "$Revision: 953887 $",
        "description": "Register Standard Admin Handlers",
        "srcId": "$Id: AdminHandlers.java 953887 2010-06-11 21:53:43Z hossman $",
        "src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/admin/AdminHandlers.java $",
        "docs": null,
        "stats": null
      },
      "standard": {
        "class": "org.apache.solr.handler.component.SearchHandler",
        "version": "$Revision: 1052938 $",
        "description": "Search using components: org.apache.solr.handler.component.QueryComponent,org.apache.solr.handler.component.FacetComponent,org.apache.solr.handler.component.MoreLikeThisComponent,org.apache.solr.handler.component.HighlightComponent,org.apache.solr.handler.component.StatsComponent,org.apache.solr.handler.component.DebugComponent,",
        "srcId": "$Id: SearchHandler.java 1052938 2010-12-26 20:21:48Z rmuir $",
"src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java $",
|
||||
"docs": null,
|
||||
"stats": {
|
||||
"handlerStart": 1516083353155,
|
||||
"requests": 11480,
|
||||
"errors": 0,
|
||||
"timeouts": 0,
|
||||
"totalTime": 318753,
|
||||
"avgTimePerRequest": 27.765942,
|
||||
"avgRequestsPerSecond": 0.08364145
|
||||
}
|
||||
},
|
||||
"org.apache.solr.handler.admin.AdminHandlers": {
|
||||
"class": "org.apache.solr.handler.admin.AdminHandlers",
|
||||
"version": "$Revision: 953887 $",
|
||||
"description": "Register Standard Admin Handlers",
|
||||
"srcId": "$Id: AdminHandlers.java 953887 2010-06-11 21:53:43Z hossman $",
|
||||
"src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/admin/AdminHandlers.java $",
|
||||
"docs": null,
|
||||
"stats": null
|
||||
},
|
||||
"tvrh": {
|
||||
"class": "Lazy[solr.SearchHandler]",
|
||||
"version": "$Revision: 1086822 $",
|
||||
"description": "Lazy[solr.SearchHandler]",
|
||||
"srcId": "$Id: RequestHandlers.java 1086822 2011-03-30 02:23:07Z koji $",
|
||||
"src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/core/RequestHandlers.java $",
|
||||
"docs": null,
|
||||
"stats": {
|
||||
"note": "not initialized yet"
|
||||
}
|
||||
},
|
||||
"org.apache.solr.handler.DumpRequestHandler": {
|
||||
"class": "org.apache.solr.handler.DumpRequestHandler",
|
||||
"version": "$Revision: 1067172 $",
|
||||
"description": "Dump handler (debug)",
|
||||
"srcId": "$Id: DumpRequestHandler.java 1067172 2011-02-04 12:50:14Z uschindler $",
|
||||
"src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/DumpRequestHandler.java $",
|
||||
"docs": null,
|
||||
"stats": {
|
||||
"handlerStart": 1516083353163,
|
||||
"requests": 0,
|
||||
"errors": 0,
|
||||
"timeouts": 0,
|
||||
"totalTime": 0,
|
||||
"avgTimePerRequest": "NaN",
|
||||
"avgRequestsPerSecond": 0
|
||||
}
|
||||
},
|
||||
"/update/extract": {
|
||||
"class": "Lazy[solr.extraction.ExtractingRequestHandler]",
|
||||
"version": "$Revision: 1086822 $",
|
||||
"description": "Lazy[solr.extraction.ExtractingRequestHandler]",
|
||||
"srcId": "$Id: RequestHandlers.java 1086822 2011-03-30 02:23:07Z koji $",
|
||||
"src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/core/RequestHandlers.java $",
|
||||
"docs": null,
|
||||
"stats": {
|
||||
"note": "not initialized yet"
|
||||
}
|
||||
},
|
||||
"/admin/properties": {
|
||||
"class": "org.apache.solr.handler.admin.PropertiesRequestHandler",
|
||||
"version": "$Revision: 898152 $",
|
||||
"description": "Get System Properties",
|
||||
"srcId": "$Id: PropertiesRequestHandler.java 898152 2010-01-12 02:19:56Z ryan $",
|
||||
"src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/admin/PropertiesRequestHandler.java $",
|
||||
"docs": null,
|
||||
"stats": {
|
||||
"handlerStart": 1516083353227,
|
||||
"requests": 0,
|
||||
"errors": 0,
|
||||
"timeouts": 0,
|
||||
"totalTime": 0,
|
||||
"avgTimePerRequest": "NaN",
|
||||
"avgRequestsPerSecond": 0
|
||||
}
|
||||
},
|
||||
"org.apache.solr.handler.component.SearchHandler": {
|
||||
"class": "org.apache.solr.handler.component.SearchHandler",
|
||||
"version": "$Revision: 1052938 $",
|
||||
"description": "Search using components: org.apache.solr.handler.component.QueryComponent,org.apache.solr.handler.component.FacetComponent,org.apache.solr.handler.component.MoreLikeThisComponent,org.apache.solr.handler.component.HighlightComponent,org.apache.solr.handler.component.StatsComponent,org.apache.solr.handler.component.SpellCheckComponent,org.apache.solr.handler.component.DebugComponent,",
|
||||
"srcId": "$Id: SearchHandler.java 1052938 2010-12-26 20:21:48Z rmuir $",
|
||||
"src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java $",
|
||||
"docs": null,
|
||||
"stats": {
|
||||
"handlerStart": 1516083353156,
|
||||
"requests": 0,
|
||||
"errors": 0,
|
||||
"timeouts": 0,
|
||||
"totalTime": 0,
|
||||
"avgTimePerRequest": "NaN",
|
||||
"avgRequestsPerSecond": 0
|
||||
}
|
||||
},
|
||||
"/spell": {
|
||||
"class": "Lazy[solr.SearchHandler]",
|
||||
"version": "$Revision: 1086822 $",
|
||||
"description": "Lazy[solr.SearchHandler]",
|
||||
"srcId": "$Id: RequestHandlers.java 1086822 2011-03-30 02:23:07Z koji $",
|
||||
"src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/core/RequestHandlers.java $",
|
||||
"docs": null,
|
||||
"stats": {
|
||||
"note": "not initialized yet"
|
||||
}
|
||||
},
|
||||
"/debug/dump": {
|
||||
"class": "org.apache.solr.handler.DumpRequestHandler",
|
||||
"version": "$Revision: 1067172 $",
|
||||
"description": "Dump handler (debug)",
|
||||
"srcId": "$Id: DumpRequestHandler.java 1067172 2011-02-04 12:50:14Z uschindler $",
|
||||
"src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/handler/DumpRequestHandler.java $",
|
||||
"docs": null,
|
||||
"stats": {
|
||||
"handlerStart": 1516083353163,
|
||||
"requests": 0,
|
||||
"errors": 0,
|
||||
"timeouts": 0,
|
||||
"totalTime": 0,
|
||||
"avgTimePerRequest": "NaN",
|
||||
"avgRequestsPerSecond": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
"UPDATEHANDLER",
|
||||
{
|
||||
"updateHandler": {
|
||||
"class": "org.apache.solr.update.DirectUpdateHandler2",
|
||||
"version": "1.0",
|
||||
"description": "Update handler that efficiently directly updates the on-disk main lucene index",
|
||||
"srcId": "$Id: DirectUpdateHandler2.java 1203770 2011-11-18 17:55:52Z mikemccand $",
|
||||
"src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java $",
|
||||
"docs": null,
|
||||
"stats": {
|
||||
"commits": 3220,
|
||||
"autocommits": 0,
|
||||
"optimizes": 3,
|
||||
"rollbacks": 0,
|
||||
"expungeDeletes": 0,
|
||||
"docsPending": 0,
|
||||
"adds": 0,
|
||||
"deletesById": 0,
|
||||
"deletesByQuery": 0,
|
||||
"errors": 0,
|
||||
"cumulative_adds": 354209,
|
||||
"cumulative_deletesById": 0,
|
||||
"cumulative_deletesByQuery": 3,
|
||||
"cumulative_errors": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
"CACHE",
|
||||
{
|
||||
"queryResultCache": {
|
||||
"class": "org.apache.solr.search.LRUCache",
|
||||
"version": "1.0",
|
||||
"description": "LRU Cache(maxSize=512, initialSize=512)",
|
||||
"srcId": "$Id: LRUCache.java 1065312 2011-01-30 16:08:25Z rmuir $",
|
||||
"src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/search/LRUCache.java $",
|
||||
"docs": null,
|
||||
"stats": {
|
||||
"lookups": 4,
|
||||
"hits": 2,
|
||||
"hitratio": "0.50",
|
||||
"inserts": 2,
|
||||
"evictions": 0,
|
||||
"size": 2,
|
||||
"warmupTime": 0,
|
||||
"cumulative_lookups": 10630,
|
||||
"cumulative_hits": 5509,
|
||||
"cumulative_hitratio": "0.51",
|
||||
"cumulative_inserts": 5626,
|
||||
"cumulative_evictions": 0
|
||||
}
|
||||
},
|
||||
"fieldCache": {
|
||||
"class": "org.apache.solr.search.SolrFieldCacheMBean",
|
||||
"version": "1.0",
|
||||
"description": "Provides introspection of the Lucene FieldCache, this is **NOT** a cache that is managed by Solr.",
|
||||
"srcId": "$Id: SolrFieldCacheMBean.java 984594 2010-08-11 21:42:04Z yonik $",
|
||||
"src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/search/SolrFieldCacheMBean.java $",
|
||||
"docs": null,
|
||||
"stats": {
|
||||
"entries_count": 174,
|
||||
"entry#0": "'MMapIndexInput(path=\"/usr/solrData/search/index/_9eir.frq\")'=>'latlng_0_coordinate',double,org.apache.lucene.search.FieldCache.NUMERIC_UTILS_DOUBLE_PARSER=>[D#661647869",
|
||||
"insanity_count": 1,
|
||||
"insanity#0": "SUBREADER: Found caches for descendants of ReadOnlyDirectoryReader(segments_1wo _3kl(3.5):C133115/12 _3kw(3.5):C17/2 _3kx(3.5):C6 _3ky(3.5):C1 _3kz(3.5):C2 _3l0(3.5):C2 _3l1(3.5):C1 _3l2(3.5):C1 _3l3(3.5):C1 _3l4(3.5):C1)+owner\n\t'ReadOnlyDirectoryReader(segments_1wo _3kl(3.5):C133115/12 _3kw(3.5):C17/2 _3kx(3.5):C6 _3ky(3.5):C1 _3kz(3.5):C2 _3l0(3.5):C2 _3l1(3.5):C1 _3l2(3.5):C1 _3l3(3.5):C1 _3l4(3.5):C1)'=>'owner',class org.apache.lucene.search.FieldCache$StringIndex,null=>org.apache.lucene.search.FieldCache$StringIndex#927712538\n\t'MMapIndexInput(path=\"/usr/solrData/search/index/_3kx.frq\")'=>'owner',class org.apache.lucene.search.FieldCache$StringIndex,null=>org.apache.lucene.search.FieldCache$StringIndex#969886745\n\t'MMapIndexInput(path=\"/usr/solrData/search/index/_3kz.frq\")'=>'owner',class org.apache.lucene.search.FieldCache$StringIndex,null=>org.apache.lucene.search.FieldCache$StringIndex#495952608\n\t'MMapIndexInput(path=\"/usr/solrData/search/index/_3ky.frq\")'=>'owner',class org.apache.lucene.search.FieldCache$StringIndex,null=>org.apache.lucene.search.FieldCache$StringIndex#1581258843\n\t'MMapIndexInput(path=\"/usr/solrData/search/index/_3l1.frq\")'=>'owner',class org.apache.lucene.search.FieldCache$StringIndex,null=>org.apache.lucene.search.FieldCache$StringIndex#359550090\n\t'MMapIndexInput(path=\"/usr/solrData/search/index/_3kl.frq\")'=>'owner',class org.apache.lucene.search.FieldCache$StringIndex,null=>org.apache.lucene.search.FieldCache$StringIndex#1748227582\n\t'MMapIndexInput(path=\"/usr/solrData/search/index/_3l4.frq\")'=>'owner',class org.apache.lucene.search.FieldCache$StringIndex,null=>org.apache.lucene.search.FieldCache$StringIndex#1084424163\n\t'MMapIndexInput(path=\"/usr/solrData/search/index/_3l3.frq\")'=>'owner',class org.apache.lucene.search.FieldCache$StringIndex,null=>org.apache.lucene.search.FieldCache$StringIndex#1116912780\n\t'MMapIndexInput(path=\"/usr/solrData/search/index/_3l0.frq\")'=>'owner',class org.apache.lucene.search.FieldCache$StringIndex,null=>org.apache.lucene.search.FieldCache$StringIndex#1187916045\n\t'MMapIndexInput(path=\"/usr/solrData/search/index/_3l2.frq\")'=>'owner',class org.apache.lucene.search.FieldCache$StringIndex,null=>org.apache.lucene.search.FieldCache$StringIndex#62119827\n\t'MMapIndexInput(path=\"/usr/solrData/search/index/_3kw.frq\")'=>'owner',class org.apache.lucene.search.FieldCache$StringIndex,null=>org.apache.lucene.search.FieldCache$StringIndex#1756606907\n"
|
||||
}
|
||||
},
|
||||
"documentCache": {
|
||||
"class": "org.apache.solr.search.LRUCache",
|
||||
"version": "1.0",
|
||||
"description": "LRU Cache(maxSize=512, initialSize=512)",
|
||||
"srcId": "$Id: LRUCache.java 1065312 2011-01-30 16:08:25Z rmuir $",
|
||||
"src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/search/LRUCache.java $",
|
||||
"docs": null,
|
||||
"stats": {
|
||||
"lookups": 0,
|
||||
"hits": 0,
|
||||
"hitratio": "0.00",
|
||||
"inserts": 0,
|
||||
"evictions": 0,
|
||||
"size": 0,
|
||||
"warmupTime": 0,
|
||||
"cumulative_lookups": 180435,
|
||||
"cumulative_hits": 22584,
|
||||
"cumulative_hitratio": "0.12",
|
||||
"cumulative_inserts": 157851,
|
||||
"cumulative_evictions": 40344
|
||||
}
|
||||
},
|
||||
"fieldValueCache": {
|
||||
"class": "org.apache.solr.search.FastLRUCache",
|
||||
"version": "1.0",
|
||||
"description": "Concurrent LRU Cache(maxSize=10000, initialSize=10, minSize=9000, acceptableSize=9500, cleanupThread=false)",
|
||||
"srcId": "$Id: FastLRUCache.java 1170772 2011-09-14 19:09:56Z sarowe $",
|
||||
"src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/search/FastLRUCache.java $",
|
||||
"docs": null,
|
||||
"stats": {
|
||||
"lookups": 5,
|
||||
"hits": 3,
|
||||
"hitratio": "0.60",
|
||||
"inserts": 1,
|
||||
"evictions": 0,
|
||||
"size": 1,
|
||||
"warmupTime": 0,
|
||||
"cumulative_lookups": 8529,
|
||||
"cumulative_hits": 5432,
|
||||
"cumulative_hitratio": "0.63",
|
||||
"cumulative_inserts": 1437,
|
||||
"cumulative_evictions": 0,
|
||||
"item_parentCompanyId": "{field=parentCompanyId,memSize=785156,tindexSize=13056,time=136,phase1=135,nTerms=75696,bigTerms=0,termInstances=117166,uses=4}"
|
||||
}
|
||||
},
|
||||
"filterCache": {
|
||||
"class": "org.apache.solr.search.FastLRUCache",
|
||||
"version": "1.0",
|
||||
"description": "Concurrent LRU Cache(maxSize=512, initialSize=512, minSize=460, acceptableSize=486, cleanupThread=false)",
|
||||
"srcId": "$Id: FastLRUCache.java 1170772 2011-09-14 19:09:56Z sarowe $",
|
||||
"src": "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_3_5/solr/core/src/java/org/apache/solr/search/FastLRUCache.java $",
|
||||
"docs": null,
|
||||
"stats": {
|
||||
"lookups": 2,
|
||||
"hits": 2,
|
||||
"hitratio": "1.00",
|
||||
"inserts": 2,
|
||||
"evictions": 0,
|
||||
"size": 2,
|
||||
"warmupTime": 0,
|
||||
"cumulative_lookups": 4041,
|
||||
"cumulative_hits": 4041,
|
||||
"cumulative_hitratio": "1.00",
|
||||
"cumulative_inserts": 2828,
|
||||
"cumulative_evictions": 0
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
`
|
||||
|
||||
var solr3CoreExpected = map[string]interface{}{
|
||||
"num_docs": int64(117166),
|
||||
"max_docs": int64(117305),
|
||||
"deleted_docs": int64(0),
|
||||
}
|
||||
|
||||
var solr3QueryHandlerExpected = map[string]interface{}{
|
||||
"15min_rate_reqs_per_second": float64(0),
|
||||
"5min_rate_reqs_per_second": float64(0),
|
||||
"75th_pc_request_time": float64(0),
|
||||
"95th_pc_request_time": float64(0),
|
||||
"999th_pc_request_time": float64(0),
|
||||
"99th_pc_request_time": float64(0),
|
||||
"avg_requests_per_second": float64(0),
|
||||
"avg_time_per_request": float64(0),
|
||||
"errors": int64(0),
|
||||
"handler_start": int64(1516083353156),
|
||||
"median_request_time": float64(0),
|
||||
"requests": int64(0),
|
||||
"timeouts": int64(0),
|
||||
"total_time": float64(0),
|
||||
}
|
||||
|
||||
var solr3UpdateHandlerExpected = map[string]interface{}{
|
||||
"adds": int64(0),
|
||||
"autocommit_max_docs": int64(0),
|
||||
"autocommit_max_time": int64(0),
|
||||
"autocommits": int64(0),
|
||||
"commits": int64(3220),
|
||||
"cumulative_adds": int64(354209),
|
||||
"cumulative_deletes_by_id": int64(0),
|
||||
"cumulative_deletes_by_query": int64(3),
|
||||
"cumulative_errors": int64(0),
|
||||
"deletes_by_id": int64(0),
|
||||
"deletes_by_query": int64(0),
|
||||
"docs_pending": int64(0),
|
||||
"errors": int64(0),
|
||||
"expunge_deletes": int64(0),
|
||||
"optimizes": int64(3),
|
||||
"rollbacks": int64(0),
|
||||
"soft_autocommits": int64(0),
|
||||
}
|
||||
|
||||
var solr3CacheExpected = map[string]interface{}{
|
||||
"cumulative_evictions": int64(0),
|
||||
"cumulative_hitratio": float64(1.00),
|
||||
"cumulative_hits": int64(4041),
|
||||
"cumulative_inserts": int64(2828),
|
||||
"cumulative_lookups": int64(4041),
|
||||
"evictions": int64(0),
|
||||
"hitratio": float64(1.00),
|
||||
"hits": int64(2),
|
||||
"inserts": int64(2),
|
||||
"lookups": int64(2),
|
||||
"size": int64(2),
|
||||
"warmup_time": int64(0),
|
||||
}
|
||||
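
The expectation maps above pair with the mbeans JSON fixture; a minimal sketch of how such maps are typically asserted in a Telegraf plugin test with `testutil.Accumulator` follows. The `newTestSolrServer` helper, the `solr_cache` measurement name, and the `core` tag are assumptions for illustration, not taken from this diff.

```go
// Hypothetical test sketch: newTestSolrServer, the "solr_cache"
// measurement, and the "core" tag are assumed names, not from this diff.
func TestSolr3GatherCacheStats(t *testing.T) {
	var acc testutil.Accumulator
	s := newTestSolrServer(t) // assumed helper serving the JSON fixture above

	require.NoError(t, s.Gather(&acc))

	// Each expected map asserts one measurement's parsed fields.
	acc.AssertContainsTaggedFields(t, "solr_cache",
		solr3CacheExpected, map[string]string{"core": "main"})
}
```
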
@@ -52,8 +52,7 @@ GO
## - DatabaseStats
## - MemoryClerk
## - VolumeSpace
## - PerformanceMetrics
# exclude_query = [ 'PerformanceCounters','WaitStatsCatagorized' ]
exclude_query = [ 'DatabaseIO' ]
```

### Metrics:
@@ -89,10 +88,46 @@ The new (version 2) metrics provide:
- *Log activity*: Log bytes flushed/sec, Log flushes/sec, Log Flush Wait Time
- *Memory*: PLE, Page reads/sec, Page writes/sec, + more
- *TempDB*: Free space, Version store usage, Active temp tables, temp table creation rate, + more
- *Resource Governor*: CPU Usage, Requests/sec, Queued Requests, and Blocked tasks per workload group
- *Resource Governor*: CPU Usage, Requests/sec, Queued Requests, and Blocked tasks per workload group + more
- *Server properties*: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version
- *Wait stats*: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store.

The following metrics can be used directly, with no delta calculations:
- SQLServer:Buffer Manager\Buffer cache hit ratio
- SQLServer:Buffer Manager\Page life expectancy
- SQLServer:Buffer Node\Page life expectancy
- SQLServer:Database Replica\Log Apply Pending Queue
- SQLServer:Database Replica\Log Apply Ready Queue
- SQLServer:Database Replica\Log Send Queue
- SQLServer:Database Replica\Recovery Queue
- SQLServer:Databases\Data File(s) Size (KB)
- SQLServer:Databases\Log File(s) Size (KB)
- SQLServer:Databases\Log File(s) Used Size (KB)
- SQLServer:Databases\XTP Memory Used (KB)
- SQLServer:General Statistics\Active Temp Tables
- SQLServer:General Statistics\Processes blocked
- SQLServer:General Statistics\Temp Tables For Destruction
- SQLServer:General Statistics\User Connections
- SQLServer:Memory Broker Clerks\Memory broker clerk size
- SQLServer:Memory Manager\Memory Grants Pending
- SQLServer:Memory Manager\Target Server Memory (KB)
- SQLServer:Memory Manager\Total Server Memory (KB)
- SQLServer:Resource Pool Stats\Active memory grant amount (KB)
- SQLServer:Resource Pool Stats\Disk Read Bytes/sec
- SQLServer:Resource Pool Stats\Disk Read IO Throttled/sec
- SQLServer:Resource Pool Stats\Disk Read IO/sec
- SQLServer:Resource Pool Stats\Disk Write Bytes/sec
- SQLServer:Resource Pool Stats\Disk Write IO Throttled/sec
- SQLServer:Resource Pool Stats\Disk Write IO/sec
- SQLServer:Resource Pool Stats\Used memory (KB)
- SQLServer:Transactions\Free Space in tempdb (KB)
- SQLServer:Transactions\Version Store Size (KB)
- SQLServer:User Settable\Query
- SQLServer:Workload Group Stats\Blocked tasks
- SQLServer:Workload Group Stats\CPU usage %
- SQLServer:Workload Group Stats\Queued requests
- SQLServer:Workload Group Stats\Requests completed/sec

Version 2 queries have the following tags:
- `host`: Physical host name
- `sql_instance`: Physical host and instance name (hostname:instance)

@@ -68,7 +68,7 @@ var sampleConfig = `
## - MemoryClerk
## - VolumeSpace
## - PerformanceMetrics
# exclude_query = [ 'PerformanceCounters','WaitStatsCatagorized' ]
# exclude_query = [ 'DatabaseIO' ]
`

// SampleConfig return the sample configuration
@@ -244,108 +244,113 @@ func init() {
// Thanks Bob Ward (http://aka.ms/bobwardms)
// and the folks at Stack Overflow (https://github.com/opserver/Opserver/blob/9c89c7e9936b58ad237b30e6f4cc6cd59c406889/Opserver.Core/Data/SQL/SQLInstance.Memory.cs)
// for putting most of the memory clerk definitions online!
const sqlMemoryClerkV2 = `SELECT
'sqlserver_memory_clerks' As [measurement],
REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
SERVERPROPERTY('ServerName') AS [host],
const sqlMemoryClerkV2 = `DECLARE @SQL NVARCHAR(MAX) = 'SELECT
"sqlserver_memory_clerks" As [measurement],
REPLACE(@@SERVERNAME,"\",":") AS [sql_instance],
ISNULL(clerk_names.name,mc.type) AS clerk_type,
SUM(mc.pages_kb) AS size_kb
SUM({pages_kb}) AS size_kb
FROM
sys.dm_os_memory_clerks AS mc WITH (NOLOCK)
LEFT OUTER JOIN ( VALUES
('CACHESTORE_BROKERDSH','Service Broker Dialog Security Header Cache'),
('CACHESTORE_BROKERKEK','Service Broker Key Exchange Key Cache'),
('CACHESTORE_BROKERREADONLY','Service Broker (Read-Only)'),
('CACHESTORE_BROKERRSB','Service Broker Null Remote Service Binding Cache'),
('CACHESTORE_BROKERTBLACS','Broker dormant rowsets'),
('CACHESTORE_BROKERTO','Service Broker Transmission Object Cache'),
('CACHESTORE_BROKERUSERCERTLOOKUP','Service Broker user certificates lookup result cache'),
('CACHESTORE_CLRPROC','CLR Procedure Cache'),
('CACHESTORE_CLRUDTINFO','CLR UDT Info'),
('CACHESTORE_COLUMNSTOREOBJECTPOOL','Column Store Object Pool'),
('CACHESTORE_CONVPRI','Conversation Priority Cache'),
('CACHESTORE_EVENTS','Event Notification Cache'),
('CACHESTORE_FULLTEXTSTOPLIST','Full Text Stoplist Cache'),
('CACHESTORE_NOTIF','Notification Store'),
('CACHESTORE_OBJCP','Object Plans'),
('CACHESTORE_PHDR','Bound Trees'),
('CACHESTORE_SEARCHPROPERTYLIST','Search Property List Cache'),
('CACHESTORE_SEHOBTCOLUMNATTRIBUTE','SE Shared Column Metadata Cache'),
('CACHESTORE_SQLCP','SQL Plans'),
('CACHESTORE_STACKFRAMES','SOS_StackFramesStore'),
('CACHESTORE_SYSTEMROWSET','System Rowset Store'),
('CACHESTORE_TEMPTABLES','Temporary Tables & Table Variables'),
('CACHESTORE_VIEWDEFINITIONS','View Definition Cache'),
('CACHESTORE_XML_SELECTIVE_DG','XML DB Cache (Selective)'),
('CACHESTORE_XMLDBATTRIBUTE','XML DB Cache (Attribute)'),
('CACHESTORE_XMLDBELEMENT','XML DB Cache (Element)'),
('CACHESTORE_XMLDBTYPE','XML DB Cache (Type)'),
('CACHESTORE_XPROC','Extended Stored Procedures'),
('MEMORYCLERK_FILETABLE','Memory Clerk (File Table)'),
('MEMORYCLERK_FSCHUNKER','Memory Clerk (FS Chunker)'),
('MEMORYCLERK_FULLTEXT','Full Text'),
('MEMORYCLERK_FULLTEXT_SHMEM','Full-text IG'),
('MEMORYCLERK_HADR','HADR'),
('MEMORYCLERK_HOST','Host'),
('MEMORYCLERK_LANGSVC','Language Service'),
('MEMORYCLERK_LWC','Light Weight Cache'),
('MEMORYCLERK_QSRANGEPREFETCH','QS Range Prefetch'),
('MEMORYCLERK_SERIALIZATION','Serialization'),
('MEMORYCLERK_SNI','SNI'),
('MEMORYCLERK_SOSMEMMANAGER','SOS Memory Manager'),
('MEMORYCLERK_SOSNODE','SOS Node'),
('MEMORYCLERK_SOSOS','SOS Memory Clerk'),
('MEMORYCLERK_SQLBUFFERPOOL','Buffer Pool'),
('MEMORYCLERK_SQLCLR','CLR'),
('MEMORYCLERK_SQLCLRASSEMBLY','CLR Assembly'),
('MEMORYCLERK_SQLCONNECTIONPOOL','Connection Pool'),
('MEMORYCLERK_SQLGENERAL','General'),
('MEMORYCLERK_SQLHTTP','HTTP'),
('MEMORYCLERK_SQLLOGPOOL','Log Pool'),
('MEMORYCLERK_SQLOPTIMIZER','SQL Optimizer'),
('MEMORYCLERK_SQLQERESERVATIONS','SQL Reservations'),
('MEMORYCLERK_SQLQUERYCOMPILE','SQL Query Compile'),
('MEMORYCLERK_SQLQUERYEXEC','SQL Query Exec'),
('MEMORYCLERK_SQLQUERYPLAN','SQL Query Plan'),
('MEMORYCLERK_SQLSERVICEBROKER','SQL Service Broker'),
('MEMORYCLERK_SQLSERVICEBROKERTRANSPORT','Unified Communication Stack'),
('MEMORYCLERK_SQLSOAP','SQL SOAP'),
('MEMORYCLERK_SQLSOAPSESSIONSTORE','SQL SOAP (Session Store)'),
('MEMORYCLERK_SQLSTORENG','SQL Storage Engine'),
('MEMORYCLERK_SQLUTILITIES','SQL Utilities'),
('MEMORYCLERK_SQLXML','SQL XML'),
('MEMORYCLERK_SQLXP','SQL XP'),
('MEMORYCLERK_TRACE_EVTNOTIF','Trace Event Notification'),
('MEMORYCLERK_XE','XE Engine'),
('MEMORYCLERK_XE_BUFFER','XE Buffer'),
('MEMORYCLERK_XTP','In-Memory OLTP'),
('OBJECTSTORE_LBSS','Lbss Cache (Object Store)'),
('OBJECTSTORE_LOCK_MANAGER','Lock Manager (Object Store)'),
('OBJECTSTORE_SECAUDIT_EVENT_BUFFER','Audit Event Buffer (Object Store)'),
('OBJECTSTORE_SERVICE_BROKER','Service Broker (Object Store)'),
('OBJECTSTORE_SNI_PACKET','SNI Packet (Object Store)'),
('OBJECTSTORE_XACT_CACHE','Transactions Cache (Object Store)'),
('USERSTORE_DBMETADATA','DB Metadata (User Store)'),
('USERSTORE_OBJPERM','Object Permissions (User Store)'),
('USERSTORE_SCHEMAMGR','Schema Manager (User Store)'),
('USERSTORE_SXC','SXC (User Store)'),
('USERSTORE_TOKENPERM','Token Permissions (User Store)'),
('USERSTORE_QDSSTMT','QDS Statement Buffer (Pre-persist)'),
('CACHESTORE_QDSRUNTIMESTATS','QDS Runtime Stats (Pre-persist)'),
('CACHESTORE_QDSCONTEXTSETTINGS','QDS Unique Context Settings'),
('MEMORYCLERK_QUERYDISKSTORE','QDS General'),
('MEMORYCLERK_QUERYDISKSTORE_HASHMAP','QDS Query/Plan Hash Table')
("CACHESTORE_BROKERDSH","Service Broker Dialog Security Header Cache"),
("CACHESTORE_BROKERKEK","Service Broker Key Exchange Key Cache"),
("CACHESTORE_BROKERREADONLY","Service Broker (Read-Only)"),
("CACHESTORE_BROKERRSB","Service Broker Null Remote Service Binding Cache"),
("CACHESTORE_BROKERTBLACS","Broker dormant rowsets"),
("CACHESTORE_BROKERTO","Service Broker Transmission Object Cache"),
("CACHESTORE_BROKERUSERCERTLOOKUP","Service Broker user certificates lookup result cache"),
("CACHESTORE_CLRPROC","CLR Procedure Cache"),
("CACHESTORE_CLRUDTINFO","CLR UDT Info"),
("CACHESTORE_COLUMNSTOREOBJECTPOOL","Column Store Object Pool"),
("CACHESTORE_CONVPRI","Conversation Priority Cache"),
("CACHESTORE_EVENTS","Event Notification Cache"),
("CACHESTORE_FULLTEXTSTOPLIST","Full Text Stoplist Cache"),
("CACHESTORE_NOTIF","Notification Store"),
("CACHESTORE_OBJCP","Object Plans"),
("CACHESTORE_PHDR","Bound Trees"),
("CACHESTORE_SEARCHPROPERTYLIST","Search Property List Cache"),
("CACHESTORE_SEHOBTCOLUMNATTRIBUTE","SE Shared Column Metadata Cache"),
("CACHESTORE_SQLCP","SQL Plans"),
("CACHESTORE_STACKFRAMES","SOS_StackFramesStore"),
("CACHESTORE_SYSTEMROWSET","System Rowset Store"),
("CACHESTORE_TEMPTABLES","Temporary Tables & Table Variables"),
("CACHESTORE_VIEWDEFINITIONS","View Definition Cache"),
("CACHESTORE_XML_SELECTIVE_DG","XML DB Cache (Selective)"),
("CACHESTORE_XMLDBATTRIBUTE","XML DB Cache (Attribute)"),
("CACHESTORE_XMLDBELEMENT","XML DB Cache (Element)"),
("CACHESTORE_XMLDBTYPE","XML DB Cache (Type)"),
("CACHESTORE_XPROC","Extended Stored Procedures"),
("MEMORYCLERK_FILETABLE","Memory Clerk (File Table)"),
("MEMORYCLERK_FSCHUNKER","Memory Clerk (FS Chunker)"),
("MEMORYCLERK_FULLTEXT","Full Text"),
("MEMORYCLERK_FULLTEXT_SHMEM","Full-text IG"),
("MEMORYCLERK_HADR","HADR"),
("MEMORYCLERK_HOST","Host"),
("MEMORYCLERK_LANGSVC","Language Service"),
("MEMORYCLERK_LWC","Light Weight Cache"),
("MEMORYCLERK_QSRANGEPREFETCH","QS Range Prefetch"),
("MEMORYCLERK_SERIALIZATION","Serialization"),
("MEMORYCLERK_SNI","SNI"),
("MEMORYCLERK_SOSMEMMANAGER","SOS Memory Manager"),
("MEMORYCLERK_SOSNODE","SOS Node"),
("MEMORYCLERK_SOSOS","SOS Memory Clerk"),
("MEMORYCLERK_SQLBUFFERPOOL","Buffer Pool"),
("MEMORYCLERK_SQLCLR","CLR"),
("MEMORYCLERK_SQLCLRASSEMBLY","CLR Assembly"),
("MEMORYCLERK_SQLCONNECTIONPOOL","Connection Pool"),
("MEMORYCLERK_SQLGENERAL","General"),
("MEMORYCLERK_SQLHTTP","HTTP"),
("MEMORYCLERK_SQLLOGPOOL","Log Pool"),
("MEMORYCLERK_SQLOPTIMIZER","SQL Optimizer"),
("MEMORYCLERK_SQLQERESERVATIONS","SQL Reservations"),
("MEMORYCLERK_SQLQUERYCOMPILE","SQL Query Compile"),
("MEMORYCLERK_SQLQUERYEXEC","SQL Query Exec"),
("MEMORYCLERK_SQLQUERYPLAN","SQL Query Plan"),
("MEMORYCLERK_SQLSERVICEBROKER","SQL Service Broker"),
("MEMORYCLERK_SQLSERVICEBROKERTRANSPORT","Unified Communication Stack"),
("MEMORYCLERK_SQLSOAP","SQL SOAP"),
("MEMORYCLERK_SQLSOAPSESSIONSTORE","SQL SOAP (Session Store)"),
("MEMORYCLERK_SQLSTORENG","SQL Storage Engine"),
("MEMORYCLERK_SQLUTILITIES","SQL Utilities"),
("MEMORYCLERK_SQLXML","SQL XML"),
("MEMORYCLERK_SQLXP","SQL XP"),
("MEMORYCLERK_TRACE_EVTNOTIF","Trace Event Notification"),
("MEMORYCLERK_XE","XE Engine"),
("MEMORYCLERK_XE_BUFFER","XE Buffer"),
("MEMORYCLERK_XTP","In-Memory OLTP"),
("OBJECTSTORE_LBSS","Lbss Cache (Object Store)"),
("OBJECTSTORE_LOCK_MANAGER","Lock Manager (Object Store)"),
("OBJECTSTORE_SECAUDIT_EVENT_BUFFER","Audit Event Buffer (Object Store)"),
("OBJECTSTORE_SERVICE_BROKER","Service Broker (Object Store)"),
("OBJECTSTORE_SNI_PACKET","SNI Packet (Object Store)"),
("OBJECTSTORE_XACT_CACHE","Transactions Cache (Object Store)"),
("USERSTORE_DBMETADATA","DB Metadata (User Store)"),
("USERSTORE_OBJPERM","Object Permissions (User Store)"),
("USERSTORE_SCHEMAMGR","Schema Manager (User Store)"),
("USERSTORE_SXC","SXC (User Store)"),
("USERSTORE_TOKENPERM","Token Permissions (User Store)"),
("USERSTORE_QDSSTMT","QDS Statement Buffer (Pre-persist)"),
("CACHESTORE_QDSRUNTIMESTATS","QDS Runtime Stats (Pre-persist)"),
("CACHESTORE_QDSCONTEXTSETTINGS","QDS Unique Context Settings"),
("MEMORYCLERK_QUERYDISKSTORE","QDS General"),
("MEMORYCLERK_QUERYDISKSTORE_HASHMAP","QDS Query/Plan Hash Table")
) AS clerk_names(system_name,name)
ON mc.type = clerk_names.system_name
GROUP BY ISNULL(clerk_names.name,mc.type)
HAVING SUM(pages_kb) >= 1024
OPTION( RECOMPILE );
HAVING SUM({pages_kb}) >= 1024
OPTION( RECOMPILE );'

IF CAST(LEFT(CAST(SERVERPROPERTY('productversion') as varchar), 2) AS INT) > 10 -- SQL Server 2008 Compat
SET @SQL = REPLACE(REPLACE(@SQL,'{pages_kb}','mc.pages_kb'),'"','''')
ELSE
SET @SQL = REPLACE(REPLACE(@SQL,'{pages_kb}','mc.single_pages_kb + mc.multi_pages_kb'),'"','''')

EXEC(@SQL)
`

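The `REPLACE` calls above are the point of the dynamic SQL: `sys.dm_os_memory_clerks` merged `single_pages_kb` and `multi_pages_kb` into a single `pages_kb` column in SQL Server 2012 (major version 11), so the `{pages_kb}` placeholder is swapped per version before `EXEC`. A rough Go sketch of the same gate, illustrative only since the plugin does this inside T-SQL:

```go
// pagesKbExpr mirrors the version gate in the T-SQL above: SQL Server
// 2012+ (major version > 10) exposes pages_kb; older releases split the
// value across single_pages_kb and multi_pages_kb.
func pagesKbExpr(majorVersion int) string {
	if majorVersion > 10 {
		return "mc.pages_kb"
	}
	return "mc.single_pages_kb + mc.multi_pages_kb"
}
```
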
const sqlDatabaseIOV2 = `SELECT
'sqlserver_database_io' As [measurement],
REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
SERVERPROPERTY('ServerName') AS [host],
DB_NAME([vfs].[database_id]) [database_name],
vfs.io_stall_read_ms AS read_latency_ms,
vfs.num_of_reads AS reads,
@@ -368,13 +373,12 @@ const sqlServerPropertiesV2 = `DECLARE @sys_info TABLE (
IF OBJECT_ID('master.sys.dm_os_sys_info') IS NOT NULL
BEGIN
INSERT INTO @sys_info ( cpu_count, server_memory, uptime )
EXEC('SELECT cpu_count, physical_memory_kb, DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) FROM sys.dm_os_sys_info')
EXEC('SELECT cpu_count, (select total_physical_memory_kb from sys.dm_os_sys_memory) AS physical_memory_kb, DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) FROM sys.dm_os_sys_info')
END

SELECT
'sqlserver_server_properties' As [measurement],
REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
SERVERPROPERTY('ServerName') AS [host],
SUM( CASE WHEN state = 0 THEN 1 ELSE 0 END ) AS db_online,
SUM( CASE WHEN state = 1 THEN 1 ELSE 0 END ) AS db_restoring,
SUM( CASE WHEN state = 2 THEN 1 ELSE 0 END ) AS db_recovering,
@@ -390,11 +394,11 @@ CROSS APPLY (
SELECT *
FROM @sys_info
) AS sinfo
WHERE database_id > 4
OPTION( RECOMPILE );
`

const sqlPerformanceCountersV2 string = `DECLARE @PCounters TABLE
const sqlPerformanceCountersV2 string = `
DECLARE @PCounters TABLE
(
object_name nvarchar(128),
counter_name nvarchar(128),
@@ -403,13 +407,12 @@ const sqlPerformanceCountersV2 string = `DECLARE @PCounters TABLE
cntr_type INT,
Primary Key(object_name, counter_name, instance_name)
);

INSERT INTO @PCounters
SELECT DISTINCT
RTrim(spi.object_name) object_name,
RTrim(spi.counter_name) counter_name,
RTrim(spi.instance_name) instance_name,
spi.cntr_value,
CAST(spi.cntr_value AS BIGINT) AS cntr_value,
spi.cntr_type
FROM sys.dm_os_performance_counters AS spi
WHERE (
@@ -445,7 +448,10 @@ WHERE (
'Memory Grants Pending',
'Free list stalls/sec',
'Buffer cache hit ratio',
'Buffer cache hit ratio base'
'Buffer cache hit ratio base',
'Backup/Restore Throughput/sec',
'Total Server Memory (KB)',
'Target Server Memory (KB)'
)
) OR (
instance_name IN ('_Total','Column store object pool')
@@ -472,8 +478,7 @@ WHERE (
'Redone Bytes/sec',
'XTP Memory Used (KB)'
) OR (
object_name = 'SQLServer:Database Replica'
AND counter_name IN (
counter_name IN (
'Log Bytes Received/sec',
'Log Apply Pending Queue',
'Redone Bytes/sec',
@@ -483,31 +488,69 @@ WHERE (
AND instance_name = '_Total'
)
) OR (
object_name = 'SQLServer:Database Replica'
AND counter_name IN ('Transaction Delay')
counter_name IN ('Transaction Delay')
) OR (
object_name = 'SQLServer:Workload Group Stats'
AND counter_name IN (
counter_name IN (
'CPU usage %',
'CPU usage % base',
'Queued requests',
'Requests completed/sec',
'Blocked tasks'
)
) OR (
counter_name IN (
'Active memory grant amount (KB)',
'Disk Read Bytes/sec',
'Disk Read IO Throttled/sec',
'Disk Read IO/sec',
'Disk Write Bytes/sec',
'Disk Write IO Throttled/sec',
'Disk Write IO/sec',
'Used memory (KB)'
)
) OR (
object_name LIKE '%User Settable%'
OR object_name LIKE '%SQL Errors%'
)

DECLARE @SQL NVARCHAR(MAX)
SET @SQL = REPLACE('
SELECT
"SQLServer:Workload Group Stats" AS object,
counter,
instance,
CAST(vs.value AS BIGINT) AS value,
1
FROM
(
SELECT
rgwg.name AS instance,
rgwg.total_request_count AS "Request Count",
rgwg.total_queued_request_count AS "Queued Request Count",
rgwg.total_cpu_limit_violation_count AS "CPU Limit Violation Count",
rgwg.total_cpu_usage_ms AS "CPU Usage (time)",
' + CASE WHEN SERVERPROPERTY('ProductMajorVersion') > 10 THEN 'rgwg.total_cpu_usage_preemptive_ms AS "Premptive CPU Usage (time)",' ELSE '' END + '
rgwg.total_lock_wait_count AS "Lock Wait Count",
rgwg.total_lock_wait_time_ms AS "Lock Wait Time",
rgwg.total_reduced_memgrant_count AS "Reduced Memory Grant Count"
FROM sys.dm_resource_governor_workload_groups AS rgwg
INNER JOIN sys.dm_resource_governor_resource_pools AS rgrp
ON rgwg.pool_id = rgrp.pool_id
) AS rg
UNPIVOT (
value FOR counter IN ( [Request Count], [Queued Request Count], [CPU Limit Violation Count], [CPU Usage (time)], ' + CASE WHEN SERVERPROPERTY('ProductMajorVersion') > 10 THEN '[Premptive CPU Usage (time)], ' ELSE '' END + '[Lock Wait Count], [Lock Wait Time], [Reduced Memory Grant Count] )
) AS vs'
,'"','''')

INSERT INTO @PCounters
EXEC( @SQL )

SELECT 'sqlserver_performance' AS [measurement],
REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
SERVERPROPERTY('ServerName') AS [host],
pc.object_name AS [object],
pc.counter_name AS [counter],
CASE pc.instance_name WHEN '_Total' THEN 'Total' ELSE ISNULL(pc.instance_name,'') END AS [instance],
CASE WHEN pc.cntr_type = 537003264 AND pc1.cntr_value > 0 THEN (pc.cntr_value * 1.0) / (pc1.cntr_value * 1.0) * 100 ELSE pc.cntr_value END AS [value],
CASE
WHEN pc.cntr_type = 272696576 THEN 'rate'
WHEN pc.cntr_type IN (65792,537003264) THEN 'raw'
ELSE 'unknown'
END AS c_type
CAST(CASE WHEN pc.cntr_type = 537003264 AND pc1.cntr_value > 0 THEN (pc.cntr_value * 1.0) / (pc1.cntr_value * 1.0) * 100 ELSE pc.cntr_value END AS float(10)) AS [value]
FROM @PCounters AS pc
LEFT OUTER JOIN @PCounters AS pc1
ON (
@@ -518,13 +561,12 @@ FROM @PCounters AS pc
AND pc.instance_name = pc1.instance_name
AND pc1.counter_name LIKE '%base'
WHERE pc.counter_name NOT LIKE '% base'
OPTION( RECOMPILE );
OPTION(RECOMPILE);
`

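Worth noting in the query above: counters of type 537003264 (raw fraction) are meaningless on their own and must be divided by their paired `... base` counter, which is exactly what the self-join on `@PCounters` and the `CASE` expression do. A small Go sketch of that calculation, with illustrative names:

```go
// counterValue mirrors the CASE expression above: raw-fraction counters
// (cntr_type 537003264) become a percentage of their "... base" pair;
// all other counter types pass through unchanged.
func counterValue(cntrType int64, value, base float64) float64 {
	const perfRawFraction = 537003264
	if cntrType == perfRawFraction && base > 0 {
		return value / base * 100
	}
	return value
}
```
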
const sqlWaitStatsCategorizedV2 string = `SELECT
'sqlserver_waitstats' AS [measurement],
REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
SERVERPROPERTY('ServerName') AS [host],
ws.wait_type,
wait_time_ms,
wait_time_ms - signal_wait_time_ms AS [resource_wait_ms],
@@ -1086,7 +1128,6 @@ BEGIN
SELECT TOP(1)
'sqlserver_azurestats' AS [measurement],
REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
SERVERPROPERTY('ServerName') AS [host],
avg_cpu_percent,
avg_data_io_percent,
avg_log_write_percent,
@@ -2036,30 +2077,30 @@ DECLARE @delayInterval char(8) = CONVERT(Char(8), DATEADD(SECOND, @secondsBetwee

DECLARE @w1 TABLE
(
WaitType nvarchar(64) NOT NULL,
WaitType nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL,
WaitTimeInMs bigint NOT NULL,
WaitTaskCount bigint NOT NULL,
CollectionDate datetime NOT NULL
)
DECLARE @w2 TABLE
(
WaitType nvarchar(64) NOT NULL,
WaitType nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL,
WaitTimeInMs bigint NOT NULL,
WaitTaskCount bigint NOT NULL,
CollectionDate datetime NOT NULL
)
DECLARE @w3 TABLE
(
WaitType nvarchar(64) NOT NULL
WaitType nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL
)
DECLARE @w4 TABLE
(
WaitType nvarchar(64) NOT NULL,
WaitCategory nvarchar(64) NOT NULL
WaitType nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL,
WaitCategory nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL
)
DECLARE @w5 TABLE
(
WaitCategory nvarchar(64) NOT NULL,
WaitCategory nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL,
WaitTimeInMs bigint NOT NULL,
WaitTaskCount bigint NOT NULL
)
@@ -2300,12 +2341,12 @@ INSERT @w4 (WaitType, WaitCategory) VALUES ('ABR', 'OTHER') ,

INSERT @w1 (WaitType, WaitTimeInMs, WaitTaskCount, CollectionDate)
SELECT
WaitType = wait_type
WaitType = wait_type collate SQL_Latin1_General_CP1_CI_AS
, WaitTimeInMs = SUM(wait_time_ms)
, WaitTaskCount = SUM(waiting_tasks_count)
, CollectionDate = GETDATE()
FROM sys.dm_os_wait_stats
WHERE [wait_type] NOT IN
WHERE [wait_type] collate SQL_Latin1_General_CP1_CI_AS NOT IN
(
SELECT WaitType FROM @w3
)
@@ -2316,12 +2357,12 @@ WAITFOR DELAY @delayInterval;

INSERT @w2 (WaitType, WaitTimeInMs, WaitTaskCount, CollectionDate)
SELECT
WaitType = wait_type
WaitType = wait_type collate SQL_Latin1_General_CP1_CI_AS
, WaitTimeInMs = SUM(wait_time_ms)
, WaitTaskCount = SUM(waiting_tasks_count)
, CollectionDate = GETDATE()
FROM sys.dm_os_wait_stats
WHERE [wait_type] NOT IN
WHERE [wait_type] collate SQL_Latin1_General_CP1_CI_AS NOT IN
(
SELECT WaitType FROM @w3
)

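The twin `@w1`/`@w2` tables implement interval sampling: `sys.dm_os_wait_stats` is cumulative since instance start, so the script snapshots it, waits `@delayInterval`, snapshots again, and reports the difference (the subtraction itself happens later in the script, outside the hunks shown here). A minimal Go sketch of that delta step, with illustrative types:

```go
// waitDelta computes per-interval wait time from two cumulative
// snapshots, the same subtraction the script performs on @w1 and @w2.
func waitDelta(first, second map[string]int64) map[string]int64 {
	delta := make(map[string]int64, len(second))
	for waitType, totalMs := range second {
		if d := totalMs - first[waitType]; d > 0 {
			delta[waitType] = d
		}
	}
	return delta
}
```
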
@@ -10,6 +10,14 @@

## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
max_tcp_connections = 250

## Enable TCP keep alive probes (default=false)
tcp_keep_alive = false

## Specifies the keep-alive period for an active network connection.
## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
## Defaults to the OS configuration.
# tcp_keep_alive_period = "2h"

## Address and port to host UDP listener on
service_address = ":8125"
@@ -157,6 +165,8 @@ metric type:
- **protocol** string: Protocol used in listener - tcp or udp options
- **max_tcp_connections** int: Maximum number of concurrent TCP connections
to allow. Used when protocol is set to tcp.
- **tcp_keep_alive** boolean: Enable TCP keep alive probes
- **tcp_keep_alive_period** internal.Duration: Specifies the keep-alive period for an active network connection
- **service_address** string: Address to listen for statsd UDP packets on
- **delete_gauges** boolean: Delete gauges on every collection interval
- **delete_counters** boolean: Delete counters on every collection interval

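The keep-alive options matter mainly for clients that hold one long-lived TCP connection to the listener instead of reconnecting per metric. A minimal client sketch; it assumes the statsd input runs with `protocol = "tcp"` on the default `:8125` address, which is not shown explicitly in this diff:

```go
package main

import (
	"fmt"
	"net"
)

// sendCounter sends one statsd counter over a TCP connection.
// "localhost:8125" with protocol = "tcp" is an assumed setup.
func sendCounter(bucket string) error {
	conn, err := net.Dial("tcp", "localhost:8125")
	if err != nil {
		return err
	}
	defer conn.Close()
	// Standard statsd wire format: <bucket>:<value>|<type>
	_, err = fmt.Fprintf(conn, "%s:1|c\n", bucket)
	return err
}
```
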
@@ -113,6 +113,9 @@ type Statsd struct {

	MaxTCPConnections int `toml:"max_tcp_connections"`

	TCPKeepAlive bool `toml:"tcp_keep_alive"`
	TCPKeepAlivePeriod *internal.Duration `toml:"tcp_keep_alive_period"`

	graphiteParser *graphite.GraphiteParser

	acc telegraf.Accumulator
@@ -177,6 +180,14 @@ const sampleConfig = `
## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
max_tcp_connections = 250

## Enable TCP keep alive probes (default=false)
tcp_keep_alive = false

## Specifies the keep-alive period for an active network connection.
## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
## Defaults to the OS configuration.
# tcp_keep_alive_period = "2h"

## Address and port to host UDP listener on
service_address = ":8125"

@@ -361,6 +372,18 @@ func (s *Statsd) tcpListen() error {
		return err
	}

	if s.TCPKeepAlive {
		if err = conn.SetKeepAlive(true); err != nil {
			return err
		}

		if s.TCPKeepAlivePeriod != nil {
			if err = conn.SetKeepAlivePeriod(s.TCPKeepAlivePeriod.Duration); err != nil {
				return err
			}
		}
	}

	select {
	case <-s.accept:
		// not over connection limit, handle the connection properly.
@@ -863,6 +886,7 @@ func init() {
	Protocol: defaultProtocol,
	ServiceAddress: ":8125",
	MaxTCPConnections: 250,
	TCPKeepAlive: false,
	MetricSeparator: "_",
	AllowedPendingMessages: defaultAllowPendingMessage,
	DeleteCounters: true,

@@ -14,6 +14,11 @@ This plugin gathers metrics about network interface and protocol usage (Linux only)
##
# interfaces = ["eth*", "enp0s[0-1]", "lo"]
##
## On linux systems telegraf also collects protocol stats.
## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
##
# ignore_protocol_stats = false
##
```

### Measurements & Fields:

@@ -14,8 +14,9 @@ type NetIOStats struct {
	filter filter.Filter
	ps PS

	skipChecks bool
	Interfaces []string
	skipChecks bool
	IgnoreProtocolStats bool
	Interfaces []string
}

func (_ *NetIOStats) Description() string {
@@ -28,6 +29,12 @@ var netSampleConfig = `
## regardless of status.
##
# interfaces = ["eth0"]
##
## On linux systems telegraf also collects protocol stats.
## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
##
# ignore_protocol_stats = false
##
`

func (_ *NetIOStats) SampleConfig() string {
@@ -91,19 +98,21 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error {

	// Get system wide stats for different network protocols
	// (ignore these stats if the call fails)
	netprotos, _ := s.ps.NetProto()
	fields := make(map[string]interface{})
	for _, proto := range netprotos {
		for stat, value := range proto.Stats {
			name := fmt.Sprintf("%s_%s", strings.ToLower(proto.Protocol),
				strings.ToLower(stat))
			fields[name] = value
	if !s.IgnoreProtocolStats {
		netprotos, _ := s.ps.NetProto()
		fields := make(map[string]interface{})
		for _, proto := range netprotos {
			for stat, value := range proto.Stats {
				name := fmt.Sprintf("%s_%s", strings.ToLower(proto.Protocol),
					strings.ToLower(stat))
				fields[name] = value
			}
		}
		tags := map[string]string{
			"interface": "all",
		}
		acc.AddFields("net", fields, tags)
	}
	tags := map[string]string{
		"interface": "all",
	}
	acc.AddFields("net", fields, tags)

	return nil
}

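A sketch of exercising the new flag from an in-package test (required because `ps` is unexported); `mockPS`, a stub that returns no interfaces, is an assumed helper:

```go
// In-package test sketch; mockPS is an assumed stub returning no
// interfaces, so any accumulated "net" point would have to come from
// the aggregate protocol-stats path that the flag disables.
func TestGatherIgnoresProtocolStats(t *testing.T) {
	var acc testutil.Accumulator
	n := &NetIOStats{ps: &mockPS{}, IgnoreProtocolStats: true}

	require.NoError(t, n.Gather(&acc))

	// With protocol stats ignored, nothing is accumulated at all.
	require.False(t, acc.HasMeasurement("net"))
}
```
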
Some files were not shown because too many files have changed in this diff.