Compare commits
94 Commits
release-1. ... aggregatin

Commits (SHA1):
4f61d2a09c, c03e8918a2, 83345ec2b3, f094f83da5, 0768022240, 92956104d6, 964856eb5f, 377547aa4c, 1662b6feb9, 908170b207, ec47cab950, 06671777e9, 46a8bdbfe5, abdff033cc, 535e9e9a68, c256f17870, b8d5df2076, 538baee8a4, d3d8d52e2f, 286f14f730, 9f4752ba12, f639f994b5, 911f0e4b57, 86a3b8cad4, a3500cc33a, bf0c59f56c, c7b3667ac4, 638853be05, ee9a2f73a1, 648d7ae922, 13937d511d, fe4d3cd117, eacf11fcd8, 3a8ca4d08d, 00e3363d45, 29b37e67c2, 42fee824f8, 120be7e87b, 9e4a330ee5, 78d4a95ce6, 571ce86d10, dd2c60e620, 1486ae25c0, da5b46e770, 9ef902f4a1, 058510464c, 0b4f4b089f, 7c592558d8, 1e1d9e8acb, 3b3d16273d, 3046f957d5, bcf1cf59c1, c8d2ba2bc8, 04ab9a4fe4, e4009234e9, 8d516d26e9, 0a02363c03, 2c19d74829, 3f4e1af222, 10c7324d74, 55cfc383f3, 7b8f12b377, 15f19375e7, 93e2381f42, 387bae9b9f, 34416e0da8, 32f56140a3, 64a23c0b18, af68975e2f, 0223b22b3e, 1890efbb70, e4f8a82ee6, a28de4b5cd, caac224276, fe31ce9d7d, 01ede2ea0b, fb6390e7ab, ff40da6019, 43a044542e, 00203fa889, 7177e0473f, 252101b7c6, efdf36746c, df78133bf3, bf915fa79c, c160b56229, 627f0e5d9d, 4551b4c5d2, a9afd2f030, caf860bc88, beeab2c509, a50acadc44, 265d0e6d84, 413cf6dd23
@@ -1,49 +1,105 @@
---
defaults: &defaults
  docker:
    - image: 'circleci/golang:1.9.4'
  working_directory: '/go/src/github.com/influxdata/telegraf'
defaults:
defaults: &defaults
  working_directory: '/go/src/github.com/influxdata/telegraf'
go-1_8: &go-1_8
  docker:
    - image: 'circleci/golang:1.8.7'
go-1_9: &go-1_9
  docker:
    - image: 'circleci/golang:1.9.5'
go-1_10: &go-1_10
  docker:
    - image: 'circleci/golang:1.10.1'

version: 2
jobs:
  build:
    <<: *defaults
  deps:
    <<: [ *defaults, *go-1_10 ]
    steps:
      - checkout
      - run: 'make deps'
      - run: 'make test-ci'
  release:
    <<: *defaults
      - persist_to_workspace:
          root: '/go/src'
          paths:
            - '*'
  test-go-1.8:
    <<: [ *defaults, *go-1_8 ]
    steps:
      - checkout
      - attach_workspace:
          at: '/go/src'
      - run: 'make test-ci'
  test-go-1.9:
    <<: [ *defaults, *go-1_9 ]
    steps:
      - attach_workspace:
          at: '/go/src'
      - run: 'make test-ci'
  test-go-1.10:
    <<: [ *defaults, *go-1_10 ]
    steps:
      - attach_workspace:
          at: '/go/src'
      - run: 'make test-ci'
      - run: 'GOARCH=386 make test-ci'
  release:
    <<: [ *defaults, *go-1_10 ]
    steps:
      - attach_workspace:
          at: '/go/src'
      - run: './scripts/release.sh'
      - store_artifacts:
          path: './artifacts'
          destination: '.'
  nightly:
    <<: *defaults
    <<: [ *defaults, *go-1_10 ]
    steps:
      - checkout
      - attach_workspace:
          at: '/go/src'
      - run: './scripts/release.sh'
      - store_artifacts:
          path: './artifacts'
          destination: '.'

workflows:
  version: 2
  build_and_release:
    jobs:
      - 'build'
      - 'deps'
      - 'test-go-1.8':
          requires:
            - 'deps'
      - 'test-go-1.9':
          requires:
            - 'deps'
      - 'test-go-1.10':
          requires:
            - 'deps'
      - 'release':
          requires:
            - 'build'
            - 'test-go-1.8'
            - 'test-go-1.9'
            - 'test-go-1.10'
  nightly:
    jobs:
      - 'build'
      - 'deps'
      - 'test-go-1.8':
          requires:
            - 'deps'
      - 'test-go-1.9':
          requires:
            - 'deps'
      - 'test-go-1.10':
          requires:
            - 'deps'
      - 'nightly':
          requires:
            - 'build'
            - 'test-go-1.8'
            - 'test-go-1.9'
            - 'test-go-1.10'
    triggers:
      - schedule:
          cron: "0 18 * * *"
          cron: "0 7 * * *"
          filters:
            branches:
              only:
.gitignore (1 change, vendored)
@@ -1,3 +1,4 @@
/build
/telegraf
/telegraf.exe
/telegraf.gz
CHANGELOG.md (52 changes)
@@ -1,37 +1,45 @@
## v1.6.4 [2018-06-05]
## v1.7 [unreleased]

### Release Notes

- The `cassandra` input plugin has been deprecated in favor of the `jolokia2`
input plugin which is much more configurable and more performant. There is
an [example configuration](./plugins/inputs/jolokia2/examples) to help you
get started.

### New Inputs

- [fibaro](./plugins/inputs/fibaro/README.md) - Contributed by @dynek
- [mcrouter](./plugins/inputs/mcrouter/README.md) - Contributed by @cthayer
- [nvidia_smi](./plugins/inputs/nvidia_smi/README.md) - Contributed by @jackzampolin

### Features

- [#3964](https://github.com/influxdata/telegraf/pull/3964): Add repl_oplog_window_sec metric to mongodb input.
- [#3819](https://github.com/influxdata/telegraf/pull/3819): Add per-host shard metrics in mongodb input.
- [#3999](https://github.com/influxdata/telegraf/pull/3999): Skip files with leading `..` in config directory.
- [#4021](https://github.com/influxdata/telegraf/pull/4021): Add TLS support to socket_writer and socket_listener plugins.
- [#4025](https://github.com/influxdata/telegraf/pull/4025): Add snmp input option to strip non fixed length index suffixes.
- [#4035](https://github.com/influxdata/telegraf/pull/4035): Add server version tag to docker input.
- [#4044](https://github.com/influxdata/telegraf/pull/4044): Add support for LeoFS 1.4 to leofs input.
- [#4068](https://github.com/influxdata/telegraf/pull/4068): Add parameter to force the interval of gather for sysstat.
- [#3877](https://github.com/influxdata/telegraf/pull/3877): Support busybox ping in the ping input.
- [#4077](https://github.com/influxdata/telegraf/pull/4077): Add input plugin for McRouter.

### Bugfixes

- [#4203](https://github.com/influxdata/telegraf/issues/4203): Fix snmp overriding of auto-configured table fields.
- [#4218](https://github.com/influxdata/telegraf/issues/4218): Fix uint support in cloudwatch output.
- [#4188](https://github.com/influxdata/telegraf/pull/4188): Fix documentation of instance_name option in varnish input.
- [#4195](https://github.com/influxdata/telegraf/pull/4195): Revert to previous aerospike library version due to memory leak.
- [#4018](https://github.com/influxdata/telegraf/pull/4018): Write to working file outputs if any files are not writeable.
- [#4036](https://github.com/influxdata/telegraf/pull/4036): Add all win_perf_counters fields for a series in a single metric.

## v1.6.3 [2018-05-21]

### Bugfixes

- [#4127](https://github.com/influxdata/telegraf/issues/4127): Fix intermittent panic in aerospike input.
- [#4130](https://github.com/influxdata/telegraf/issues/4130): Fix connection leak in jolokia2_agent.
- [#4136](https://github.com/influxdata/telegraf/pull/4130): Fix jolokia2 timeout parsing.
- [#4142](https://github.com/influxdata/telegraf/pull/4142): Fix error parsing dropwizard metrics.
- [#4149](https://github.com/influxdata/telegraf/issues/4149): Fix librato output support for uint and bool.
- [#4176](https://github.com/influxdata/telegraf/pull/4176): Fix waitgroup deadlock if url is incorrect in apache input.

## v1.6.2 [2018-05-08]
## v1.6.2 [unreleased]

### Bugfixes

- [#4078](https://github.com/influxdata/telegraf/pull/4078): Use same timestamp for fields in system input.
- [#4091](https://github.com/influxdata/telegraf/pull/4091): Fix handling of uint64 in datadog output.
- [#4099](https://github.com/influxdata/telegraf/pull/4099): Ignore UTF8 BOM in JSON parser.
- [#4104](https://github.com/influxdata/telegraf/issues/4104): Fix case for slave metrics in mysql input.
- [#4110](https://github.com/influxdata/telegraf/issues/4110): Fix uint support in cratedb output.

## v1.6.1 [2018-04-23]

### Bugfixes
- [#3835](https://github.com/influxdata/telegraf/pull/3835): Report mem input fields as gauges instead counters.

- [#3835](https://github.com/influxdata/telegraf/issues/3835): Report mem input fields as gauges instead counters.
- [#4030](https://github.com/influxdata/telegraf/issues/4030): Fix graphite outputs unsigned integers in wrong format.
Godeps (5 changes)
@@ -26,7 +26,7 @@ github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
github.com/google/go-cmp f94e52cad91c65a63acc1e75d4be223ea22e99bc
github.com/gorilla/mux 392c28fe23e1c45ddba891b0320b3b5df220beea
github.com/gorilla/mux 53c1911da2b537f792e7cafcb446b05ffe33b996
github.com/go-redis/redis 73b70592cdaa9e6abdfcfbf97b4a90d80728c836
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
@@ -48,6 +48,7 @@ github.com/multiplay/go-ts3 07477f49b8dfa3ada231afc7b7b17617d42afe8e
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/gnatsd 393bbb7c031433e68707c8810fda0bfcfbe6ab9b
github.com/nats-io/go-nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nuid 289cccf02c178dc782430d534e3c1f5b72af807f
github.com/nsqio/go-nsq eee57a3ac4174c55924125bb15eeeda8cffb6e6f
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
@@ -65,7 +66,7 @@ github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
github.com/shirou/gopsutil c95755e4bcd7a62bb8bd33f3a597a7c7f35e2cf3
github.com/shirou/gopsutil a5c2888e464b14fa882c2a059e0f95716bd45cf1
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
Makefile (13 changes)
@@ -4,7 +4,7 @@ BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
COMMIT := $(shell git rev-parse --short HEAD)
GOFILES ?= $(shell git ls-files '*.go')
GOFMT ?= $(shell gofmt -l $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)))
BUILDFLAGS ?=
BUILDFLAGS ?=

ifdef GOBIN
PATH := $(GOBIN):$(PATH)
@@ -12,8 +12,6 @@ else
PATH := $(subst :,/bin:,$(GOPATH))/bin:$(PATH)
endif

TELEGRAF := telegraf$(shell go tool dist env | grep -q 'GOOS=.windows.' && echo .exe)

LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)
ifdef VERSION
LDFLAGS += -X main.version=$(VERSION)
@@ -29,7 +27,7 @@ deps:
	gdm restore

telegraf:
	go build -i -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go
	go build -i -ldflags "$(LDFLAGS)" ./cmd/telegraf

go-install:
	go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf
@@ -46,7 +44,7 @@ fmt:

fmtcheck:
	@echo '[INFO] running gofmt to identify incorrectly formatted code...'
	@if [ ! -z $(GOFMT) ]; then \
	@if [ ! -z "$(GOFMT)" ]; then \
		echo "[ERROR] gofmt has found errors in the following files:" ; \
		echo "$(GOFMT)" ; \
		echo "" ;\
@@ -60,12 +58,13 @@ test-windows:
	go test ./plugins/inputs/win_perf_counters/...
	go test ./plugins/inputs/win_services/...
	go test ./plugins/inputs/procstat/...
	go test ./plugins/inputs/ntpq/...

# vet runs the Go source code static analysis tool `vet` to find
# any common errors.
vet:
	@echo 'go vet $$(go list ./... | grep -v ./plugins/parsers/influx)'
	@go vet $$(go list ./... | grep -v ./plugins/parsers/influx) ; if [ $$? -eq 1 ]; then \
	@go vet $$(go list ./... | grep -v ./plugins/parsers/influx) ; if [ $$? -ne 0 ]; then \
		echo ""; \
		echo "go vet has found suspicious constructs. Please remediate any reported errors"; \
		echo "to fix them before submitting code for review."; \
@@ -73,7 +72,7 @@ vet:
	fi

test-ci: fmtcheck vet
	go test -short./...
	go test -short ./...

test-all: fmtcheck vet
	go test ./...
@@ -5,7 +5,7 @@ and writing metrics.

Design goals are to have a minimal memory footprint with a plugin system so
that developers in the community can easily add support for collecting metrics
from local or remote services.
from local or remote services. For an example configuration reference…

Telegraf is plugin-driven and has the concept of 4 distinct plugins:

@@ -130,7 +130,7 @@ configuration options.
* [aws cloudwatch](./plugins/inputs/cloudwatch)
* [bcache](./plugins/inputs/bcache)
* [bond](./plugins/inputs/bond)
* [cassandra](./plugins/inputs/cassandra)
* [cassandra](./plugins/inputs/cassandra) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
* [ceph](./plugins/inputs/ceph)
* [cgroup](./plugins/inputs/cgroup)
* [chrony](./plugins/inputs/chrony)
@@ -147,6 +147,7 @@ configuration options.
* [elasticsearch](./plugins/inputs/elasticsearch)
* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
* [fail2ban](./plugins/inputs/fail2ban)
* [fibaro](./plugins/inputs/fibaro)
* [filestat](./plugins/inputs/filestat)
* [fluentd](./plugins/inputs/fluentd)
* [graylog](./plugins/inputs/graylog)
@@ -162,12 +163,13 @@ configuration options.
* [iptables](./plugins/inputs/iptables)
* [ipset](./plugins/inputs/ipset)
* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
* [jolokia2](./plugins/inputs/jolokia2)
* [jolokia2](./plugins/inputs/jolokia2) (java, cassandra, kafka)
* [kapacitor](./plugins/inputs/kapacitor)
* [kubernetes](./plugins/inputs/kubernetes)
* [leofs](./plugins/inputs/leofs)
* [lustre2](./plugins/inputs/lustre2)
* [mailchimp](./plugins/inputs/mailchimp)
* [mcrouter](./plugins/inputs/mcrouter)
* [memcached](./plugins/inputs/memcached)
* [mesos](./plugins/inputs/mesos)
* [minecraft](./plugins/inputs/minecraft)
@@ -180,6 +182,7 @@ configuration options.
* [nsq](./plugins/inputs/nsq)
* [nstat](./plugins/inputs/nstat)
* [ntpq](./plugins/inputs/ntpq)
* [nvidia_smi](./plugins/inputs/nvidia_smi)
* [openldap](./plugins/inputs/openldap)
* [opensmtpd](./plugins/inputs/opensmtpd)
* [pf](./plugins/inputs/pf)
@@ -203,11 +203,6 @@ func (a *Agent) Test() error {
		input.SetTrace(true)
		input.SetDefaultTags(a.Config.Tags)

		fmt.Printf("* Plugin: %s, Collection 1\n", input.Name())
		if input.Config.Interval != 0 {
			fmt.Printf("* Internal: %s\n", input.Config.Interval)
		}

		if err := input.Input.Gather(acc); err != nil {
			return err
		}
@@ -217,7 +212,6 @@ func (a *Agent) Test() error {
		switch input.Name() {
		case "inputs.cpu", "inputs.mongodb", "inputs.procstat":
			time.Sleep(500 * time.Millisecond)
			fmt.Printf("* Plugin: %s, Collection 2\n", input.Name())
			if err := input.Input.Gather(acc); err != nil {
				return err
			}
@@ -13,11 +13,11 @@ platform: x64

install:
  - IF NOT EXIST "C:\Cache" mkdir C:\Cache
  - IF NOT EXIST "C:\Cache\go1.9.4.msi" curl -o "C:\Cache\go1.9.4.msi" https://storage.googleapis.com/golang/go1.9.4.windows-amd64.msi
  - IF NOT EXIST "C:\Cache\go1.10.1.msi" curl -o "C:\Cache\go1.10.1.msi" https://storage.googleapis.com/golang/go1.10.1.windows-amd64.msi
  - IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip
  - IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip
  - IF EXIST "C:\Go" rmdir /S /Q C:\Go
  - msiexec.exe /i "C:\Cache\go1.9.4.msi" /quiet
  - msiexec.exe /i "C:\Cache\go1.10.1.msi" /quiet
  - 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y
  - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
  - go version
@@ -57,7 +57,7 @@ var fService = flag.String("service", "",
var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)")

var (
	nextVersion = "1.6.0"
	nextVersion = "1.7.0"
	version string
	commit string
	branch string
@@ -73,48 +73,6 @@ func init() {
	}
}

const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.

Usage:

  telegraf [commands|flags]

The commands & flags are:

  config       print out full sample configuration to stdout
  version      print the version to stdout

  --config <file>      configuration file to load
  --test               gather metrics once, print them to stdout, and exit
  --config-directory   directory containing additional *.conf files
  --input-filter       filter the input plugins to enable, separator is :
  --output-filter      filter the output plugins to enable, separator is :
  --usage              print usage for a plugin, ie, 'telegraf --usage mysql'
  --debug              print metrics as they're generated to stdout
  --pprof-addr         pprof address to listen on, format: localhost:6060 or :6060
  --quiet              run in quiet mode

Examples:

  # generate a telegraf config file:
  telegraf config > telegraf.conf

  # generate config with only cpu input & influxdb output plugins defined
  telegraf --input-filter cpu --output-filter influxdb config

  # run a single telegraf collection, outputing metrics to stdout
  telegraf --config telegraf.conf --test

  # run telegraf with all plugins defined in config file
  telegraf --config telegraf.conf

  # run telegraf, enabling the cpu & memory input, and influxdb output plugins
  telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb

  # run telegraf with pprof
  telegraf --config telegraf.conf --pprof-addr localhost:6060
`

var stop chan struct{}

func reloadLoop(
@@ -365,7 +323,7 @@ func main() {
		DisplayName: "Telegraf Data Collector Service",
		Description: "Collects data using a series of plugins and publishes it to" +
			"another series of plugins.",
		Arguments: []string{"-config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
		Arguments: []string{"--config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
	}

	prg := &program{
@@ -378,14 +336,14 @@ func main() {
	if err != nil {
		log.Fatal("E! " + err.Error())
	}
	// Handle the -service flag here to prevent any issues with tooling that
	// Handle the --service flag here to prevent any issues with tooling that
	// may not have an interactive session, e.g. installing from Ansible.
	if *fService != "" {
		if *fConfig != "" {
			(*svcConfig).Arguments = []string{"-config", *fConfig}
			(*svcConfig).Arguments = []string{"--config", *fConfig}
		}
		if *fConfigDirectory != "" {
			(*svcConfig).Arguments = append((*svcConfig).Arguments, "-config-directory", *fConfigDirectory)
			(*svcConfig).Arguments = append((*svcConfig).Arguments, "--config-directory", *fConfigDirectory)
		}
		err := service.Control(s, *fService)
		if err != nil {
cmd/telegraf/usage.go (45 changes, new file)
@@ -0,0 +1,45 @@
// +build !windows

package main

const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.

Usage:

  telegraf [commands|flags]

The commands & flags are:

  config       print out full sample configuration to stdout
  version      print the version to stdout

  --config <file>      configuration file to load
  --test               gather metrics once, print them to stdout, and exit
  --config-directory   directory containing additional *.conf files
  --input-filter       filter the input plugins to enable, separator is :
  --output-filter      filter the output plugins to enable, separator is :
  --usage              print usage for a plugin, ie, 'telegraf --usage mysql'
  --debug              print metrics as they're generated to stdout
  --pprof-addr         pprof address to listen on, format: localhost:6060 or :6060
  --quiet              run in quiet mode

Examples:

  # generate a telegraf config file:
  telegraf config > telegraf.conf

  # generate config with only cpu input & influxdb output plugins defined
  telegraf --input-filter cpu --output-filter influxdb config

  # run a single telegraf collection, outputing metrics to stdout
  telegraf --config telegraf.conf --test

  # run telegraf with all plugins defined in config file
  telegraf --config telegraf.conf

  # run telegraf, enabling the cpu & memory input, and influxdb output plugins
  telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb

  # run telegraf with pprof
  telegraf --config telegraf.conf --pprof-addr localhost:6060
`
cmd/telegraf/usage_windows.go (54 changes, new file)
@@ -0,0 +1,54 @@
// +build windows

package main

const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.

Usage:

  telegraf [commands|flags]

The commands & flags are:

  config       print out full sample configuration to stdout
  version      print the version to stdout

  --config <file>      configuration file to load
  --test               gather metrics once, print them to stdout, and exit
  --config-directory   directory containing additional *.conf files
  --input-filter       filter the input plugins to enable, separator is :
  --output-filter      filter the output plugins to enable, separator is :
  --usage              print usage for a plugin, ie, 'telegraf --usage mysql'
  --debug              print metrics as they're generated to stdout
  --pprof-addr         pprof address to listen on, format: localhost:6060 or :6060
  --quiet              run in quiet mode

  --console            run as console application
  --service            operate on service, one of: install, uninstall, start, stop

Examples:

  # generate a telegraf config file:
  telegraf config > telegraf.conf

  # generate config with only cpu input & influxdb output plugins defined
  telegraf --input-filter cpu --output-filter influxdb config

  # run a single telegraf collection, outputing metrics to stdout
  telegraf --config telegraf.conf --test

  # run telegraf with all plugins defined in config file
  telegraf --config telegraf.conf

  # run telegraf, enabling the cpu & memory input, and influxdb output plugins
  telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb

  # run telegraf with pprof
  telegraf --config telegraf.conf --pprof-addr localhost:6060

  # run telegraf without service controller
  telegraf --console install --config "C:\Program Files\Telegraf\telegraf.conf"

  # install telegraf service
  telegraf --service install --config "C:\Program Files\Telegraf\telegraf.conf"
`
@@ -79,15 +79,15 @@ services:
      - "389:389"
      - "636:636"
  crate:
    image: crate/crate
    ports:
      - "4200:4200"
      - "4230:4230"
      - "5432:5432"
    command:
      - crate
      - -Cnetwork.host=0.0.0.0
      - -Ctransport.host=localhost
      - -Clicense.enterprise=false
    environment:
      - CRATE_HEAP_SIZE=128m
    image: crate/crate
    ports:
      - "4200:4200"
      - "4230:4230"
    command:
      - crate
      - -Cnetwork.host=0.0.0.0
      - -Ctransport.host=localhost
      - -Clicense.enterprise=false
    environment:
      - CRATE_HEAP_SIZE=128m
      - JAVA_OPTS='-Xms256m -Xmx256m'
@@ -153,11 +153,11 @@ The inverse of `namepass`. If a match is found the point is discarded. This
is tested on points after they have passed the `namepass` test.
* **fieldpass**:
An array of glob pattern strings. Only fields whose field key matches a
pattern in this list are emitted. Not available for outputs.
pattern in this list are emitted.
* **fielddrop**:
The inverse of `fieldpass`. Fields with a field key matching one of the
patterns will be discarded from the point. This is tested on points after
they have passed the `fieldpass` test. Not available for outputs.
they have passed the `fieldpass` test.
* **tagpass**:
A table mapping tag keys to arrays of glob pattern strings. Only points
that contain a tag key in the table and a tag value matching one of its
@@ -5,7 +5,7 @@ the general steps to set it up.

1. Obtain the telegraf windows distribution
2. Create the directory `C:\Program Files\Telegraf` (if you install in a different
   location simply specify the `-config` parameter with the desired location)
   location simply specify the `--config` parameter with the desired location)
3. Place the telegraf.exe and the telegraf.conf config file into `C:\Program Files\Telegraf`
4. To install the service into the Windows Service Manager, run the following in PowerShell as an administrator (If necessary, you can wrap any spaces in the file paths in double quotes ""):

@@ -26,6 +26,15 @@ the general steps to set it up.
   > net start telegraf
   ```

## Config Directory

You can also specify a `--config-directory` for the service to use:
1. Create a directory for config snippets: `C:\Program Files\Telegraf\telegraf.d`
2. Include the `--config-directory` option when registering the service:
   ```
   > C:\"Program Files"\Telegraf\telegraf.exe --service install --config C:\"Program Files"\Telegraf\telegraf.conf --config-directory C:\"Program Files"\Telegraf\telegraf.d
   ```

## Other supported operations

Telegraf can manage its own service through the --service flag:
@@ -37,7 +46,6 @@ Telegraf can manage its own service through the --service flag:
| `telegraf.exe --service start` | Start the telegraf service |
| `telegraf.exe --service stop` | Stop the telegraf service |


Troubleshooting common error #1067

When installing as service in Windows, always double check to specify full path of the config file, otherwise windows service will fail to start
@@ -398,7 +398,7 @@
#   ##  0 : No compression
#   ##  1 : Gzip compression
#   ##  2 : Snappy compression
#   # compression_codec = 0
#   compression_codec = 0
#
#   ## RequiredAcks is used in Produce Requests to tell the broker how many
#   ## replica acknowledgements it must see before responding
@@ -414,11 +414,10 @@
#   ##    received the data. This option provides the best durability, we
#   ##    guarantee that no messages will be lost as long as at least one in
#   ##    sync replica remains.
#   # required_acks = -1
#   required_acks = -1
#
#   ## The maximum number of times to retry sending a metric before failing
#   ## until the next flush.
#   # max_retry = 3
#   ## The total number of times to retry sending a message
#   max_retry = 3
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
@@ -435,7 +434,7 @@
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   # data_format = "influx"
#   data_format = "influx"


# # Configuration for the AWS Kinesis output.
@@ -2831,7 +2830,7 @@
#
#   ## Optional name for the varnish instance (or working directory) to query
#   ## Usually appened after -n in varnish cli
#   # instance_name = instanceName
#   #name = instanceName


# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
@@ -3018,6 +3017,7 @@
#   # watch_method = "inotify"
#
#   ## Parse logstash-style "grok" patterns:
#   ## Telegraf built-in parsing patterns: https://goo.gl/dkay10
#   [inputs.logparser.grok]
#     ## This is a list of patterns to check the given log file(s) for.
#     ## Note that adding patterns here increases processing time. The most
@@ -519,7 +519,13 @@ func (c *Config) LoadDirectory(path string) error {
			log.Printf("W! Telegraf is not permitted to read %s", thispath)
			return nil
		}

		if info.IsDir() {
			if strings.HasPrefix(info.Name(), "..") {
				// skip Kubernetes mounts, prevening loading the same config twice
				return filepath.SkipDir
			}

			return nil
		}
		name := info.Name()
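The hunk above matches changelog entry #3999 (skip files with leading `..` in the config directory). A minimal standalone sketch, not the telegraf code itself, of the same walk-and-skip pattern; the directory path is a placeholder:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	// Walk a config directory, skipping "..data"-style directories that
	// Kubernetes creates for mounted ConfigMaps, so the same file is not
	// discovered twice through both the symlinked and the real path.
	filepath.Walk("/etc/telegraf/telegraf.d", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return nil // unreadable entries are logged and skipped upstream
		}
		if info.IsDir() {
			if strings.HasPrefix(info.Name(), "..") {
				return filepath.SkipDir
			}
			return nil
		}
		fmt.Println("would load:", path)
		return nil
	})
}
```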
internal/config/testdata/subconfig/..4984_10_04_08_28_06.119/invalid-config.conf (4 changes, vendored, new file)
@@ -0,0 +1,4 @@
# This invalid config file should be skipped during testing
# as it is an ..data folder

[[outputs.influxdb
@@ -112,9 +112,10 @@ func RandomString(n int) string {
	return string(bytes)
}

// GetTLSConfig gets a tls.Config object from the given certs, key, and CA files.
// you must give the full path to the files.
// If all files are blank and InsecureSkipVerify=false, returns a nil pointer.
// GetTLSConfig gets a tls.Config object from the given certs, key, and CA files
// for use with a client.
// The full path to each file must be provided.
// Returns a nil pointer if all files are blank and InsecureSkipVerify=false.
func GetTLSConfig(
	SSLCert, SSLKey, SSLCA string,
	InsecureSkipVerify bool,
@@ -155,6 +156,50 @@ func GetTLSConfig(
	return t, nil
}

// GetServerTLSConfig gets a tls.Config object from the given certs, key, and one or more CA files
// for use with a server.
// The full path to each file must be provided.
// Returns a nil pointer if all files are blank.
func GetServerTLSConfig(
	TLSCert, TLSKey string,
	TLSAllowedCACerts []string,
) (*tls.Config, error) {
	if TLSCert == "" && TLSKey == "" && len(TLSAllowedCACerts) == 0 {
		return nil, nil
	}

	t := &tls.Config{}

	if len(TLSAllowedCACerts) != 0 {
		caCertPool := x509.NewCertPool()
		for _, cert := range TLSAllowedCACerts {
			c, err := ioutil.ReadFile(cert)
			if err != nil {
				return nil, errors.New(fmt.Sprintf("Could not load TLS CA: %s",
					err))
			}
			caCertPool.AppendCertsFromPEM(c)
		}
		t.ClientCAs = caCertPool
		t.ClientAuth = tls.RequireAndVerifyClientCert
	}

	if TLSCert != "" && TLSKey != "" {
		cert, err := tls.LoadX509KeyPair(TLSCert, TLSKey)
		if err != nil {
			return nil, errors.New(fmt.Sprintf(
				"Could not load TLS client key/certificate from %s:%s: %s",
				TLSKey, TLSCert, err))
		}

		t.Certificates = []tls.Certificate{cert}
	}

	t.BuildNameToCertificate()

	return t, nil
}

// SnakeCase converts the given string to snake case following the Golang format:
// acronyms are converted to lower-case and preceded by an underscore.
func SnakeCase(in string) string {
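A hedged sketch of how a server-side plugin might consume the new GetServerTLSConfig helper; the file paths and the HTTPS server around it are placeholders, not part of this diff:

```go
package main

import (
	"log"
	"net/http"

	"github.com/influxdata/telegraf/internal"
)

func main() {
	// Returns a nil *tls.Config when no cert, key, or CA is configured,
	// leaving plain (non-TLS) behavior untouched.
	tlsConfig, err := internal.GetServerTLSConfig(
		"/etc/telegraf/cert.pem",               // placeholder server certificate
		"/etc/telegraf/key.pem",                // placeholder private key
		[]string{"/etc/telegraf/clientca.pem"}, // CAs used to verify client certs
	)
	if err != nil {
		log.Fatal(err)
	}

	srv := &http.Server{Addr: ":8443", TLSConfig: tlsConfig}
	// The cert and key file arguments can be empty because TLSConfig
	// already carries the loaded certificate.
	log.Fatal(srv.ListenAndServeTLS("", ""))
}
```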
@@ -1,54 +0,0 @@
package limiter

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestRateLimiter(t *testing.T) {
	r := NewRateLimiter(5, time.Second)
	ticker := time.NewTicker(time.Millisecond * 75)

	// test that we can only get 5 receives from the rate limiter
	counter := 0
outer:
	for {
		select {
		case <-r.C:
			counter++
		case <-ticker.C:
			break outer
		}
	}

	assert.Equal(t, 5, counter)
	r.Stop()
	// verify that the Stop function closes the channel.
	_, ok := <-r.C
	assert.False(t, ok)
}

func TestRateLimiterMultipleIterations(t *testing.T) {
	r := NewRateLimiter(5, time.Millisecond*50)
	ticker := time.NewTicker(time.Millisecond * 250)

	// test that we can get 15 receives from the rate limiter
	counter := 0
outer:
	for {
		select {
		case <-ticker.C:
			break outer
		case <-r.C:
			counter++
		}
	}

	assert.True(t, counter > 10)
	r.Stop()
	// verify that the Stop function closes the channel.
	_, ok := <-r.C
	assert.False(t, ok)
}
@@ -77,6 +77,7 @@ func (r *RunningInput) MakeMetric(

	if r.trace && m != nil {
		s := influx.NewSerializer()
		s.SetFieldSortOrder(influx.SortFields)
		octets, err := s.Serialize(m)
		if err == nil {
			fmt.Print("> " + string(octets))
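The added SetFieldSortOrder call makes trace output deterministic. A small self-contained sketch using the same serializer API shown in the hunk; the metric values are invented examples:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/plugins/serializers/influx"
)

func main() {
	m, _ := metric.New("cpu",
		map[string]string{"host": "example"},
		map[string]interface{}{"usage_user": 2.5, "usage_idle": 97.5},
		time.Unix(0, 1456926671065383978))

	s := influx.NewSerializer()
	s.SetFieldSortOrder(influx.SortFields) // fields print in a stable order
	octets, err := s.Serialize(m)
	if err == nil {
		fmt.Print("> " + string(octets))
	}
}
```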
@@ -113,6 +113,11 @@ func (ro *RunningOutput) AddMetric(m telegraf.Metric) {
		m, _ = metric.New(name, tags, fields, t)
	}

	if output, ok := ro.Output.(telegraf.AggregatingOutput); ok {
		output.Add(m)
		return
	}

	ro.metrics.Add(m)
	if ro.metrics.Len() == ro.MetricBatchSize {
		batch := ro.metrics.Batch(ro.MetricBatchSize)
@@ -125,6 +130,12 @@ func (ro *RunningOutput) AddMetric(m telegraf.Metric) {

// Write writes all cached points to this output.
func (ro *RunningOutput) Write() error {
	if output, ok := ro.Output.(telegraf.AggregatingOutput); ok {
		metrics := output.Push()
		ro.metrics.Add(metrics...)
		output.Reset()
	}

	nFails, nMetrics := ro.failMetrics.Len(), ro.metrics.Len()
	ro.BufferSize.Set(int64(nFails + nMetrics))
	log.Printf("D! Output [%s] buffer fullness: %d / %d metrics. ",
@@ -54,8 +54,6 @@ type Metric interface {
	AddField(key string, value interface{})
	RemoveField(key string)

	SetTime(t time.Time)

	// HashID returns an unique identifier for the series.
	HashID() uint64

@@ -202,10 +202,6 @@ func (m *metric) RemoveField(key string) {
	}
}

func (m *metric) SetTime(t time.Time) {
	m.tm = t
}

func (m *metric) Copy() telegraf.Metric {
	m2 := &metric{
		name: m.name,
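Both hunks above touch Metric.SetTime, which restamps a metric's timestamp after creation. A minimal sketch of its effect; the metric here is an invented example:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	m, _ := metric.New("example",
		map[string]string{},
		map[string]interface{}{"value": 42},
		time.Unix(0, 0))

	m.SetTime(time.Now()) // overwrite the creation timestamp
	fmt.Println(m.Time())
}
```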
@@ -13,6 +13,12 @@ type Output interface {
	Write(metrics []Metric) error
}

type AggregatingOutput interface {
	Add(in Metric)
	Push() []Metric
	Reset()
}

type ServiceOutput interface {
	// Connect to the Output
	Connect() error
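A hypothetical sketch of an output-side aggregator built on this interface; `countmetrics` and everything in it are invented for illustration, and a real plugin would also implement the ordinary Output methods (Connect, Close, Write, SampleConfig, Description):

```go
package countmetrics

import (
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
)

type CountOutput struct {
	counts map[string]int64
}

// Add receives each metric in place of the normal buffer path
// (see the RunningOutput.AddMetric hunk above).
func (c *CountOutput) Add(in telegraf.Metric) {
	if c.counts == nil {
		c.counts = make(map[string]int64)
	}
	c.counts[in.Name()]++
}

// Push is called at flush time and converts the aggregate into metrics.
func (c *CountOutput) Push() []telegraf.Metric {
	now := time.Now()
	out := make([]telegraf.Metric, 0, len(c.counts))
	for name, n := range c.counts {
		m, _ := metric.New("count_"+name, map[string]string{},
			map[string]interface{}{"count": n}, now)
		out = append(out, m)
	}
	return out
}

// Reset clears state so each flush interval aggregates independently.
func (c *CountOutput) Reset() {
	c.counts = nil
}
```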
@@ -24,6 +24,7 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
	_ "github.com/influxdata/telegraf/plugins/inputs/exec"
	_ "github.com/influxdata/telegraf/plugins/inputs/fail2ban"
	_ "github.com/influxdata/telegraf/plugins/inputs/fibaro"
	_ "github.com/influxdata/telegraf/plugins/inputs/filestat"
	_ "github.com/influxdata/telegraf/plugins/inputs/fluentd"
	_ "github.com/influxdata/telegraf/plugins/inputs/graylog"
@@ -49,6 +50,7 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/logparser"
	_ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
	_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
	_ "github.com/influxdata/telegraf/plugins/inputs/mcrouter"
	_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
	_ "github.com/influxdata/telegraf/plugins/inputs/mesos"
	_ "github.com/influxdata/telegraf/plugins/inputs/minecraft"
@@ -64,6 +66,7 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
	_ "github.com/influxdata/telegraf/plugins/inputs/nstat"
	_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
	_ "github.com/influxdata/telegraf/plugins/inputs/nvidia_smi"
	_ "github.com/influxdata/telegraf/plugins/inputs/openldap"
	_ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd"
	_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
@@ -63,8 +63,6 @@ func (n *Apache) Description() string {
}

func (n *Apache) Gather(acc telegraf.Accumulator) error {
	var wg sync.WaitGroup

	if len(n.Urls) == 0 {
		n.Urls = []string{"http://localhost/server-status?auto"}
	}
@@ -80,6 +78,8 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
		n.client = client
	}

	var wg sync.WaitGroup
	wg.Add(len(n.Urls))
	for _, u := range n.Urls {
		addr, err := url.Parse(u)
		if err != nil {
@@ -87,7 +87,6 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
			continue
		}

		wg.Add(1)
		go func(addr *url.URL) {
			defer wg.Done()
			acc.AddError(n.gatherUrl(addr, acc))
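The two variants in this hunk differ in where the WaitGroup counter is incremented, which is what changelog entry #4176 (waitgroup deadlock on an invalid url) is about. A distilled standalone sketch, not the plugin itself, of why Add(1) after a successful parse is the safe ordering; the URLs are examples:

```go
package main

import (
	"fmt"
	"net/url"
	"sync"
)

func gatherAll(rawURLs []string) {
	var wg sync.WaitGroup
	for _, u := range rawURLs {
		addr, err := url.Parse(u)
		if err != nil {
			fmt.Println("skipping invalid url:", u)
			continue // no Done is owed for this URL
		}
		wg.Add(1) // counted only once a goroutine is guaranteed to start
		go func(addr *url.URL) {
			defer wg.Done()
			fmt.Println("gathering", addr.Host)
		}(addr)
	}
	// Had Add counted every URL up front, the continue above would leave
	// the counter unbalanced and Wait would block forever.
	wg.Wait()
}

func main() {
	gatherAll([]string{"http://localhost/server-status?auto", "://bad"})
}
```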
@@ -1,5 +1,8 @@

# Telegraf plugin: Cassandra

### **Deprecated in version 1.7**: Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin with the [cassandra.conf](/plugins/inputs/jolokia2/examples/cassandra.conf) example configuration.

#### Plugin arguments:
- **context** string: Context root used for jolokia url
- **servers** []string: List of servers with the format "<user:passwd@><host>:port"
@@ -5,6 +5,7 @@ import (
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
	"strings"
@@ -174,7 +175,11 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {

func (j *Cassandra) SampleConfig() string {
	return `
  # This is the context root used to compose the jolokia url
  ## DEPRECATED: The cassandra plugin has been deprecated. Please use the
  ## jolokia2 plugin instead.
  ##
  ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2

  context = "/jolokia/read"
  ## List of cassandra servers exposing jolokia read service
  servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
@@ -258,6 +263,16 @@ func parseServerTokens(server string) map[string]string {
	return serverTokens
}

func (c *Cassandra) Start(acc telegraf.Accumulator) error {
	log.Println("W! DEPRECATED: The cassandra plugin has been deprecated. " +
		"Please use the jolokia2 plugin instead. " +
		"https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2")
	return nil
}

func (c *Cassandra) Stop() {
}

func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
	context := c.Context
	servers := c.Servers
@@ -4,12 +4,11 @@ The docker plugin uses the Docker Engine API to gather metrics on running
docker containers.

The docker plugin uses the [Official Docker Client](https://github.com/moby/moby/tree/master/client)
to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/).
[Library Documentation](https://godoc.org/github.com/moby/moby/client)
to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/).

### Configuration:

```
```toml
# Read metrics about docker containers
[[inputs.docker]]
  ## Docker Endpoint
@@ -76,15 +75,57 @@ may prefer to exclude them:
```


### Measurements & Fields:
### Metrics:

Every effort was made to preserve the names based on the JSON response from the
docker API.

Note that the docker_container_cpu metric may appear multiple times per collection,
based on the availability of per-cpu stats on your system.
- docker
  - tags:
    - unit
    - engine_host
    - server_version
  - fields:
    - n_used_file_descriptors
    - n_cpus
    - n_containers
    - n_containers_running
    - n_containers_stopped
    - n_containers_paused
    - n_images
    - n_goroutines
    - n_listener_events
    - memory_total
    - pool_blocksize

- docker_data
  - tags:
    - unit
    - engine_host
    - server_version
  - fields:
    - available
    - total
    - used

- docker_metadata
  - tags:
    - unit
    - engine_host
    - server_version
  - fields:
    - available
    - total
    - used

- docker_container_mem
  - tags:
    - engine_host
    - server_version
    - container_image
    - container_name
    - container_version
  - fields:
    - total_pgmafault
    - cache
    - mapped_file
@@ -119,7 +160,16 @@ based on the availability of per-cpu stats on your system.
    - failcnt
    - limit
    - container_id

- docker_container_cpu
  - tags:
    - engine_host
    - server_version
    - container_image
    - container_name
    - container_version
    - cpu
  - fields:
    - throttling_periods
    - throttling_throttled_periods
    - throttling_throttled_time
@@ -129,7 +179,16 @@ based on the availability of per-cpu stats on your system.
    - usage_total
    - usage_percent
    - container_id

- docker_container_net
  - tags:
    - engine_host
    - server_version
    - container_image
    - container_name
    - container_version
    - network
  - fields:
    - rx_dropped
    - rx_bytes
    - rx_errors
@@ -139,7 +198,16 @@ based on the availability of per-cpu stats on your system.
    - tx_errors
    - tx_bytes
    - container_id

- docker_container_blkio
  - tags:
    - engine_host
    - server_version
    - container_image
    - container_name
    - container_version
    - device
  - fields:
    - io_service_bytes_recursive_async
    - io_service_bytes_recursive_read
    - io_service_bytes_recursive_sync
@@ -151,118 +219,38 @@ based on the availability of per-cpu stats on your system.
    - io_serviced_recursive_total
    - io_serviced_recursive_write
    - container_id
- docker_
  - n_used_file_descriptors
  - n_cpus
  - n_containers
  - n_containers_running
  - n_containers_stopped
  - n_containers_paused
  - n_images
  - n_goroutines
  - n_listener_events
  - memory_total
  - pool_blocksize
- docker_data
  - available
  - total
  - used
- docker_metadata
  - available
  - total
  - used
- docker_swarm
  - tasks_desired
  - tasks_running


### Tags:
#### Docker Engine tags
- docker (memory_total)
  - unit=bytes
  - engine_host
- docker (pool_blocksize)
  - unit=bytes
  - engine_host
- docker_data
  - unit=bytes
  - engine_host
- docker_metadata
  - unit=bytes
  - engine_host

#### Docker Container tags
- Tags on all containers:
- docker_container_health
  - tags:
    - engine_host
    - server_version
    - container_image
    - container_name
    - container_version
- docker_container_mem specific:
- docker_container_cpu specific:
  - cpu
- docker_container_net specific:
  - network
- docker_container_blkio specific:
  - device
- docker_container_health specific:
  - health_status
  - failing_streak
- docker_swarm specific:
  - fields:
    - health_status (string)
    - failing_streak (integer)

- docker_swarm
  - tags:
    - service_id
    - service_name
    - service_mode
  - fields:
    - tasks_desired
    - tasks_running

### Example Output:

```
% ./telegraf --config ~/ws/telegraf.conf --input-filter docker --test
* Plugin: docker, Collection 1
> docker n_cpus=8i 1456926671065383978
> docker n_used_file_descriptors=15i 1456926671065383978
> docker n_containers=7i 1456926671065383978
> docker n_containers_running=7i 1456926671065383978
> docker n_containers_stopped=3i 1456926671065383978
> docker n_containers_paused=0i 1456926671065383978
> docker n_images=152i 1456926671065383978
> docker n_goroutines=36i 1456926671065383978
> docker n_listener_events=0i 1456926671065383978
> docker,unit=bytes memory_total=18935443456i 1456926671065383978
> docker,unit=bytes pool_blocksize=65540i 1456926671065383978
> docker_data,unit=bytes available=24340000000i,total=107400000000i,used=14820000000i 1456926671065383978
> docker_metadata,unit=bytes available=2126999999i,total=2146999999i,used=20420000i 145692667106538
> docker_container_mem,
container_image=spotify/kafka,container_name=kafka \
active_anon=52568064i,active_file=6926336i,cache=12038144i,fail_count=0i,\
hierarchical_memory_limit=9223372036854771712i,inactive_anon=52707328i,\
inactive_file=5111808i,limit=1044578304i,mapped_file=10301440i,\
max_usage=140656640i,pgfault=63762i,pgmajfault=2837i,pgpgin=73355i,\
pgpgout=45736i,rss=105275392i,rss_huge=4194304i,total_active_anon=52568064i,\
total_active_file=6926336i,total_cache=12038144i,total_inactive_anon=52707328i,\
total_inactive_file=5111808i,total_mapped_file=10301440i,total_pgfault=63762i,\
total_pgmafault=0i,total_pgpgin=73355i,total_pgpgout=45736i,\
total_rss=105275392i,total_rss_huge=4194304i,total_unevictable=0i,\
total_writeback=0i,unevictable=0i,usage=117440512i,writeback=0i 1453409536840126713
> docker_container_cpu,
container_image=spotify/kafka,container_name=kafka,cpu=cpu-total \
throttling_periods=0i,throttling_throttled_periods=0i,\
throttling_throttled_time=0i,usage_in_kernelmode=440000000i,\
usage_in_usermode=2290000000i,usage_system=84795360000000i,\
usage_total=6628208865i 1453409536840126713
> docker_container_cpu,
container_image=spotify/kafka,container_name=kafka,cpu=cpu0 \
usage_total=6628208865i 1453409536840126713
> docker_container_net,\
container_image=spotify/kafka,container_name=kafka,network=eth0 \
rx_bytes=7468i,rx_dropped=0i,rx_errors=0i,rx_packets=94i,tx_bytes=946i,\
tx_dropped=0i,tx_errors=0i,tx_packets=13i 1453409536840126713
> docker_container_blkio,
container_image=spotify/kafka,container_name=kafka,device=8:0 \
io_service_bytes_recursive_async=80216064i,io_service_bytes_recursive_read=79925248i,\
io_service_bytes_recursive_sync=77824i,io_service_bytes_recursive_total=80293888i,\
io_service_bytes_recursive_write=368640i,io_serviced_recursive_async=6562i,\
io_serviced_recursive_read=6492i,io_serviced_recursive_sync=37i,\
io_serviced_recursive_total=6599i,io_serviced_recursive_write=107i 1453409536840126713
>docker_swarm,
service_id=xaup2o9krw36j2dy1mjx1arjw,service_mode=replicated,service_name=test,\
tasks_desired=3,tasks_running=3 1508968160000000000
docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce n_containers=6i,n_containers_paused=0i,n_containers_running=1i,n_containers_stopped=5i,n_cpus=2i,n_goroutines=41i,n_images=2i,n_listener_events=0i,n_used_file_descriptors=27i 1524002041000000000
docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce,unit=bytes memory_total=2101661696i 1524002041000000000
docker_container_mem,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce active_anon=8327168i,active_file=2314240i,cache=27402240i,container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",hierarchical_memory_limit=9223372036854771712i,inactive_anon=0i,inactive_file=25088000i,limit=2101661696i,mapped_file=20582400i,max_usage=36646912i,pgfault=4193i,pgmajfault=214i,pgpgin=9243i,pgpgout=520i,rss=8327168i,rss_huge=0i,total_active_anon=8327168i,total_active_file=2314240i,total_cache=27402240i,total_inactive_anon=0i,total_inactive_file=25088000i,total_mapped_file=20582400i,total_pgfault=4193i,total_pgmajfault=214i,total_pgpgin=9243i,total_pgpgout=520i,total_rss=8327168i,total_rss_huge=0i,total_unevictable=0i,total_writeback=0i,unevictable=0i,usage=36528128i,usage_percent=0.4342225020025297,writeback=0i 1524002042000000000
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,cpu=cpu-total,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",throttling_periods=0i,throttling_throttled_periods=0i,throttling_throttled_time=0i,usage_in_kernelmode=40000000i,usage_in_usermode=100000000i,usage_percent=0,usage_system=6394210000000i,usage_total=117319068i 1524002042000000000
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,cpu=cpu0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=20825265i 1524002042000000000
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,cpu=cpu1,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=96493803i 1524002042000000000
docker_container_net,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,engine_host=debian-stretch-docker,network=eth0,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",rx_bytes=1576i,rx_dropped=0i,rx_errors=0i,rx_packets=20i,tx_bytes=0i,tx_dropped=0i,tx_errors=0i,tx_packets=0i 1524002042000000000
docker_container_blkio,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,device=254:0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",io_service_bytes_recursive_async=27398144i,io_service_bytes_recursive_read=27398144i,io_service_bytes_recursive_sync=0i,io_service_bytes_recursive_total=27398144i,io_service_bytes_recursive_write=0i,io_serviced_recursive_async=529i,io_serviced_recursive_read=529i,io_serviced_recursive_sync=0i,io_serviced_recursive_total=529i,io_serviced_recursive_write=0i 1524002042000000000
docker_container_health,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce failing_streak=0i,health_status="healthy" 1524007529000000000
docker_swarm,service_id=xaup2o9krw36j2dy1mjx1arjw,service_mode=replicated,service_name=test tasks_desired=3,tasks_running=3 1508968160000000000
```
@@ -12,7 +12,7 @@ import (
)

var (
	version string
	version = "1.24"
	defaultHeaders = map[string]string{"User-Agent": "engine-api-cli-1.0"}
)

@@ -54,6 +54,7 @@ type Docker struct {
	client Client
	httpClient *http.Client
	engine_host string
	serverVersion string
	filtersCreated bool
	labelFilter filter.Filter
	containerFilter filter.Filter
@@ -301,7 +302,14 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
	if err != nil {
		return err
	}

	d.engine_host = info.Name
	d.serverVersion = info.ServerVersion

	tags := map[string]string{
		"engine_host": d.engine_host,
		"server_version": d.serverVersion,
	}

	fields := map[string]interface{}{
		"n_cpus": info.NCPU,
@@ -315,15 +323,13 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
		"n_listener_events": info.NEventsListener,
	}
	// Add metrics
	acc.AddFields("docker",
		fields,
		map[string]string{"engine_host": d.engine_host},
		now)
	acc.AddFields("docker", fields, tags, now)
	acc.AddFields("docker",
		map[string]interface{}{"memory_total": info.MemTotal},
		map[string]string{"unit": "bytes", "engine_host": d.engine_host},
		tags,
		now)
	// Get storage metrics
	tags["unit"] = "bytes"
	for _, rawData := range info.DriverStatus {
		// Try to convert string to int (bytes)
		value, err := parseSize(rawData[1])
@@ -335,7 +341,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
			// pool blocksize
			acc.AddFields("docker",
				map[string]interface{}{"pool_blocksize": value},
				map[string]string{"unit": "bytes", "engine_host": d.engine_host},
				tags,
				now)
		} else if strings.HasPrefix(name, "data_space_") {
			// data space
@@ -348,16 +354,10 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
		}
	}
	if len(dataFields) > 0 {
		acc.AddFields("docker_data",
			dataFields,
			map[string]string{"unit": "bytes", "engine_host": d.engine_host},
			now)
		acc.AddFields("docker_data", dataFields, tags, now)
	}
	if len(metadataFields) > 0 {
		acc.AddFields("docker_metadata",
			metadataFields,
			map[string]string{"unit": "bytes", "engine_host": d.engine_host},
			now)
		acc.AddFields("docker_metadata", metadataFields, tags, now)
	}
	return nil
}
@@ -388,6 +388,7 @@ func (d *Docker) gatherContainer(

	tags := map[string]string{
		"engine_host": d.engine_host,
		"server_version": d.serverVersion,
		"container_name": cname,
		"container_image": imageName,
		"container_version": imageVersion,
@@ -615,7 +615,10 @@ func TestDockerGatherInfo(t *testing.T) {
|
||||
"n_images": int(199),
|
||||
"n_goroutines": int(39),
|
||||
},
|
||||
map[string]string{"engine_host": "absol"},
|
||||
map[string]string{
|
||||
"engine_host": "absol",
|
||||
"server_version": "17.09.0-ce",
|
||||
},
|
||||
)
|
||||
|
||||
acc.AssertContainsTaggedFields(t,
|
||||
@@ -626,8 +629,9 @@ func TestDockerGatherInfo(t *testing.T) {
|
||||
"available": int64(36530000000),
|
||||
},
|
||||
map[string]string{
|
||||
"unit": "bytes",
|
||||
"engine_host": "absol",
|
||||
"unit": "bytes",
|
||||
"engine_host": "absol",
|
||||
"server_version": "17.09.0-ce",
|
||||
},
|
||||
)
|
||||
acc.AssertContainsTaggedFields(t,
|
||||
@@ -648,6 +652,7 @@ func TestDockerGatherInfo(t *testing.T) {
|
||||
"ENVVAR7": "ENVVAR8=ENVVAR9",
|
||||
"label1": "test_value_1",
|
||||
"label2": "test_value_2",
|
||||
"server_version": "17.09.0-ce",
|
||||
},
|
||||
)
|
||||
acc.AssertContainsTaggedFields(t,
|
||||
@@ -670,6 +675,7 @@ func TestDockerGatherInfo(t *testing.T) {
|
||||
"ENVVAR7": "ENVVAR8=ENVVAR9",
|
||||
"label1": "test_value_1",
|
||||
"label2": "test_value_2",
|
||||
"server_version": "17.09.0-ce",
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
@@ -55,6 +55,7 @@ var info = types.Info{
|
||||
DockerRootDir: "/var/lib/docker",
|
||||
NoProxy: "",
|
||||
BridgeNfIP6tables: true,
|
||||
ServerVersion: "17.09.0-ce",
|
||||
}
|
||||
|
||||
var containerList = []types.Container{
|
||||
|
||||
51  plugins/inputs/fibaro/README.md  (new file)
@@ -0,0 +1,51 @@
# Fibaro Input Plugin

The Fibaro plugin makes HTTP calls to the Fibaro controller API to gather the values of connected devices.
Those values may be true (1) or false (0) for switches, a percentage for dimmers, a temperature, etc.

### Configuration:

```toml
# Read devices value(s) from a Fibaro controller
[[inputs.fibaro]]
  ## Required Fibaro controller address/hostname.
  ## Note: at the time of writing, the Fibaro controller only implements HTTP (no HTTPS available)
  url = "http://<controller>:80"

  ## Required credentials to access the API (http://<controller>/api/<component>)
  username = "<username>"
  password = "<password>"

  ## Amount of time allowed to complete the HTTP request
  # timeout = "5s"
```

### Metrics:

- fibaro
  - tags:
    - section (section name)
    - room (room name)
    - name (device name)
    - type (device type)
  - fields:
    - value (float)
    - value2 (float, when available from device)

### Example Output:

```
fibaro,host=vm1,name=Escaliers,room=Dégagement,section=Pièces\ communes,type=com.fibaro.binarySwitch value=0 1523351010000000000
fibaro,host=vm1,name=Porte\ fenêtre,room=Salon,section=Pièces\ communes,type=com.fibaro.FGRM222 value=99,value2=99 1523351010000000000
fibaro,host=vm1,name=LED\ îlot\ central,room=Cuisine,section=Cuisine,type=com.fibaro.binarySwitch value=0 1523351010000000000
fibaro,host=vm1,name=Détérioration,room=Entrée,section=Pièces\ communes,type=com.fibaro.heatDetector value=0 1523351010000000000
fibaro,host=vm1,name=Température,room=Cave,section=Cave,type=com.fibaro.temperatureSensor value=17.87 1523351010000000000
fibaro,host=vm1,name=Présence,room=Garde-manger,section=Cuisine,type=com.fibaro.FGMS001 value=1 1523351010000000000
fibaro,host=vm1,name=Luminosité,room=Garde-manger,section=Cuisine,type=com.fibaro.lightSensor value=92 1523351010000000000
fibaro,host=vm1,name=Etat,room=Garage,section=Extérieur,type=com.fibaro.doorSensor value=0 1523351010000000000
fibaro,host=vm1,name=CO2\ (ppm),room=Salon,section=Pièces\ communes,type=com.fibaro.multilevelSensor value=880 1523351010000000000
fibaro,host=vm1,name=Humidité\ (%),room=Salon,section=Pièces\ communes,type=com.fibaro.humiditySensor value=53 1523351010000000000
fibaro,host=vm1,name=Pression\ (mb),room=Salon,section=Pièces\ communes,type=com.fibaro.multilevelSensor value=1006.9 1523351010000000000
fibaro,host=vm1,name=Bruit\ (db),room=Salon,section=Pièces\ communes,type=com.fibaro.multilevelSensor value=58 1523351010000000000
```
202  plugins/inputs/fibaro/fibaro.go  (new file)
@@ -0,0 +1,202 @@
package fibaro

import (
    "encoding/json"
    "fmt"
    "net/http"
    "strconv"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/plugins/inputs"
)

const sampleConfig = `
  ## Required Fibaro controller address/hostname.
  ## Note: at the time of writing, the Fibaro controller only implements HTTP (no HTTPS available)
  url = "http://<controller>:80"

  ## Required credentials to access the API (http://<controller>/api/<component>)
  username = "<username>"
  password = "<password>"

  ## Amount of time allowed to complete the HTTP request
  # timeout = "5s"
`

const description = "Read devices value(s) from a Fibaro controller"

// Fibaro contains connection information
type Fibaro struct {
    URL string

    // HTTP Basic Auth Credentials
    Username string
    Password string

    Timeout internal.Duration

    client *http.Client
}

// LinkRoomsSections links rooms to sections
type LinkRoomsSections struct {
    Name      string
    SectionID uint16
}

// Sections contains section information
type Sections struct {
    ID   uint16 `json:"id"`
    Name string `json:"name"`
}

// Rooms contains room information
type Rooms struct {
    ID        uint16 `json:"id"`
    Name      string `json:"name"`
    SectionID uint16 `json:"sectionID"`
}

// Devices contains device information
type Devices struct {
    ID         uint16 `json:"id"`
    Name       string `json:"name"`
    RoomID     uint16 `json:"roomID"`
    Type       string `json:"type"`
    Enabled    bool   `json:"enabled"`
    Properties struct {
        Dead   interface{} `json:"dead"`
        Value  interface{} `json:"value"`
        Value2 interface{} `json:"value2"`
    } `json:"properties"`
}

// Description returns a string explaining the purpose of this plugin
func (f *Fibaro) Description() string { return description }

// SampleConfig returns text explaining how plugin should be configured
func (f *Fibaro) SampleConfig() string { return sampleConfig }

// getJSON connects, authenticates and reads JSON payload returned by Fibaro box
func (f *Fibaro) getJSON(path string, dataStruct interface{}) error {
    var requestURL = f.URL + path

    req, err := http.NewRequest("GET", requestURL, nil)
    if err != nil {
        return err
    }

    req.SetBasicAuth(f.Username, f.Password)
    resp, err := f.client.Do(req)
    if err != nil {
        return err
    }
    // Close the body on every path, including non-200 responses
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
            requestURL,
            resp.StatusCode,
            http.StatusText(resp.StatusCode),
            http.StatusOK,
            http.StatusText(http.StatusOK))
        return err
    }

    dec := json.NewDecoder(resp.Body)
    err = dec.Decode(&dataStruct)
    if err != nil {
        return err
    }

    return nil
}

// Gather fetches all required information to output metrics
func (f *Fibaro) Gather(acc telegraf.Accumulator) error {

    if f.client == nil {
        f.client = &http.Client{
            Transport: &http.Transport{
                Proxy: http.ProxyFromEnvironment,
            },
            Timeout: f.Timeout.Duration,
        }
    }

    var tmpSections []Sections
    err := f.getJSON("/api/sections", &tmpSections)
    if err != nil {
        return err
    }
    sections := map[uint16]string{}
    for _, v := range tmpSections {
        sections[v.ID] = v.Name
    }

    var tmpRooms []Rooms
    err = f.getJSON("/api/rooms", &tmpRooms)
    if err != nil {
        return err
    }
    rooms := map[uint16]LinkRoomsSections{}
    for _, v := range tmpRooms {
        rooms[v.ID] = LinkRoomsSections{Name: v.Name, SectionID: v.SectionID}
    }

    var devices []Devices
    err = f.getJSON("/api/devices", &devices)
    if err != nil {
        return err
    }

    for _, device := range devices {
        // skip device in some cases
        if device.RoomID == 0 ||
            device.Enabled == false ||
            device.Properties.Dead == "true" ||
            device.Type == "com.fibaro.zwaveDevice" {
            continue
        }

        tags := map[string]string{
            "section": sections[rooms[device.RoomID].SectionID],
            "room":    rooms[device.RoomID].Name,
            "name":    device.Name,
            "type":    device.Type,
        }
        fields := make(map[string]interface{})

        if device.Properties.Value != nil {
            value := device.Properties.Value
            switch value {
            case "true":
                value = "1"
            case "false":
                value = "0"
            }

            if fValue, err := strconv.ParseFloat(value.(string), 64); err == nil {
                fields["value"] = fValue
            }
        }

        if device.Properties.Value2 != nil {
            if fValue, err := strconv.ParseFloat(device.Properties.Value2.(string), 64); err == nil {
                fields["value2"] = fValue
            }
        }

        acc.AddFields("fibaro", fields, tags)
    }

    return nil
}

func init() {
    inputs.Add("fibaro", func() telegraf.Input {
        return &Fibaro{}
    })
}
204  plugins/inputs/fibaro/fibaro_test.go  (new file)
@@ -0,0 +1,204 @@
package fibaro

import (
    "fmt"
    "net/http"
    "net/http/httptest"
    "testing"

    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

const sectionsJSON = `
    [
        {
            "id": 1,
            "name": "Section 1",
            "sortOrder": 1
        },
        {
            "id": 2,
            "name": "Section 2",
            "sortOrder": 2
        },
        {
            "id": 3,
            "name": "Section 3",
            "sortOrder": 3
        }
    ]`

const roomsJSON = `
    [
        {
            "id": 1,
            "name": "Room 1",
            "sectionID": 1,
            "icon": "room_1",
            "sortOrder": 1
        },
        {
            "id": 2,
            "name": "Room 2",
            "sectionID": 2,
            "icon": "room_2",
            "sortOrder": 2
        },
        {
            "id": 3,
            "name": "Room 3",
            "sectionID": 3,
            "icon": "room_3",
            "sortOrder": 3
        },
        {
            "id": 4,
            "name": "Room 4",
            "sectionID": 3,
            "icon": "room_4",
            "sortOrder": 4
        }
    ]`

const devicesJSON = `
    [
        {
            "id": 1,
            "name": "Device 1",
            "roomID": 1,
            "type": "com.fibaro.binarySwitch",
            "enabled": true,
            "properties": {
                "dead": "false",
                "value": "false"
            },
            "sortOrder": 1
        },
        {
            "id": 2,
            "name": "Device 2",
            "roomID": 2,
            "type": "com.fibaro.binarySwitch",
            "enabled": true,
            "properties": {
                "dead": "false",
                "value": "true"
            },
            "sortOrder": 2
        },
        {
            "id": 3,
            "name": "Device 3",
            "roomID": 3,
            "type": "com.fibaro.multilevelSwitch",
            "enabled": true,
            "properties": {
                "dead": "false",
                "value": "67"
            },
            "sortOrder": 3
        },
        {
            "id": 4,
            "name": "Device 4",
            "roomID": 4,
            "type": "com.fibaro.temperatureSensor",
            "enabled": true,
            "properties": {
                "dead": "false",
                "value": "22.80"
            },
            "sortOrder": 4
        },
        {
            "id": 5,
            "name": "Device 5",
            "roomID": 4,
            "type": "com.fibaro.FGRM222",
            "enabled": true,
            "properties": {
                "dead": "false",
                "value": "50",
                "value2": "75"
            },
            "sortOrder": 5
        }
    ]`

// TestUnauthorized validates that 401 (wrong credentials) is handled properly
func TestUnauthorized(t *testing.T) {
    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(http.StatusUnauthorized)
    }))
    defer ts.Close()

    a := Fibaro{
        URL:      ts.URL,
        Username: "user",
        Password: "pass",
        client:   &http.Client{},
    }

    var acc testutil.Accumulator
    err := acc.GatherError(a.Gather)
    require.Error(t, err)
}

// TestJSONSuccess validates that the module works OK with valid JSON payloads
func TestJSONSuccess(t *testing.T) {
    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        payload := ""
        switch r.URL.Path {
        case "/api/sections":
            payload = sectionsJSON
        case "/api/rooms":
            payload = roomsJSON
        case "/api/devices":
            payload = devicesJSON
        }
        w.WriteHeader(http.StatusOK)
        fmt.Fprintln(w, payload)
    }))
    defer ts.Close()

    a := Fibaro{
        URL:      ts.URL,
        Username: "user",
        Password: "pass",
        client:   &http.Client{},
    }

    var acc testutil.Accumulator
    err := acc.GatherError(a.Gather)
    require.NoError(t, err)

    // Gather should add 5 metrics
    assert.Equal(t, uint64(5), acc.NMetrics())

    // Ensure fields / values are correct - Device 1
    tags := map[string]string{"section": "Section 1", "room": "Room 1", "name": "Device 1", "type": "com.fibaro.binarySwitch"}
    fields := map[string]interface{}{"value": float64(0)}
    acc.AssertContainsTaggedFields(t, "fibaro", fields, tags)

    // Ensure fields / values are correct - Device 2
    tags = map[string]string{"section": "Section 2", "room": "Room 2", "name": "Device 2", "type": "com.fibaro.binarySwitch"}
    fields = map[string]interface{}{"value": float64(1)}
    acc.AssertContainsTaggedFields(t, "fibaro", fields, tags)

    // Ensure fields / values are correct - Device 3
    tags = map[string]string{"section": "Section 3", "room": "Room 3", "name": "Device 3", "type": "com.fibaro.multilevelSwitch"}
    fields = map[string]interface{}{"value": float64(67)}
    acc.AssertContainsTaggedFields(t, "fibaro", fields, tags)

    // Ensure fields / values are correct - Device 4
    tags = map[string]string{"section": "Section 3", "room": "Room 4", "name": "Device 4", "type": "com.fibaro.temperatureSensor"}
    fields = map[string]interface{}{"value": float64(22.8)}
    acc.AssertContainsTaggedFields(t, "fibaro", fields, tags)

    // Ensure fields / values are correct - Device 5
    tags = map[string]string{"section": "Section 3", "room": "Room 4", "name": "Device 5", "type": "com.fibaro.FGRM222"}
    fields = map[string]interface{}{"value": float64(50), "value2": float64(75)}
    acc.AssertContainsTaggedFields(t, "fibaro", fields, tags)
}
@@ -33,6 +33,9 @@ InfluxDB-formatted endpoints. See below for more information.

### Measurements & Fields

**Note:** The measurements and fields are dynamically built from the InfluxDB source,
and may vary between versions.

- influxdb
  - n_shards
- influxdb_database
@@ -2,7 +2,9 @@

The [Jolokia](http://jolokia.org) _agent_ and _proxy_ input plugins collect JMX metrics from an HTTP endpoint using Jolokia's [JSON-over-HTTP protocol](https://jolokia.org/reference/html/protocol.html).

## Jolokia Agent Configuration
### Configuration:

#### Jolokia Agent Configuration

The `jolokia2_agent` input plugin reads JMX metrics from one or more [Jolokia agent](https://jolokia.org/agent/jvm.html) REST endpoints.
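
A minimal sketch of an agent declaration, assembled from the example configurations added elsewhere in this changeset (the endpoint URL and MBean are illustrative):

```toml
[[inputs.jolokia2_agent]]
  urls = ["http://localhost:8778/jolokia"]

  [[inputs.jolokia2_agent.metric]]
    name  = "java_runtime"
    mbean = "java.lang:type=Runtime"
    paths = ["Uptime"]
```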
@@ -32,7 +34,7 @@ Optionally, specify SSL options for communicating with agents:
    paths = ["Uptime"]
```

## Jolokia Proxy Configuration
#### Jolokia Proxy Configuration

The `jolokia2_proxy` input plugin reads JMX metrics from one or more _targets_ by interacting with a [Jolokia proxy](https://jolokia.org/features/proxy.html) REST endpoint.
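
A hedged sketch of a proxy declaration; the target `url` and metric come from the sample config in this changeset, while the assumption that the proxy endpoint itself is set via a top-level `url` key (and its value) is illustrative:

```toml
[[inputs.jolokia2_proxy]]
  # assumed key for the proxy REST endpoint; value is illustrative
  url = "http://localhost:8080/jolokia"

  [[inputs.jolokia2_proxy.target]]
    url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"

  [[inputs.jolokia2_proxy.metric]]
    name  = "java_runtime"
    mbean = "java.lang:type=Runtime"
    paths = ["Uptime"]
```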
@@ -77,7 +79,7 @@ Optionally, specify SSL options for communicating with proxies:
    paths = ["Uptime"]
```

## Jolokia Metric Configuration
#### Jolokia Metric Configuration

Each `metric` declaration generates a Jolokia request to fetch telemetry from a JMX MBean.
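
For instance, the Cassandra example configuration added in this changeset declares a metric over a wildcard MBean pattern; reading those examples, the `$1_` prefix appears to interpolate the value matched by the first wildcard property named in `tag_keys` (a hedged reading, not a statement of the plugin's spec):

```toml
[[inputs.jolokia2_agent.metric]]
  name         = "Cache"
  mbean        = "org.apache.cassandra.metrics:name=*,scope=*,type=Cache"
  tag_keys     = ["name", "scope"]
  field_prefix = "$1_"
```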
@@ -167,3 +169,11 @@ Both `jolokia2_agent` and `jolokia2_proxy` plugins support default configuration
| `default_field_separator` | `.` | A character to use to join Mbean attributes when creating fields. |
| `default_field_prefix` | _None_ | A string to prepend to the field names produced by all `metric` declarations. |
| `default_tag_prefix` | _None_ | A string to prepend to the tag names produced by all `metric` declarations. |
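
A sketch of how these defaults might be applied at the plugin level; the keys are taken from the table above, the values are illustrative assumptions:

```toml
[[inputs.jolokia2_agent]]
  urls = ["http://localhost:8778/jolokia"]
  default_field_prefix    = "jvm_"
  default_field_separator = "."
  default_tag_prefix      = "mbean_"
```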

### Example Configurations:

- [Java JVM](/plugins/inputs/jolokia2/examples/java.conf)
- [Kafka](/plugins/inputs/jolokia2/examples/kafka.conf)
- [Cassandra](/plugins/inputs/jolokia2/examples/cassandra.conf)

Please help improve this list and contribute new configuration files by opening an issue or pull request.

95  plugins/inputs/jolokia2/examples/cassandra.conf  (new file)
@@ -0,0 +1,95 @@
[[inputs.jolokia2_agent]]
  urls = ["http://localhost:8778/jolokia"]
  name_prefix = "java_"

  [[inputs.jolokia2_agent.metric]]
    name  = "Memory"
    mbean = "java.lang:type=Memory"

  [[inputs.jolokia2_agent.metric]]
    name  = "GarbageCollector"
    mbean = "java.lang:name=*,type=GarbageCollector"
    tag_keys = ["name"]
    field_prefix = "$1_"

[[inputs.jolokia2_agent]]
  urls = ["http://localhost:8778/jolokia"]
  name_prefix = "cassandra_"

  [[inputs.jolokia2_agent.metric]]
    name  = "Cache"
    mbean = "org.apache.cassandra.metrics:name=*,scope=*,type=Cache"
    tag_keys = ["name", "scope"]
    field_prefix = "$1_"

  [[inputs.jolokia2_agent.metric]]
    name  = "Client"
    mbean = "org.apache.cassandra.metrics:name=*,type=Client"
    tag_keys = ["name"]
    field_prefix = "$1_"

  [[inputs.jolokia2_agent.metric]]
    name  = "ClientRequestMetrics"
    mbean = "org.apache.cassandra.metrics:name=*,type=ClientRequestMetrics"
    tag_keys = ["name"]
    field_prefix = "$1_"

  [[inputs.jolokia2_agent.metric]]
    name  = "ClientRequest"
    mbean = "org.apache.cassandra.metrics:name=*,scope=*,type=ClientRequest"
    tag_keys = ["name", "scope"]
    field_prefix = "$1_"

  [[inputs.jolokia2_agent.metric]]
    name  = "ColumnFamily"
    mbean = "org.apache.cassandra.metrics:keyspace=*,name=*,scope=*,type=ColumnFamily"
    tag_keys = ["keyspace", "name", "scope"]
    field_prefix = "$2_"

  [[inputs.jolokia2_agent.metric]]
    name  = "CommitLog"
    mbean = "org.apache.cassandra.metrics:name=*,type=CommitLog"
    tag_keys = ["name"]
    field_prefix = "$1_"

  [[inputs.jolokia2_agent.metric]]
    name  = "Compaction"
    mbean = "org.apache.cassandra.metrics:name=*,type=Compaction"
    tag_keys = ["name"]
    field_prefix = "$1_"

  [[inputs.jolokia2_agent.metric]]
    name  = "CQL"
    mbean = "org.apache.cassandra.metrics:name=*,type=CQL"
    tag_keys = ["name"]
    field_prefix = "$1_"

  [[inputs.jolokia2_agent.metric]]
    name  = "DroppedMessage"
    mbean = "org.apache.cassandra.metrics:name=*,scope=*,type=DroppedMessage"
    tag_keys = ["name", "scope"]
    field_prefix = "$1_"

  [[inputs.jolokia2_agent.metric]]
    name  = "FileCache"
    mbean = "org.apache.cassandra.metrics:name=*,type=FileCache"
    tag_keys = ["name"]
    field_prefix = "$1_"

  [[inputs.jolokia2_agent.metric]]
    name  = "ReadRepair"
    mbean = "org.apache.cassandra.metrics:name=*,type=ReadRepair"
    tag_keys = ["name"]
    field_prefix = "$1_"

  [[inputs.jolokia2_agent.metric]]
    name  = "Storage"
    mbean = "org.apache.cassandra.metrics:name=*,type=Storage"
    tag_keys = ["name"]
    field_prefix = "$1_"

  [[inputs.jolokia2_agent.metric]]
    name  = "ThreadPools"
    mbean = "org.apache.cassandra.metrics:name=*,path=*,scope=*,type=ThreadPools"
    tag_keys = ["name", "path", "scope"]
    field_prefix = "$1_"
@@ -3,9 +3,9 @@ package jolokia2
import (
    "fmt"
    "sync"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal"
)

type JolokiaAgent struct {
@@ -16,7 +16,7 @@ type JolokiaAgent struct {
    URLs            []string `toml:"urls"`
    Username        string
    Password        string
    ResponseTimeout internal.Duration `toml:"response_timeout"`
    ResponseTimeout time.Duration `toml:"response_timeout"`

    SSLCA   string `toml:"ssl_ca"`
    SSLCert string `toml:"ssl_cert"`
@@ -25,7 +25,6 @@ type JolokiaAgent struct {

    Metrics  []MetricConfig `toml:"metric"`
    gatherer *Gatherer
    clients  []*Client
}

func (ja *JolokiaAgent) SampleConfig() string {
@@ -63,27 +62,20 @@ func (ja *JolokiaAgent) Gather(acc telegraf.Accumulator) error {
        ja.gatherer = NewGatherer(ja.createMetrics())
    }

    // Initialize clients once
    if ja.clients == nil {
        ja.clients = make([]*Client, 0, len(ja.URLs))
        for _, url := range ja.URLs {
            client, err := ja.createClient(url)
            if err != nil {
                acc.AddError(fmt.Errorf("Unable to create client for %s: %v", url, err))
                continue
            }
            ja.clients = append(ja.clients, client)
        }
    }

    var wg sync.WaitGroup

    for _, client := range ja.clients {
    for _, url := range ja.URLs {
        client, err := ja.createClient(url)
        if err != nil {
            acc.AddError(fmt.Errorf("Unable to create client for %s: %v", url, err))
            continue
        }

        wg.Add(1)
        go func(client *Client) {
            defer wg.Done()

            err := ja.gatherer.Gather(client, acc)
            err = ja.gatherer.Gather(client, acc)
            if err != nil {
                acc.AddError(fmt.Errorf("Unable to gather metrics for %s: %v", client.URL, err))
            }
@@ -111,7 +103,7 @@ func (ja *JolokiaAgent) createClient(url string) (*Client, error) {
    return NewClient(url, &ClientConfig{
        Username:        ja.Username,
        Password:        ja.Password,
        ResponseTimeout: ja.ResponseTimeout.Duration,
        ResponseTimeout: ja.ResponseTimeout,
        SSLCA:           ja.SSLCA,
        SSLCert:         ja.SSLCert,
        SSLKey:          ja.SSLKey,

@@ -1,8 +1,9 @@
package jolokia2

import (
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal"
)

type JolokiaProxy struct {
@@ -21,7 +22,7 @@ type JolokiaProxy struct {
    SSLCert string `toml:"ssl_cert"`
    SSLKey  string `toml:"ssl_key"`
    InsecureSkipVerify bool
    ResponseTimeout internal.Duration `toml:"response_timeout"`
    ResponseTimeout time.Duration `toml:"response_timeout"`

    Metrics []MetricConfig `toml:"metric"`
    client  *Client
@@ -55,13 +56,13 @@ func (jp *JolokiaProxy) SampleConfig() string {
  ## Add proxy targets to query
  # default_target_username = ""
  # default_target_password = ""
  [[inputs.jolokia_proxy.target]]
  [[inputs.jolokia2_proxy.target]]
    url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
    # username = ""
    # password = ""
    # username = ""
    # password = ""

  ## Add metrics to read
  [[inputs.jolokia_proxy.metric]]
  [[inputs.jolokia2_proxy.metric]]
    name  = "java_runtime"
    mbean = "java.lang:type=Runtime"
    paths = ["Uptime"]
@@ -118,7 +119,7 @@ func (jp *JolokiaProxy) createClient() (*Client, error) {
    return NewClient(jp.URL, &ClientConfig{
        Username:        jp.Username,
        Password:        jp.Password,
        ResponseTimeout: jp.ResponseTimeout.Duration,
        ResponseTimeout: jp.ResponseTimeout,
        SSLCA:           jp.SSLCA,
        SSLCert:         jp.SSLCert,
        SSLKey:          jp.SSLKey,
251  plugins/inputs/leofs/README.md  (new file)
@@ -0,0 +1,251 @@
# LeoFS Input Plugin

The LeoFS plugin gathers metrics of LeoGateway, LeoManager, and LeoStorage using SNMP. See [LeoFS Documentation / System Administration / System Monitoring](https://leo-project.net/leofs/docs/admin/system_admin/monitoring/).

## Configuration:

```toml
# Sample Config:

[[inputs.leofs]]
  servers = ["127.0.0.1:4010"]
```

## Measurements & Fields:

### Statistics specific to the internals of LeoManager

#### Erlang VM

- 1 min Statistics
  - num_of_processes
  - total_memory_usage
  - system_memory_usage
  - processes_memory_usage
  - ets_memory_usage
  - used_allocated_memory
  - allocated_memory
- 5 min Statistics
  - num_of_processes_5min
  - total_memory_usage_5min
  - system_memory_usage_5min
  - processes_memory_usage_5min
  - ets_memory_usage_5min
  - used_allocated_memory_5min
  - allocated_memory_5min

### Statistics specific to the internals of LeoStorage

#### Erlang VM

- 1 min Statistics
  - num_of_processes
  - total_memory_usage
  - system_memory_usage
  - processes_memory_usage
  - ets_memory_usage
  - used_allocated_memory
  - allocated_memory
- 5 min Statistics
  - num_of_processes_5min
  - total_memory_usage_5min
  - system_memory_usage_5min
  - processes_memory_usage_5min
  - ets_memory_usage_5min
  - used_allocated_memory_5min
  - allocated_memory_5min

#### Total Number of Requests

- 1 min Statistics
  - num_of_writes
  - num_of_reads
  - num_of_deletes
- 5 min Statistics
  - num_of_writes_5min
  - num_of_reads_5min
  - num_of_deletes_5min

#### Total Number of Objects and Total Size of Objects

- num_of_active_objects
- total_objects
- total_size_of_active_objects
- total_size

#### Total Number of MQ Messages

- num_of_replication_messages
- num_of_sync-vnode_messages
- num_of_rebalance_messages
- mq_num_of_msg_recovery_node
- mq_num_of_msg_deletion_dir
- mq_num_of_msg_async_deletion_dir
- mq_num_of_msg_req_deletion_dir
- mq_mdcr_num_of_msg_req_comp_metadata
- mq_mdcr_num_of_msg_req_sync_obj

Note: The following items are available since LeoFS v1.4.0:

- mq_num_of_msg_recovery_node
- mq_num_of_msg_deletion_dir
- mq_num_of_msg_async_deletion_dir
- mq_num_of_msg_req_deletion_dir
- mq_mdcr_num_of_msg_req_comp_metadata
- mq_mdcr_num_of_msg_req_sync_obj

#### Data Compaction

- comp_state
- comp_last_start_datetime
- comp_last_end_datetime
- comp_num_of_pending_targets
- comp_num_of_ongoing_targets
- comp_num_of_out_of_targets

Note: All of these items are available since LeoFS v1.4.0.

### Statistics specific to the internals of LeoGateway

#### Erlang VM

- 1 min Statistics
  - num_of_processes
  - total_memory_usage
  - system_memory_usage
  - processes_memory_usage
  - ets_memory_usage
  - used_allocated_memory
  - allocated_memory
- 5 min Statistics
  - num_of_processes_5min
  - total_memory_usage_5min
  - system_memory_usage_5min
  - processes_memory_usage_5min
  - ets_memory_usage_5min
  - used_allocated_memory_5min
  - allocated_memory_5min

#### Total Number of Requests

- 1 min Statistics
  - num_of_writes
  - num_of_reads
  - num_of_deletes
- 5 min Statistics
  - num_of_writes_5min
  - num_of_reads_5min
  - num_of_deletes_5min

#### Object Cache

- count_of_cache-hit
- count_of_cache-miss
- total_of_files
- total_cached_size

### Tags:

All measurements have the following tags:

- node

### Example output:

#### LeoManager

```bash
$ ./telegraf --config ./plugins/inputs/leofs/leo_manager.conf --input-filter leofs --test
> leofs, host=manager_0, node=manager_0@127.0.0.1
allocated_memory=78255445,
allocated_memory_5min=78159025,
ets_memory_usage=4611900,
ets_memory_usage_5min=4632599,
num_of_processes=223,
num_of_processes_5min=223,
processes_memory_usage=20201316,
processes_memory_usage_5min=20186559,
system_memory_usage=37172701,
system_memory_usage_5min=37189213,
total_memory_usage=57373373,
total_memory_usage_5min=57374653,
used_allocated_memory=67,
used_allocated_memory_5min=67
1524105758000000000
```

#### LeoStorage

```bash
$ ./telegraf --config ./plugins/inputs/leofs/leo_storage.conf --input-filter leofs --test
> leofs,host=storage_0,node=storage_0@127.0.0.1
allocated_memory=63504384,
allocated_memory_5min=0,
comp_last_end_datetime=0,
comp_last_start_datetime=0,
comp_num_of_ongoing_targets=0,
comp_num_of_out_of_targets=0,
comp_num_of_pending_targets=8,
comp_state=0,
ets_memory_usage=3877824,
ets_memory_usage_5min=0,
mq_mdcr_num_of_msg_req_comp_metadata=0,
mq_mdcr_num_of_msg_req_sync_obj=0,
mq_num_of_msg_async_deletion_dir=0,
mq_num_of_msg_deletion_dir=0,
mq_num_of_msg_recovery_node=0,
mq_num_of_msg_req_deletion_dir=0,
num_of_active_objects=70,
num_of_deletes=0,
num_of_deletes_5min=0,
num_of_processes=577,
num_of_processes_5min=0,
num_of_reads=1,
num_of_reads_5min=0,
num_of_rebalance_messages=0,
num_of_replication_messages=0,
num_of_sync-vnode_messages=0,
num_of_writes=70,
num_of_writes_5min=0,
processes_memory_usage=20029464,
processes_memory_usage_5min=0,
system_memory_usage=25900472,
system_memory_usage_5min=0,
total_memory_usage=45920987,
total_memory_usage_5min=0,
total_objects=70,
total_size=2,
total_size_of_active_objects=2,
used_allocated_memory=69,
used_allocated_memory_5min=0
1524529826000000000
```

#### LeoGateway

```bash
$ ./telegraf --config ./plugins/inputs/leofs/leo_gateway.conf --input-filter leofs --test
> leofs, host=gateway_0, node=gateway_0@127.0.0.1
allocated_memory=87941120,
allocated_memory_5min=88067672,
count_of_cache-hit=0,
count_of_cache-miss=0,
ets_memory_usage=4843497,
ets_memory_usage_5min=4841574,
num_of_deletes=0,
num_of_deletes_5min=0,
num_of_processes=555,
num_of_processes_5min=555,
num_of_reads=0,
num_of_reads_5min=0,
num_of_writes=0,
num_of_writes_5min=0,
processes_memory_usage=17388052,
processes_memory_usage_5min=17413928,
system_memory_usage=49531263,
system_memory_usage_5min=49577819,
total_cached_size=0,
total_memory_usage=66917393,
total_memory_usage_5min=66989469,
total_of_files=0,
used_allocated_memory=69,
used_allocated_memory_5min=69 1524105894000000000
```
@@ -93,6 +93,19 @@ var KeyMapping = map[ServerType][]string{
        "allocated_memory",
        "used_allocated_memory_5min",
        "allocated_memory_5min",
        // following items are since LeoFS v1.4.0
        "mq_num_of_msg_recovery_node",
        "mq_num_of_msg_deletion_dir",
        "mq_num_of_msg_async_deletion_dir",
        "mq_num_of_msg_req_deletion_dir",
        "mq_mdcr_num_of_msg_req_comp_metadata",
        "mq_mdcr_num_of_msg_req_sync_obj",
        "comp_state",
        "comp_last_start_datetime",
        "comp_last_end_datetime",
        "comp_num_of_pending_targets",
        "comp_num_of_ongoing_targets",
        "comp_num_of_out_of_targets",
    },
    ServerTypeGateway: {
        "num_of_processes",
@@ -42,34 +42,46 @@ package main

import "fmt"

const output = ` + "`" + `.1.3.6.1.4.1.35450.34.1.0 = STRING: "storage_0@127.0.0.1"
.1.3.6.1.4.1.35450.34.2.0 = Gauge32: 512
.1.3.6.1.4.1.35450.34.3.0 = Gauge32: 38126307
.1.3.6.1.4.1.35450.34.4.0 = Gauge32: 22308716
.1.3.6.1.4.1.35450.34.5.0 = Gauge32: 15816448
.1.3.6.1.4.1.35450.34.6.0 = Gauge32: 5232008
.1.3.6.1.4.1.35450.34.7.0 = Gauge32: 512
.1.3.6.1.4.1.35450.34.8.0 = Gauge32: 38113176
.1.3.6.1.4.1.35450.34.9.0 = Gauge32: 22313398
.1.3.6.1.4.1.35450.34.10.0 = Gauge32: 15798779
.1.3.6.1.4.1.35450.34.11.0 = Gauge32: 5237315
.1.3.6.1.4.1.35450.34.12.0 = Gauge32: 191
.1.3.6.1.4.1.35450.34.13.0 = Gauge32: 824
.1.3.6.1.4.1.35450.34.14.0 = Gauge32: 0
.1.3.6.1.4.1.35450.34.15.0 = Gauge32: 50105
.1.3.6.1.4.1.35450.34.16.0 = Gauge32: 196654
.1.3.6.1.4.1.35450.34.17.0 = Gauge32: 0
.1.3.6.1.4.1.35450.34.18.0 = Gauge32: 2052
.1.3.6.1.4.1.35450.34.19.0 = Gauge32: 50296
.1.3.6.1.4.1.35450.34.20.0 = Gauge32: 35
.1.3.6.1.4.1.35450.34.21.0 = Gauge32: 898
.1.3.6.1.4.1.35450.34.22.0 = Gauge32: 0
.1.3.6.1.4.1.35450.34.23.0 = Gauge32: 0
.1.3.6.1.4.1.35450.34.24.0 = Gauge32: 0
.1.3.6.1.4.1.35450.34.31.0 = Gauge32: 51
.1.3.6.1.4.1.35450.34.32.0 = Gauge32: 53219328
.1.3.6.1.4.1.35450.34.33.0 = Gauge32: 51
.1.3.6.1.4.1.35450.34.34.0 = Gauge32: 53351083` + "`" +
const output = ` + "`" + `.1.3.6.1.4.1.35450.56.1.0 = STRING: "storage_0@127.0.0.1"
.1.3.6.1.4.1.35450.56.2.0 = Gauge32: 512
.1.3.6.1.4.1.35450.56.3.0 = Gauge32: 38126307
.1.3.6.1.4.1.35450.56.4.0 = Gauge32: 22308716
.1.3.6.1.4.1.35450.56.5.0 = Gauge32: 15816448
.1.3.6.1.4.1.35450.56.6.0 = Gauge32: 5232008
.1.3.6.1.4.1.35450.56.7.0 = Gauge32: 512
.1.3.6.1.4.1.35450.56.8.0 = Gauge32: 38113176
.1.3.6.1.4.1.35450.56.9.0 = Gauge32: 22313398
.1.3.6.1.4.1.35450.56.10.0 = Gauge32: 15798779
.1.3.6.1.4.1.35450.56.11.0 = Gauge32: 5237315
.1.3.6.1.4.1.35450.56.12.0 = Gauge32: 191
.1.3.6.1.4.1.35450.56.13.0 = Gauge32: 824
.1.3.6.1.4.1.35450.56.14.0 = Gauge32: 0
.1.3.6.1.4.1.35450.56.15.0 = Gauge32: 50105
.1.3.6.1.4.1.35450.56.16.0 = Gauge32: 196654
.1.3.6.1.4.1.35450.56.17.0 = Gauge32: 0
.1.3.6.1.4.1.35450.56.18.0 = Gauge32: 2052
.1.3.6.1.4.1.35450.56.19.0 = Gauge32: 50296
.1.3.6.1.4.1.35450.56.20.0 = Gauge32: 35
.1.3.6.1.4.1.35450.56.21.0 = Gauge32: 898
.1.3.6.1.4.1.35450.56.22.0 = Gauge32: 0
.1.3.6.1.4.1.35450.56.23.0 = Gauge32: 0
.1.3.6.1.4.1.35450.56.24.0 = Gauge32: 0
.1.3.6.1.4.1.35450.56.31.0 = Gauge32: 51
.1.3.6.1.4.1.35450.56.32.0 = Gauge32: 53219328
.1.3.6.1.4.1.35450.56.33.0 = Gauge32: 51
.1.3.6.1.4.1.35450.56.34.0 = Gauge32: 53351083
.1.3.6.1.4.1.35450.56.41.0 = Gauge32: 101
.1.3.6.1.4.1.35450.56.42.0 = Gauge32: 216
.1.3.6.1.4.1.35450.56.43.0 = Gauge32: 313
.1.3.6.1.4.1.35450.56.44.0 = Gauge32: 421
.1.3.6.1.4.1.35450.56.45.0 = Gauge32: 597
.1.3.6.1.4.1.35450.56.46.0 = Gauge32: 628
.1.3.6.1.4.1.35450.56.51.0 = Gauge32: 1
.1.3.6.1.4.1.35450.56.52.0 = Gauge32: 1522154118
.1.3.6.1.4.1.35450.56.53.0 = Gauge32: 1522196496
.1.3.6.1.4.1.35450.56.54.0 = Gauge32: 1
.1.3.6.1.4.1.35450.56.55.0 = Gauge32: 7
.1.3.6.1.4.1.35450.56.56.0 = Gauge32: 0` + "`" +
`
func main() {
    fmt.Println(output)
@@ -80,6 +80,8 @@ Timestamp modifiers can be used to convert captures to the timestamp of the
parsed metric. If no timestamp is parsed the metric will be created using the
current time.

You must capture at least one field per line.

- Available modifiers:
  - string (default if nothing is specified)
  - int
@@ -108,10 +110,11 @@ CUSTOM time layouts must be within quotes and be the representation of the
"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`
See https://golang.org/pkg/time/#Parse for more details.

Telegraf has many of its own
[built-in patterns](./grok/patterns/influx-patterns),
as well as supporting
Telegraf has many of its own [built-in patterns](./grok/patterns/influx-patterns),
as well as support for most of
[logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns).
_Golang regular expressions do not support lookahead or lookbehind.
logstash patterns that depend on these are not supported._

If you need help building patterns to match your logs,
you will find the https://grokdebug.herokuapp.com application quite useful!
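
As a hedged illustration of these modifiers in a logparser configuration (the file path and log format are assumptions for the sketch, not part of this changeset):

```toml
[[inputs.logparser]]
  ## illustrative file path
  files = ["/var/log/myapp/app.log"]

  [inputs.logparser.grok]
    ## One pattern: a timestamp parsed with a custom Go reference-time
    ## layout via the ts-"..." modifier, plus a float field capture.
    patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02T15:04:05"} value=%{NUMBER:value:float}']
```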
@@ -70,6 +70,7 @@ const sampleConfig = `
  # watch_method = "inotify"

  ## Parse logstash-style "grok" patterns:
  ## Telegraf built-in parsing patterns: https://goo.gl/dkay10
  [inputs.logparser.grok]
    ## This is a list of patterns to check the given log file(s) for.
    ## Note that adding patterns here increases processing time. The most
103  plugins/inputs/mcrouter/README.md  (new file)
@@ -0,0 +1,103 @@
# Mcrouter Input Plugin

This plugin gathers statistics from a Mcrouter server.

### Configuration:

```toml
# Read metrics from one or many mcrouter servers.
[[inputs.mcrouter]]
  ## An array of addresses to gather stats about. Specify an ip or hostname
  ## with port, e.g. tcp://localhost:11211, tcp://10.0.0.1:11211.
  servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"]

  ## Timeout for metric collections from all servers. Minimum timeout is "1s".
  # timeout = "5s"
```

### Measurements & Fields:

The fields from this plugin are gathered in the *mcrouter* measurement.

A description of the gathered fields can be found [here](https://github.com/facebook/mcrouter/wiki/Stats-list).

Fields:

* uptime
* num_servers
* num_servers_new
* num_servers_up
* num_servers_down
* num_servers_closed
* num_clients
* num_suspect_servers
* destination_batches_sum
* destination_requests_sum
* outstanding_route_get_reqs_queued
* outstanding_route_update_reqs_queued
* outstanding_route_get_avg_queue_size
* outstanding_route_update_avg_queue_size
* outstanding_route_get_avg_wait_time_sec
* outstanding_route_update_avg_wait_time_sec
* retrans_closed_connections
* destination_pending_reqs
* destination_inflight_reqs
* destination_batch_size
* asynclog_requests
* proxy_reqs_processing
* proxy_reqs_waiting
* client_queue_notify_period
* rusage_system
* rusage_user
* ps_num_minor_faults
* ps_num_major_faults
* ps_user_time_sec
* ps_system_time_sec
* ps_vsize
* ps_rss
* fibers_allocated
* fibers_pool_size
* fibers_stack_high_watermark
* successful_client_connections
* duration_us
* destination_max_pending_reqs
* destination_max_inflight_reqs
* retrans_per_kbyte_max
* cmd_get_count
* cmd_delete_out
* cmd_lease_get
* cmd_set
* cmd_get_out_all
* cmd_get_out
* cmd_lease_set_count
* cmd_other_out_all
* cmd_lease_get_out
* cmd_set_count
* cmd_lease_set_out
* cmd_delete_count
* cmd_other
* cmd_delete
* cmd_get
* cmd_lease_set
* cmd_set_out
* cmd_lease_get_count
* cmd_other_out
* cmd_lease_get_out_all
* cmd_set_out_all
* cmd_other_count
* cmd_delete_out_all
* cmd_lease_set_out_all

### Tags:

* Mcrouter measurements have the following tags:
  - server (the host name from which metrics are gathered)

### Example Output:

```
$ ./telegraf --config telegraf.conf --input-filter mcrouter --test
mcrouter,server=localhost:11211 uptime=166,num_servers=1,num_servers_new=1,num_servers_up=0,num_servers_down=0,num_servers_closed=0,num_clients=1,num_suspect_servers=0,destination_batches_sum=0,destination_requests_sum=0,outstanding_route_get_reqs_queued=0,outstanding_route_update_reqs_queued=0,outstanding_route_get_avg_queue_size=0,outstanding_route_update_avg_queue_size=0,outstanding_route_get_avg_wait_time_sec=0,outstanding_route_update_avg_wait_time_sec=0,retrans_closed_connections=0,destination_pending_reqs=0,destination_inflight_reqs=0,destination_batch_size=0,asynclog_requests=0,proxy_reqs_processing=1,proxy_reqs_waiting=0,client_queue_notify_period=0,rusage_system=0.040966,rusage_user=0.020483,ps_num_minor_faults=2490,ps_num_major_faults=11,ps_user_time_sec=0.02,ps_system_time_sec=0.04,ps_vsize=697741312,ps_rss=10563584,fibers_allocated=0,fibers_pool_size=0,fibers_stack_high_watermark=0,successful_client_connections=18,duration_us=0,destination_max_pending_reqs=0,destination_max_inflight_reqs=0,retrans_per_kbyte_max=0,cmd_get_count=0,cmd_delete_out=0,cmd_lease_get=0,cmd_set=0,cmd_get_out_all=0,cmd_get_out=0,cmd_lease_set_count=0,cmd_other_out_all=0,cmd_lease_get_out=0,cmd_set_count=0,cmd_lease_set_out=0,cmd_delete_count=0,cmd_other=0,cmd_delete=0,cmd_get=0,cmd_lease_set=0,cmd_set_out=0,cmd_lease_get_count=0,cmd_other_out=0,cmd_lease_get_out_all=0,cmd_set_out_all=0,cmd_other_count=0,cmd_delete_out_all=0,cmd_lease_set_out_all=0 1453831884664956455
```
286  plugins/inputs/mcrouter/mcrouter.go  (new file)
@@ -0,0 +1,286 @@
package mcrouter

import (
    "bufio"
    "context"
    "fmt"
    "net"
    "net/url"
    "strconv"
    "strings"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/plugins/inputs"
)

// Mcrouter is a mcrouter plugin
type Mcrouter struct {
    Servers []string
    Timeout internal.Duration
}

// enum for statType
type statType int

const (
    typeInt   statType = iota
    typeFloat statType = iota
)

var sampleConfig = `
  ## An array of addresses to gather stats about. Specify an ip or hostname
  ## with port, e.g. tcp://localhost:11211, tcp://10.0.0.1:11211.
  servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"]

  ## Timeout for metric collections from all servers. Minimum timeout is "1s".
  # timeout = "5s"
`

var defaultTimeout = 5 * time.Second

var defaultServerURL = url.URL{
    Scheme: "tcp",
    Host:   "localhost:11211",
}

// The list of metrics that should be sent
var sendMetrics = map[string]statType{
    "uptime":                                      typeInt,
    "num_servers":                                 typeInt,
    "num_servers_new":                             typeInt,
    "num_servers_up":                              typeInt,
    "num_servers_down":                            typeInt,
    "num_servers_closed":                          typeInt,
    "num_clients":                                 typeInt,
    "num_suspect_servers":                         typeInt,
    "destination_batches_sum":                     typeInt,
    "destination_requests_sum":                    typeInt,
    "outstanding_route_get_reqs_queued":           typeInt,
    "outstanding_route_update_reqs_queued":        typeInt,
    "outstanding_route_get_avg_queue_size":        typeInt,
    "outstanding_route_update_avg_queue_size":     typeInt,
    "outstanding_route_get_avg_wait_time_sec":     typeInt,
    "outstanding_route_update_avg_wait_time_sec":  typeInt,
    "retrans_closed_connections":                  typeInt,
    "destination_pending_reqs":                    typeInt,
    "destination_inflight_reqs":                   typeInt,
    "destination_batch_size":                      typeInt,
    "asynclog_requests":                           typeInt,
    "proxy_reqs_processing":                       typeInt,
    "proxy_reqs_waiting":                          typeInt,
    "client_queue_notify_period":                  typeInt,
    "rusage_system":                               typeFloat,
    "rusage_user":                                 typeFloat,
    "ps_num_minor_faults":                         typeInt,
    "ps_num_major_faults":                         typeInt,
    "ps_user_time_sec":                            typeFloat,
    "ps_system_time_sec":                          typeFloat,
    "ps_vsize":                                    typeInt,
    "ps_rss":                                      typeInt,
    "fibers_allocated":                            typeInt,
    "fibers_pool_size":                            typeInt,
    "fibers_stack_high_watermark":                 typeInt,
    "successful_client_connections":               typeInt,
    "duration_us":                                 typeInt,
    "destination_max_pending_reqs":                typeInt,
    "destination_max_inflight_reqs":               typeInt,
    "retrans_per_kbyte_max":                       typeInt,
    "cmd_get_count":                               typeInt,
    "cmd_delete_out":                              typeInt,
    "cmd_lease_get":                               typeInt,
    "cmd_set":                                     typeInt,
    "cmd_get_out_all":                             typeInt,
    "cmd_get_out":                                 typeInt,
    "cmd_lease_set_count":                         typeInt,
    "cmd_other_out_all":                           typeInt,
    "cmd_lease_get_out":                           typeInt,
    "cmd_set_count":                               typeInt,
    "cmd_lease_set_out":                           typeInt,
    "cmd_delete_count":                            typeInt,
    "cmd_other":                                   typeInt,
    "cmd_delete":                                  typeInt,
    "cmd_get":                                     typeInt,
    "cmd_lease_set":                               typeInt,
    "cmd_set_out":                                 typeInt,
    "cmd_lease_get_count":                         typeInt,
    "cmd_other_out":                               typeInt,
    "cmd_lease_get_out_all":                       typeInt,
    "cmd_set_out_all":                             typeInt,
    "cmd_other_count":                             typeInt,
    "cmd_delete_out_all":                          typeInt,
    "cmd_lease_set_out_all":                       typeInt,
}

// SampleConfig returns sample configuration message
func (m *Mcrouter) SampleConfig() string {
    return sampleConfig
}

// Description returns description of Mcrouter plugin
func (m *Mcrouter) Description() string {
    return "Read metrics from one or many mcrouter servers"
}

// Gather reads stats from all configured servers and accumulates them
func (m *Mcrouter) Gather(acc telegraf.Accumulator) error {
    ctx := context.Background()

    if m.Timeout.Duration < 1*time.Second {
        m.Timeout.Duration = defaultTimeout
    }

    ctx, cancel := context.WithTimeout(ctx, m.Timeout.Duration)
    defer cancel()

    if len(m.Servers) == 0 {
        m.Servers = []string{defaultServerURL.String()}
    }

    for _, serverAddress := range m.Servers {
        acc.AddError(m.gatherServer(ctx, serverAddress, acc))
    }

    return nil
}

// ParseAddress parses an address string into 'host:port' and 'protocol' parts
func (m *Mcrouter) ParseAddress(address string) (string, string, error) {
    var protocol string
    var host string
    var port string

    u, parseError := url.Parse(address)

    if parseError != nil {
        return "", "", fmt.Errorf("Invalid server address")
    }

    if u.Scheme != "tcp" && u.Scheme != "unix" {
        return "", "", fmt.Errorf("Invalid server protocol")
    }

    protocol = u.Scheme

    if protocol == "unix" {
        if u.Path == "" {
            return "", "", fmt.Errorf("Invalid unix socket path")
        }

        address = u.Path
    } else {
        if u.Host == "" {
            return "", "", fmt.Errorf("Invalid host")
        }

        host = u.Hostname()
        port = u.Port()

        if host == "" {
            host = defaultServerURL.Hostname()
        }

        if port == "" {
            port = defaultServerURL.Port()
        }

        address = host + ":" + port
    }

    return address, protocol, nil
}

func (m *Mcrouter) gatherServer(ctx context.Context, address string, acc telegraf.Accumulator) error {
    var conn net.Conn
    var err error
    var protocol string
    var dialer net.Dialer

    address, protocol, err = m.ParseAddress(address)
    // Do not silently drop the parse error before dialing
    if err != nil {
        return err
    }

    conn, err = dialer.DialContext(ctx, protocol, address)

    if err != nil {
        return err
    }

    defer conn.Close()

    // Extend connection
    deadline, ok := ctx.Deadline()

    if ok {
        conn.SetDeadline(deadline)
    }

    // Read and write buffer
    reader := bufio.NewReader(conn)
    scanner := bufio.NewScanner(reader)

    // Send command
    if _, err := fmt.Fprint(conn, "stats\r\n"); err != nil {
        return err
    }

    values, err := parseResponse(scanner)

    if err != nil {
        return err
    }

    // Add server address as a tag
    tags := map[string]string{"server": address}

    // Process values
    fields := make(map[string]interface{})
    for key, sType := range sendMetrics {
        if value, ok := values[key]; ok {
            switch sType {
            case typeInt:
                if v, errParse := strconv.ParseInt(value, 10, 64); errParse == nil {
                    fields[key] = v
                }
            case typeFloat:
                if v, errParse := strconv.ParseFloat(value, 64); errParse == nil {
                    fields[key] = v
                }
            default:
            }
        }
    }
    acc.AddFields("mcrouter", fields, tags)
    return nil
}

func parseResponse(r *bufio.Scanner) (map[string]string, error) {
    values := make(map[string]string)

    for r.Scan() {
        // Read line
        line := r.Text()

        // Done
        if line == "END" {
            break
        }

        // Read values
        s := strings.SplitN(line, " ", 3)

        if len(s) != 3 || s[0] != "STAT" {
            return nil, fmt.Errorf("unexpected line in stats response: %s", line)
        }

        // Save values
        values[s[1]] = s[2]
    }

    return values, nil
}

func init() {
    inputs.Add("mcrouter", func() telegraf.Input {
        return &Mcrouter{}
    })
}
250  plugins/inputs/mcrouter/mcrouter_test.go  (new file)
@@ -0,0 +1,250 @@
package mcrouter

import (
    "bufio"
    "strings"
    "testing"

    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestAddressParsing(t *testing.T) {
    m := &Mcrouter{
        Servers: []string{"tcp://" + testutil.GetLocalHost()},
    }

    var acceptTests = [][3]string{
        {"tcp://localhost:8086", "localhost:8086", "tcp"},
        {"tcp://localhost", "localhost:" + defaultServerURL.Port(), "tcp"},
        {"tcp://localhost:", "localhost:" + defaultServerURL.Port(), "tcp"},
        {"tcp://:8086", defaultServerURL.Hostname() + ":8086", "tcp"},
        {"tcp://:", defaultServerURL.Host, "tcp"},
    }

    var rejectTests = []string{
        "tcp://",
    }

    for _, args := range acceptTests {
        address, protocol, err := m.ParseAddress(args[0])

        assert.Nil(t, err, args[0])
        assert.True(t, address == args[1], args[0])
        assert.True(t, protocol == args[2], args[0])
    }

    for _, addr := range rejectTests {
        address, protocol, err := m.ParseAddress(addr)

        assert.NotNil(t, err, addr)
        assert.Empty(t, address, addr)
        assert.Empty(t, protocol, addr)
    }
}

func TestMcrouterGeneratesMetrics(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration test in short mode")
    }

    m := &Mcrouter{
        Servers: []string{"tcp://" + testutil.GetLocalHost()},
    }

    var acc testutil.Accumulator

    err := acc.GatherError(m.Gather)
    require.NoError(t, err)

    intMetrics := []string{"uptime", "num_servers", "num_servers_new", "num_servers_up",
        "num_servers_down", "num_servers_closed", "num_clients",
        "num_suspect_servers", "destination_batches_sum", "destination_requests_sum",
        "outstanding_route_get_reqs_queued", "outstanding_route_update_reqs_queued",
        "outstanding_route_get_avg_queue_size", "outstanding_route_update_avg_queue_size",
        "outstanding_route_get_avg_wait_time_sec", "outstanding_route_update_avg_wait_time_sec",
        "retrans_closed_connections", "destination_pending_reqs", "destination_inflight_reqs",
        "destination_batch_size", "asynclog_requests", "proxy_reqs_processing",
        "proxy_reqs_waiting", "client_queue_notify_period",
        "ps_num_minor_faults", "ps_num_major_faults",
        "ps_vsize", "ps_rss", "fibers_allocated", "fibers_pool_size", "fibers_stack_high_watermark",
        "successful_client_connections", "duration_us", "destination_max_pending_reqs",
        "destination_max_inflight_reqs", "retrans_per_kbyte_max", "cmd_get_count", "cmd_delete_out",
        "cmd_lease_get", "cmd_set", "cmd_get_out_all", "cmd_get_out", "cmd_lease_set_count",
        "cmd_other_out_all", "cmd_lease_get_out", "cmd_set_count", "cmd_lease_set_out",
        "cmd_delete_count", "cmd_other", "cmd_delete", "cmd_get", "cmd_lease_set", "cmd_set_out",
        "cmd_lease_get_count", "cmd_other_out", "cmd_lease_get_out_all", "cmd_set_out_all",
        "cmd_other_count", "cmd_delete_out_all", "cmd_lease_set_out_all"}

    floatMetrics := []string{"rusage_system", "rusage_user", "ps_user_time_sec", "ps_system_time_sec"}

    for _, metric := range intMetrics {
        assert.True(t, acc.HasInt64Field("mcrouter", metric), metric)
    }

    for _, metric := range floatMetrics {
        assert.True(t, acc.HasFloatField("mcrouter", metric), metric)
    }
}

func TestMcrouterParseMetrics(t *testing.T) {
    r := bufio.NewReader(strings.NewReader(mcrouterStats))
    scanner := bufio.NewScanner(r)
    values, err := parseResponse(scanner)
    require.NoError(t, err, "Error parsing mcrouter response")

    tests := []struct {
        key   string
        value string
    }{
        {"uptime", "166"},
        {"num_servers", "1"},
        {"num_servers_new", "1"},
        {"num_servers_up", "0"},
        {"num_servers_down", "0"},
        {"num_servers_closed", "0"},
        {"num_clients", "1"},
        {"num_suspect_servers", "0"},
        {"destination_batches_sum", "0"},
        {"destination_requests_sum", "0"},
        {"outstanding_route_get_reqs_queued", "0"},
        {"outstanding_route_update_reqs_queued", "0"},
        {"outstanding_route_get_avg_queue_size", "0"},
        {"outstanding_route_update_avg_queue_size", "0"},
        {"outstanding_route_get_avg_wait_time_sec", "0"},
        {"outstanding_route_update_avg_wait_time_sec", "0"},
        {"retrans_closed_connections", "0"},
        {"destination_pending_reqs", "0"},
        {"destination_inflight_reqs", "0"},
        {"destination_batch_size", "0"},
        {"asynclog_requests", "0"},
        {"proxy_reqs_processing", "1"},
        {"proxy_reqs_waiting", "0"},
        {"client_queue_notify_period", "0"},
        {"rusage_system", "0.040966"},
        {"rusage_user", "0.020483"},
        {"ps_num_minor_faults", "2490"},
        {"ps_num_major_faults", "11"},
        {"ps_user_time_sec", "0.02"},
        {"ps_system_time_sec", "0.04"},
        {"ps_vsize", "697741312"},
        {"ps_rss", "10563584"},
        {"fibers_allocated", "0"},
        {"fibers_pool_size", "0"},
        {"fibers_stack_high_watermark", "0"},
        {"successful_client_connections", "18"},
        {"duration_us", "0"},
        {"destination_max_pending_reqs", "0"},
        {"destination_max_inflight_reqs", "0"},
        {"retrans_per_kbyte_max", "0"},
        {"cmd_get_count", "0"},
        {"cmd_delete_out", "0"},
        {"cmd_lease_get", "0"},
        {"cmd_set", "0"},
        {"cmd_get_out_all", "0"},
        {"cmd_get_out", "0"},
        {"cmd_lease_set_count", "0"},
        {"cmd_other_out_all", "0"},
        {"cmd_lease_get_out", "0"},
        {"cmd_set_count", "0"},
        {"cmd_lease_set_out", "0"},
        {"cmd_delete_count", "0"},
        {"cmd_other", "0"},
        {"cmd_delete", "0"},
        {"cmd_get", "0"},
        {"cmd_lease_set", "0"},
        {"cmd_set_out", "0"},
        {"cmd_lease_get_count", "0"},
        {"cmd_other_out", "0"},
        {"cmd_lease_get_out_all", "0"},
        {"cmd_set_out_all", "0"},
        {"cmd_other_count", "0"},
        {"cmd_delete_out_all", "0"},
        {"cmd_lease_set_out_all", "0"},
    }

    for _, test := range tests {
        value, ok := values[test.key]
        if !ok {
            t.Errorf("Did not find key for metric %s in values", test.key)
            continue
        }
        if value != test.value {
            t.Errorf("Metric: %s, Expected: %s, actual: %s",
                test.key, test.value, value)
        }
    }
}

var mcrouterStats = `STAT version 36.0.0 mcrouter
STAT commandargs --port 11211 --config-file /etc/mcrouter/mcrouter.json --async-dir /var/spool/mcrouter --log-path /var/log/mcrouter/mcrouter.log --stats-root /var/mcrouter/stats --server-timeout 100 --reset-inactive-connection-interval 10000 --proxy-threads auto
STAT pid 21357
STAT parent_pid 1
STAT time 1524673265
STAT uptime 166
STAT num_servers 1
STAT num_servers_new 1
STAT num_servers_up 0
STAT num_servers_down 0
STAT num_servers_closed 0
STAT num_clients 1
STAT num_suspect_servers 0
STAT destination_batches_sum 0
STAT destination_requests_sum 0
STAT outstanding_route_get_reqs_queued 0
STAT outstanding_route_update_reqs_queued 0
STAT outstanding_route_get_avg_queue_size 0
STAT outstanding_route_update_avg_queue_size 0
STAT outstanding_route_get_avg_wait_time_sec 0
|
||||
STAT outstanding_route_update_avg_wait_time_sec 0
|
||||
STAT retrans_closed_connections 0
|
||||
STAT destination_pending_reqs 0
|
||||
STAT destination_inflight_reqs 0
|
||||
STAT destination_batch_size 0
|
||||
STAT asynclog_requests 0
|
||||
STAT proxy_reqs_processing 1
|
||||
STAT proxy_reqs_waiting 0
|
||||
STAT client_queue_notify_period 0
|
||||
STAT rusage_system 0.040966
|
||||
STAT rusage_user 0.020483
|
||||
STAT ps_num_minor_faults 2490
|
||||
STAT ps_num_major_faults 11
|
||||
STAT ps_user_time_sec 0.02
|
||||
STAT ps_system_time_sec 0.04
|
||||
STAT ps_vsize 697741312
|
||||
STAT ps_rss 10563584
|
||||
STAT fibers_allocated 0
|
||||
STAT fibers_pool_size 0
|
||||
STAT fibers_stack_high_watermark 0
|
||||
STAT successful_client_connections 18
|
||||
STAT duration_us 0
|
||||
STAT destination_max_pending_reqs 0
|
||||
STAT destination_max_inflight_reqs 0
|
||||
STAT retrans_per_kbyte_max 0
|
||||
STAT cmd_get_count 0
|
||||
STAT cmd_delete_out 0
|
||||
STAT cmd_lease_get 0
|
||||
STAT cmd_set 0
|
||||
STAT cmd_get_out_all 0
|
||||
STAT cmd_get_out 0
|
||||
STAT cmd_lease_set_count 0
|
||||
STAT cmd_other_out_all 0
|
||||
STAT cmd_lease_get_out 0
|
||||
STAT cmd_set_count 0
|
||||
STAT cmd_lease_set_out 0
|
||||
STAT cmd_delete_count 0
|
||||
STAT cmd_other 0
|
||||
STAT cmd_delete 0
|
||||
STAT cmd_get 0
|
||||
STAT cmd_lease_set 0
|
||||
STAT cmd_set_out 0
|
||||
STAT cmd_lease_get_count 0
|
||||
STAT cmd_other_out 0
|
||||
STAT cmd_lease_get_out_all 0
|
||||
STAT cmd_set_out_all 0
|
||||
STAT cmd_other_count 0
|
||||
STAT cmd_delete_out_all 0
|
||||
STAT cmd_lease_set_out_all 0
|
||||
END
|
||||
`
|
||||
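Note: the `mcrouterStats` fixture above follows the memcached text protocol, where each line is `STAT <key> <value>` and the response is terminated by `END`. As a minimal standalone sketch (not the plugin's actual `parseResponse` implementation), such a response can be reduced to a key/value map like this:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// parseStats collects "STAT <key> <value>" lines into a map,
// stopping at the terminating END line. Malformed lines are skipped.
func parseStats(response string) map[string]string {
	values := map[string]string{}
	scanner := bufio.NewScanner(strings.NewReader(response))
	for scanner.Scan() {
		line := scanner.Text()
		if line == "END" {
			break
		}
		parts := strings.SplitN(line, " ", 3)
		if len(parts) == 3 && parts[0] == "STAT" {
			values[parts[1]] = parts[2]
		}
	}
	return values
}

func main() {
	fmt.Println(parseStats("STAT uptime 166\nEND\n")["uptime"]) // 166
}
```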
@@ -1,6 +1,6 @@
# Telegraf plugin: MongoDB
# MongoDB Input Plugin

#### Configuration
### Configuration:

```toml
[[inputs.mongodb]]
@@ -10,7 +10,9 @@
  ##   mongodb://user:auth_key@10.10.3.30:27017,
  ##   mongodb://10.10.3.33:18832,
  servers = ["mongodb://127.0.0.1:27017"]
  gather_perdb_stats = false

  ## When true, collect per database stats
  # gather_perdb_stats = false

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
@@ -19,53 +21,106 @@
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
```
This connection URI may differ based on your environment and MongoDB
setup. If the connecting user doesn't have the privileges required to execute
the serverStatus command, Telegraf will log an error (see below).

#### Permissions:

If your MongoDB instance has access control enabled you will need to connect
as a user with sufficient rights.

With MongoDB 3.4 and higher, the `clusterMonitor` role can be used. In
version 3.2 you may also need these additional permissions:
```
> db.grantRolesToUser("user", [{role: "read", db: "local"}])
```

If the user is missing required privileges you may see an error in the
Telegraf logs similar to:
```
Error in input [mongodb]: not authorized on admin to execute command { serverStatus: 1, recordStats: 0 }
```
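
For illustration only (this example is not part of the original README, and the user name and password are placeholders), a dedicated monitoring user could be created against the admin database like so:

```
> use admin
> db.createUser({
    user: "telegraf",
    pwd: "<password>",
    roles: [{role: "clusterMonitor", db: "admin"}]
  })
```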

#### Description
### Metrics:

The plugin collects MongoDB stats exposed by serverStatus and a few more,
creating a single measurement containing values, e.g.:
* active_reads
* active_writes
* commands_per_sec
* deletes_per_sec
* flushes_per_sec
* getmores_per_sec
* inserts_per_sec
* net_in_bytes
* net_out_bytes
* open_connections
* percent_cache_dirty
* percent_cache_used
* queries_per_sec
* queued_reads
* queued_writes
* resident_megabytes
* updates_per_sec
* vsize_megabytes
* total_in_use
* total_available
* total_created
* total_refreshing
* ttl_deletes_per_sec
* ttl_passes_per_sec
* repl_lag
* jumbo_chunks (only if mongos or mongo config)
- mongodb
  - tags:
    - hostname
  - fields:
    - active_reads (integer)
    - active_writes (integer)
    - commands_per_sec (integer)
    - deletes_per_sec (integer)
    - flushes_per_sec (integer)
    - getmores_per_sec (integer)
    - inserts_per_sec (integer)
    - jumbo_chunks (integer)
    - member_status (string)
    - net_in_bytes (integer)
    - net_out_bytes (integer)
    - open_connections (integer)
    - percent_cache_dirty (float)
    - percent_cache_used (float)
    - queries_per_sec (integer)
    - queued_reads (integer)
    - queued_writes (integer)
    - repl_commands_per_sec (integer)
    - repl_deletes_per_sec (integer)
    - repl_getmores_per_sec (integer)
    - repl_inserts_per_sec (integer)
    - repl_lag (integer)
    - repl_queries_per_sec (integer)
    - repl_updates_per_sec (integer)
    - repl_oplog_window_sec (integer)
    - resident_megabytes (integer)
    - state (string)
    - total_available (integer)
    - total_created (integer)
    - total_in_use (integer)
    - total_refreshing (integer)
    - ttl_deletes_per_sec (integer)
    - ttl_passes_per_sec (integer)
    - updates_per_sec (integer)
    - vsize_megabytes (integer)
    - wtcache_app_threads_page_read_count (integer)
    - wtcache_app_threads_page_read_time (integer)
    - wtcache_app_threads_page_write_count (integer)
    - wtcache_bytes_read_into (integer)
    - wtcache_bytes_written_from (integer)
    - wtcache_current_bytes (integer)
    - wtcache_max_bytes_configured (integer)
    - wtcache_pages_evicted_by_app_thread (integer)
    - wtcache_pages_queued_for_eviction (integer)
    - wtcache_server_evicting_pages (integer)
    - wtcache_tracked_dirty_bytes (integer)
    - wtcache_worker_thread_evictingpages (integer)

If gather_perdb_stats is set to true, the plugin will also collect per-database stats exposed by db.stats(),
creating another measurement called mongodb_db_stats containing values:
* collections
* objects
* avg_obj_size
* data_size
* storage_size
* num_extents
* indexes
* index_size
* ok
- mongodb_db_stats
  - tags:
    - db_name
    - hostname
  - fields:
    - avg_obj_size (float)
    - collections (integer)
    - data_size (integer)
    - index_size (integer)
    - indexes (integer)
    - num_extents (integer)
    - objects (integer)
    - ok (integer)
    - storage_size (integer)
    - type (string)

- mongodb_shard_stats
  - tags:
    - hostname
  - fields:
    - in_use (integer)
    - available (integer)
    - created (integer)
    - refreshing (integer)

### Example Output:
```
mongodb,hostname=127.0.0.1:27017 active_reads=0i,active_writes=0i,commands_per_sec=6i,deletes_per_sec=0i,flushes_per_sec=0i,getmores_per_sec=1i,inserts_per_sec=0i,jumbo_chunks=0i,member_status="PRI",net_in_bytes=851i,net_out_bytes=23904i,open_connections=6i,percent_cache_dirty=0,percent_cache_used=0,queries_per_sec=2i,queued_reads=0i,queued_writes=0i,repl_commands_per_sec=0i,repl_deletes_per_sec=0i,repl_getmores_per_sec=0i,repl_inserts_per_sec=0i,repl_lag=0i,repl_queries_per_sec=0i,repl_updates_per_sec=0i,resident_megabytes=67i,state="PRIMARY",total_available=0i,total_created=0i,total_in_use=0i,total_refreshing=0i,ttl_deletes_per_sec=0i,ttl_passes_per_sec=0i,updates_per_sec=0i,vsize_megabytes=729i,wtcache_app_threads_page_read_count=4i,wtcache_app_threads_page_read_time=18i,wtcache_app_threads_page_write_count=6i,wtcache_bytes_read_into=10075i,wtcache_bytes_written_from=115711i,wtcache_current_bytes=86038i,wtcache_max_bytes_configured=1073741824i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=0i,wtcache_worker_thread_evictingpages=0i 1522798796000000000
mongodb_db_stats,db_name=local,hostname=127.0.0.1:27017 avg_obj_size=818.625,collections=5i,data_size=6549i,index_size=86016i,indexes=4i,num_extents=0i,objects=8i,ok=1i,storage_size=118784i,type="db_stat" 1522799074000000000
mongodb_shard_stats,hostname=127.0.0.1:27017 in_use=3i,available=3i,created=4i,refreshing=0i 1522799074000000000
```
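
As an illustrative addition mirroring the Sample Query sections of other plugin READMEs (this query is not part of the original document), replication lag could be monitored with InfluxQL using the field names from the output above:

```
SELECT mean("repl_lag") FROM "mongodb" WHERE time > now() - 5m GROUP BY time(1m), "hostname"
```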
@@ -45,7 +45,9 @@ var sampleConfig = `
  ##   mongodb://user:auth_key@10.10.3.30:27017,
  ##   mongodb://10.10.3.33:18832,
  servers = ["mongodb://127.0.0.1:27017"]
  gather_perdb_stats = false

  ## When true, collect per database stats
  # gather_perdb_stats = false

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"

@@ -9,10 +9,11 @@ import (
)

type MongodbData struct {
	StatLine *StatLine
	Fields   map[string]interface{}
	Tags     map[string]string
	DbData   []DbData
	StatLine      *StatLine
	Fields        map[string]interface{}
	Tags          map[string]string
	DbData        []DbData
	ShardHostData []DbData
}

type DbData struct {
@@ -60,6 +61,7 @@ var DefaultReplStats = map[string]string{
	"member_status":         "NodeType",
	"state":                 "NodeState",
	"repl_lag":              "ReplLag",
	"repl_oplog_window_sec": "OplogTimeDiff",
}

var DefaultClusterStats = map[string]string{
@@ -73,6 +75,13 @@ var DefaultShardStats = map[string]string{
	"total_refreshing": "TotalRefreshing",
}

var ShardHostStats = map[string]string{
	"in_use":     "InUse",
	"available":  "Available",
	"created":    "Created",
	"refreshing": "Refreshing",
}

var MmapStats = map[string]string{
	"mapped_megabytes":     "Mapped",
	"non-mapped_megabytes": "NonMapped",
@@ -127,6 +136,22 @@ func (d *MongodbData) AddDbStats() {
	}
}

func (d *MongodbData) AddShardHostStats() {
	for host, hostStat := range d.StatLine.ShardHostStatsLines {
		hostStatLine := reflect.ValueOf(&hostStat).Elem()
		newDbData := &DbData{
			Name:   host,
			Fields: make(map[string]interface{}),
		}
		newDbData.Fields["type"] = "shard_host_stat"
		for k, v := range ShardHostStats {
			val := hostStatLine.FieldByName(v).Interface()
			newDbData.Fields[k] = val
		}
		d.ShardHostData = append(d.ShardHostData, *newDbData)
	}
}

func (d *MongodbData) AddDefaultStats() {
	statLine := reflect.ValueOf(d.StatLine).Elem()
	d.addStat(statLine, DefaultStats)
@@ -178,4 +203,14 @@ func (d *MongodbData) flush(acc telegraf.Accumulator) {
		)
		db.Fields = make(map[string]interface{})
	}
	for _, host := range d.ShardHostData {
		d.Tags["hostname"] = host.Name
		acc.AddFields(
			"mongodb_shard_stats",
			host.Fields,
			d.Tags,
			d.StatLine.Time,
		)
		host.Fields = make(map[string]interface{})
	}
}

@@ -1,6 +1,7 @@
package mongodb

import (
	"sort"
	"testing"
	"time"

@@ -120,6 +121,43 @@ func TestAddShardStats(t *testing.T) {
	}
}

func TestAddShardHostStats(t *testing.T) {
	expectedHosts := []string{"hostA", "hostB"}
	hostStatLines := map[string]ShardHostStatLine{}
	for _, host := range expectedHosts {
		hostStatLines[host] = ShardHostStatLine{
			InUse:      0,
			Available:  0,
			Created:    0,
			Refreshing: 0,
		}
	}

	d := NewMongodbData(
		&StatLine{
			ShardHostStatsLines: hostStatLines,
		},
		map[string]string{}, // Use empty tags, so we don't break existing tests
	)

	var acc testutil.Accumulator
	d.AddShardHostStats()
	d.flush(&acc)

	var hostsFound []string
	for host := range hostStatLines {
		for key := range ShardHostStats {
			assert.True(t, acc.HasInt64Field("mongodb_shard_stats", key))
		}

		assert.True(t, acc.HasTag("mongodb_shard_stats", "hostname"))
		hostsFound = append(hostsFound, host)
	}
	sort.Strings(hostsFound)
	sort.Strings(expectedHosts)
	assert.Equal(t, hostsFound, expectedHosts)
}

func TestStateTag(t *testing.T) {
	d := NewMongodbData(
		&StatLine{
@@ -162,6 +200,7 @@ func TestStateTag(t *testing.T) {
		"repl_queries_per_sec":  int64(0),
		"repl_updates_per_sec":  int64(0),
		"repl_lag":              int64(0),
		"repl_oplog_window_sec": int64(0),
		"resident_megabytes":    int64(0),
		"updates_per_sec":       int64(0),
		"vsize_megabytes":       int64(0),

@@ -22,6 +22,41 @@ func (s *Server) getDefaultTags() map[string]string {
	return tags
}

type oplogEntry struct {
	Timestamp bson.MongoTimestamp `bson:"ts"`
}

func (s *Server) gatherOplogStats() *OplogStats {
	stats := &OplogStats{}
	localdb := s.Session.DB("local")

	op_first := oplogEntry{}
	op_last := oplogEntry{}
	query := bson.M{"ts": bson.M{"$exists": true}}

	for _, collection_name := range []string{"oplog.rs", "oplog.$main"} {
		if err := localdb.C(collection_name).Find(query).Sort("$natural").Limit(1).One(&op_first); err != nil {
			if err == mgo.ErrNotFound {
				continue
			}
			log.Println("E! Error getting first oplog entry (" + err.Error() + ")")
			return stats
		}
		if err := localdb.C(collection_name).Find(query).Sort("-$natural").Limit(1).One(&op_last); err != nil {
			if err == mgo.ErrNotFound {
				continue
			}
			log.Println("E! Error getting last oplog entry (" + err.Error() + ")")
			return stats
		}
	}

	op_first_time := time.Unix(int64(op_first.Timestamp>>32), 0)
	op_last_time := time.Unix(int64(op_last.Timestamp>>32), 0)
	stats.TimeDiff = int64(op_last_time.Sub(op_first_time).Seconds())
	return stats
}

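The `>> 32` shifts above rely on the layout of `bson.MongoTimestamp`: an int64 whose upper 32 bits hold Unix seconds and whose lower 32 bits hold a per-second ordinal. A minimal standalone illustration with made-up values:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Pack Unix seconds into the upper 32 bits, an ordinal into the lower 32.
	var ts int64 = 1524673265<<32 | 5

	// Recover the wall-clock time the same way gatherOplogStats does.
	fmt.Println(time.Unix(ts>>32, 0).UTC())
}
```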
func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error {
	s.Session.SetMode(mgo.Eventual, true)
	s.Session.SetSocketTimeout(0)
@@ -66,6 +101,8 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
		log.Println("E! Error getting database shard stats (" + err.Error() + ")")
	}

	oplogStats := s.gatherOplogStats()

	result_db_stats := &DbStats{}
	if gatherDbStats == true {
		names := []string{}
@@ -99,6 +136,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
		ClusterStatus: result_cluster,
		DbStats:       result_db_stats,
		ShardStats:    resultShards,
		OplogStats:    oplogStats,
	}

	defer func() {
@@ -118,6 +156,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
		)
		data.AddDefaultStats()
		data.AddDbStats()
		data.AddShardHostStats()
		data.flush(acc)
	}
	return nil

@@ -35,6 +35,7 @@ type MongoStatus struct {
	ClusterStatus *ClusterStatus
	DbStats       *DbStats
	ShardStats    *ShardStats
	OplogStats    *OplogStats
}

type ServerStatus struct {
@@ -102,6 +103,11 @@ type ReplSetStatus struct {
	MyState int64 `bson:"myState"`
}

// OplogStats stores information from getReplicationInfo
type OplogStats struct {
	TimeDiff int64
}

// ReplSetMember stores information related to a replica set member
type ReplSetMember struct {
	Name string `bson:"name"`
@@ -119,12 +125,27 @@ type WiredTiger struct {

// ShardStats stores information from shardConnPoolStats.
type ShardStats struct {
	ShardStatsData `bson:",inline"`
	Hosts          map[string]ShardHostStatsData `bson:"hosts"`
}

// ShardStatsData is the total Shard Stats from shardConnPoolStats database command.
type ShardStatsData struct {
	TotalInUse      int64 `bson:"totalInUse"`
	TotalAvailable  int64 `bson:"totalAvailable"`
	TotalCreated    int64 `bson:"totalCreated"`
	TotalRefreshing int64 `bson:"totalRefreshing"`
}

// ShardHostStatsData is the host-specific stats
// from shardConnPoolStats database command.
type ShardHostStatsData struct {
	InUse      int64 `bson:"inUse"`
	Available  int64 `bson:"available"`
	Created    int64 `bson:"created"`
	Refreshing int64 `bson:"refreshing"`
}

type ConcurrentTransactions struct {
	Write ConcurrentTransStats `bson:"write"`
	Read  ConcurrentTransStats `bson:"read"`
@@ -442,6 +463,7 @@ type StatLine struct {
	// Replicated Opcounter fields
	InsertR, QueryR, UpdateR, DeleteR, GetMoreR, CommandR int64
	ReplLag                                               int64
	OplogTimeDiff                                         int64
	Flushes                                               int64
	Mapped, Virtual, Resident, NonMapped                  int64
	Faults                                                int64
@@ -462,6 +484,9 @@ type StatLine struct {

	// Shard stats
	TotalInUse, TotalAvailable, TotalCreated, TotalRefreshing int64

	// Shard Hosts stats field
	ShardHostStatsLines map[string]ShardHostStatLine
}

type DbStatLine struct {
@@ -477,6 +502,13 @@ type DbStatLine struct {
	Ok          int64
}

type ShardHostStatLine struct {
	InUse      int64
	Available  int64
	Created    int64
	Refreshing int64
}

func parseLocks(stat ServerStatus) map[string]LockUsage {
	returnVal := map[string]LockUsage{}
	for namespace, lockInfo := range stat.Locks {
@@ -772,6 +804,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec

	newClusterStat := *newMongo.ClusterStatus
	returnVal.JumboChunksCount = newClusterStat.JumboChunksCount
	returnVal.OplogTimeDiff = newMongo.OplogStats.TimeDiff

	newDbStats := *newMongo.DbStats
	for _, db := range newDbStats.Dbs {
@@ -801,6 +834,17 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
	returnVal.TotalAvailable = newShardStats.TotalAvailable
	returnVal.TotalCreated = newShardStats.TotalCreated
	returnVal.TotalRefreshing = newShardStats.TotalRefreshing
	returnVal.ShardHostStatsLines = map[string]ShardHostStatLine{}
	for host, stats := range newShardStats.Hosts {
		shardStatLine := &ShardHostStatLine{
			InUse:      stats.InUse,
			Available:  stats.Available,
			Created:    stats.Created,
			Refreshing: stats.Refreshing,
		}

		returnVal.ShardHostStatsLines[host] = *shardStatLine
	}

	return returnVal
}

@@ -114,7 +114,7 @@ style concurrently:
  servers = ["tcp(127.0.0.1:3306)/"]

[[inputs.mysql]]
  name_suffix = "_v2"
  name_override = "_2"
  metric_version = 2

  servers = ["tcp(127.0.0.1:3306)/"]
@@ -141,7 +141,7 @@ measurement name.
  metric_version = 2

[[inputs.mysql]]
  name_suffix = "_v2"
  name_override = "_2"
  metric_version = 2

  servers = ["tcp(127.0.0.1:3306)/"]

@@ -608,9 +608,7 @@ func (m *Mysql) gatherSlaveStatuses(db *sql.DB, serv string, acc telegraf.Accumu
	}
	// range over columns, and try to parse values
	for i, col := range cols {
		if m.MetricVersion >= 2 {
			col = strings.ToLower(col)
		}
		col = strings.ToLower(col)
		if value, ok := m.parseValue(*vals[i].(*sql.RawBytes)); ok {
			fields["slave_"+col] = value
		}

@@ -8,7 +8,7 @@ import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/parsers"
	nats "github.com/nats-io/go-nats"
	"github.com/nats-io/nats"
)

type natsError struct {

@@ -5,7 +5,7 @@ import (

	"github.com/influxdata/telegraf/plugins/parsers"
	"github.com/influxdata/telegraf/testutil"
	nats "github.com/nats-io/go-nats"
	"github.com/nats-io/nats"
	"github.com/stretchr/testify/assert"
)

@@ -1,5 +1,3 @@
// +build !windows

package ntpq

import (

47  plugins/inputs/nvidia_smi/README.md  Normal file
@@ -0,0 +1,47 @@
# `nvidia-smi` Input Plugin

This plugin uses a query on the [`nvidia-smi`](https://developer.nvidia.com/nvidia-system-management-interface) binary to pull GPU stats including memory usage, GPU utilization, temperature, and more.

### Configuration

```toml
# Pulls statistics from nvidia GPUs attached to the host
[[inputs.nvidia_smi]]
  ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath
  # bin_path = "/usr/bin/nvidia-smi"

  ## Optional: timeout for GPU polling
  # timeout = "5s"
```

### Metrics
- measurement: `nvidia_smi`
  - tags
    - `name` (type of GPU e.g. `GeForce GTX 1070 Ti`)
    - `compute_mode` (The compute mode of the GPU e.g. `Default`)
    - `index` (The index of the GPU as enumerated by `nvidia-smi`, e.g. `1`)
    - `pstate` (The performance state of the GPU, e.g. `P0`)
    - `uuid` (A unique identifier for the GPU e.g. `GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665`)
  - fields
    - `fan_speed` (integer, percentage)
    - `memory_free` (integer, KB)
    - `memory_used` (integer, KB)
    - `memory_total` (integer, KB)
    - `temperature_gpu` (integer, degrees C)
    - `utilization_gpu` (integer, percentage)
    - `utilization_memory` (integer, percentage)

### Sample Query

The query below could be used to alert on the average temperature of your GPUs over the last five minutes, grouped by minute:

```
SELECT mean("temperature_gpu") FROM "nvidia_smi" WHERE time > now() - 5m GROUP BY time(1m), "index", "name", "host"
```

### Example Output
```
nvidia_smi,compute_mode=Default,host=8218cf,index=0,name=GeForce\ GTX\ 1070,pstate=P2,uuid=GPU-823bc202-6279-6f2c-d729-868a30f14d96 fan_speed=100i,memory_free=7563i,memory_total=8112i,memory_used=549i,temperature_gpu=53i,utilization_gpu=100i,utilization_memory=90i 1523991122000000000
nvidia_smi,compute_mode=Default,host=8218cf,index=1,name=GeForce\ GTX\ 1080,pstate=P2,uuid=GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665 fan_speed=100i,memory_free=7557i,memory_total=8114i,memory_used=557i,temperature_gpu=50i,utilization_gpu=100i,utilization_memory=85i 1523991122000000000
nvidia_smi,compute_mode=Default,host=8218cf,index=2,name=GeForce\ GTX\ 1080,pstate=P2,uuid=GPU-d4cfc28d-0481-8d07-b81a-ddfc63d74adf fan_speed=100i,memory_free=7557i,memory_total=8114i,memory_used=557i,temperature_gpu=58i,utilization_gpu=100i,utilization_memory=86i 1523991122000000000
```
149  plugins/inputs/nvidia_smi/nvidia_smi.go  Normal file
@@ -0,0 +1,149 @@
package nvidia_smi

import (
	"bufio"
	"fmt"
	"os"
	"os/exec"
	"strconv"
	"strings"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
)

var (
	measurement = "nvidia_smi"
	metrics     = "fan.speed,memory.total,memory.used,memory.free,pstate,temperature.gpu,name,uuid,compute_mode,utilization.gpu,utilization.memory,index"
	metricNames = [][]string{
		[]string{"fan_speed", "field"},
		[]string{"memory_total", "field"},
		[]string{"memory_used", "field"},
		[]string{"memory_free", "field"},
		[]string{"pstate", "tag"},
		[]string{"temperature_gpu", "field"},
		[]string{"name", "tag"},
		[]string{"uuid", "tag"},
		[]string{"compute_mode", "tag"},
		[]string{"utilization_gpu", "field"},
		[]string{"utilization_memory", "field"},
		[]string{"index", "tag"},
	}
)

// NvidiaSMI holds the methods for this plugin
type NvidiaSMI struct {
	BinPath string
	Timeout internal.Duration

	metrics string
}

// Description returns the description of the NvidiaSMI plugin
func (smi *NvidiaSMI) Description() string {
	return "Pulls statistics from nvidia GPUs attached to the host"
}

// SampleConfig returns the sample configuration for the NvidiaSMI plugin
func (smi *NvidiaSMI) SampleConfig() string {
	return `
## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath
# bin_path = "/usr/bin/nvidia-smi"

## Optional: timeout for GPU polling
# timeout = "5s"
`
}

// Gather implements the telegraf interface
func (smi *NvidiaSMI) Gather(acc telegraf.Accumulator) error {

	if _, err := os.Stat(smi.BinPath); os.IsNotExist(err) {
		return fmt.Errorf("nvidia-smi binary not at path %s, cannot gather GPU data", smi.BinPath)
	}

	data, err := smi.pollSMI()
	if err != nil {
		return err
	}

	err = gatherNvidiaSMI(data, acc)
	if err != nil {
		return err
	}

	return nil
}

func init() {
	inputs.Add("nvidia_smi", func() telegraf.Input {
		return &NvidiaSMI{
			BinPath: "/usr/bin/nvidia-smi",
			Timeout: internal.Duration{Duration: 5 * time.Second},
			metrics: metrics,
		}
	})
}

func (smi *NvidiaSMI) pollSMI() (string, error) {
	// Construct and execute metrics query
	opts := []string{"--format=noheader,nounits,csv", fmt.Sprintf("--query-gpu=%s", smi.metrics)}
	ret, err := internal.CombinedOutputTimeout(exec.Command(smi.BinPath, opts...), smi.Timeout.Duration)
	if err != nil {
		return "", err
	}
	return string(ret), nil
}

func gatherNvidiaSMI(ret string, acc telegraf.Accumulator) error {
	// First split the lines up and handle each one
	scanner := bufio.NewScanner(strings.NewReader(ret))
	for scanner.Scan() {
		tags, fields, err := parseLine(scanner.Text())
		if err != nil {
			return err
		}
		acc.AddFields(measurement, fields, tags)
	}

	if err := scanner.Err(); err != nil {
		return fmt.Errorf("Error scanning text %s", ret)
	}

	return nil
}

func parseLine(line string) (map[string]string, map[string]interface{}, error) {
	tags := make(map[string]string, 0)
	fields := make(map[string]interface{}, 0)

	// Next split up the comma delimited metrics
	met := strings.Split(line, ",")

	// Make sure there are as many metrics in the line as there were queried.
	if len(met) == len(metricNames) {
		for i, m := range metricNames {

			// First handle the tags
			if m[1] == "tag" {
				tags[m[0]] = strings.TrimSpace(met[i])
				continue
			}

			// Then parse the integers out of the fields
			out, err := strconv.ParseInt(strings.TrimSpace(met[i]), 10, 64)
			if err != nil {
				return tags, fields, err
			}
			fields[m[0]] = out
		}

		// Return the tags and fields
		return tags, fields, nil
	}

	// Otherwise report how many metrics were found versus how many were queried.
	return tags, fields, fmt.Errorf("Different number of metrics returned (%d) than expected (%d)", len(met), len(metricNames))
}
35  plugins/inputs/nvidia_smi/nvidia_smi_test.go  Normal file
@@ -0,0 +1,35 @@
package nvidia_smi

import (
	"testing"
)

func TestParseLineStandard(t *testing.T) {
	line := "85, 8114, 553, 7561, P2, 61, GeForce GTX 1070 Ti, GPU-d1911b8a-f5c8-5e66-057c-486561269de8, Default, 100, 93, 1\n"
	tags, fields, err := parseLine(line)
	if err != nil {
		t.Fail()
	}
	if tags["name"] != "GeForce GTX 1070 Ti" {
		t.Fail()
	}
	// Fields are stored as int64; fail when the parsed temperature is missing or wrong.
	if temp, ok := fields["temperature_gpu"].(int64); !ok || temp != 61 {
		t.Fail()
	}
}

func TestParseLineEmptyLine(t *testing.T) {
	line := "\n"
	_, _, err := parseLine(line)
	if err == nil {
		t.Fail()
	}
}

func TestParseLineBad(t *testing.T) {
	line := "the quick brown fox jumped over the lazy dog"
	_, _, err := parseLine(line)
	if err == nil {
		t.Fail()
	}
}
@@ -24,7 +24,7 @@ Get phpfpm stats using either HTTP status page or fpm socket.
  ##   "fcgi://10.0.0.12:9000/status"
  ##   "cgi://10.0.10.12:9001/status"
  ##
  ## Example of multiple gathering from local socket and remove host
  ## Example of multiple gathering from local socket and remote host
  ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
  urls = ["http://localhost/status"]
```

@@ -171,17 +171,17 @@ func (p *Ping) args(url string) []string {
	// Build the ping command args based on toml config
	args := []string{"-c", strconv.Itoa(p.Count), "-n", "-s", "16"}
	if p.PingInterval > 0 {
		args = append(args, "-i", strconv.FormatFloat(p.PingInterval, 'f', 1, 64))
		args = append(args, "-i", strconv.FormatFloat(p.PingInterval, 'f', -1, 64))
	}
	if p.Timeout > 0 {
		switch runtime.GOOS {
		case "darwin":
			args = append(args, "-W", strconv.FormatFloat(p.Timeout*1000, 'f', 1, 64))
			args = append(args, "-W", strconv.FormatFloat(p.Timeout*1000, 'f', -1, 64))
		case "linux":
			args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', 1, 64))
			args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', -1, 64))
		default:
			// Not sure the best option here, just assume GNU ping?
			args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', 1, 64))
			args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', -1, 64))
		}
	}
	if p.Deadline > 0 {
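The precision change from 1 to -1 in the hunk above affects how the flag values are rendered: strconv.FormatFloat with precision -1 uses the smallest number of digits that round-trips, so whole-number timeouts no longer carry a trailing `.0` (this matches the updated `"12"` expectations in the tests further down). A quick illustration:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	fmt.Println(strconv.FormatFloat(12, 'f', 1, 64))   // "12.0"
	fmt.Println(strconv.FormatFloat(12, 'f', -1, 64))  // "12"
	fmt.Println(strconv.FormatFloat(1.2, 'f', -1, 64)) // "1.2"
}
```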
@@ -243,21 +243,24 @@ func processPingOutput(out string) (int, int, float64, float64, float64, float64
		}
	} else if strings.Contains(line, "min/avg/max") {
		stats := strings.Split(line, " ")[3]
		min, err = strconv.ParseFloat(strings.Split(stats, "/")[0], 64)
		data := strings.Split(stats, "/")
		min, err = strconv.ParseFloat(data[0], 64)
		if err != nil {
			return trans, recv, min, avg, max, stddev, err
		}
		avg, err = strconv.ParseFloat(strings.Split(stats, "/")[1], 64)
		avg, err = strconv.ParseFloat(data[1], 64)
		if err != nil {
			return trans, recv, min, avg, max, stddev, err
		}
		max, err = strconv.ParseFloat(strings.Split(stats, "/")[2], 64)
		max, err = strconv.ParseFloat(data[2], 64)
		if err != nil {
			return trans, recv, min, avg, max, stddev, err
		}
		stddev, err = strconv.ParseFloat(strings.Split(stats, "/")[3], 64)
		if err != nil {
			return trans, recv, min, avg, max, stddev, err
		if len(data) == 4 {
			stddev, err = strconv.ParseFloat(data[3], 64)
			if err != nil {
				return trans, recv, min, avg, max, stddev, err
			}
		}
	}
}

@@ -41,6 +41,19 @@ PING www.google.com (216.58.218.164) 56(84) bytes of data.
rtt min/avg/max/mdev = 35.225/43.628/51.806/5.325 ms
`

// BusyBox v1.24.1 (2017-02-28 03:28:13 CET) multi-call binary
var busyBoxPingOutput = `
PING 8.8.8.8 (8.8.8.8): 56 data bytes
64 bytes from 8.8.8.8: seq=0 ttl=56 time=22.559 ms
64 bytes from 8.8.8.8: seq=1 ttl=56 time=15.810 ms
64 bytes from 8.8.8.8: seq=2 ttl=56 time=16.262 ms
64 bytes from 8.8.8.8: seq=3 ttl=56 time=15.815 ms

--- 8.8.8.8 ping statistics ---
4 packets transmitted, 4 packets received, 0% packet loss
round-trip min/avg/max = 15.810/17.611/22.559 ms
`

// Fatal ping output (invalid argument)
var fatalPingOutput = `
ping: -i interval too short: Operation not permitted
@@ -65,6 +78,15 @@ func TestProcessPingOutput(t *testing.T) {
	assert.InDelta(t, 43.628, avg, 0.001)
	assert.InDelta(t, 51.806, max, 0.001)
	assert.InDelta(t, 5.325, stddev, 0.001)

	trans, rec, min, avg, max, stddev, err = processPingOutput(busyBoxPingOutput)
	assert.NoError(t, err)
	assert.Equal(t, 4, trans, "4 packets were transmitted")
	assert.Equal(t, 4, rec, "4 packets were received")
	assert.InDelta(t, 15.810, min, 0.001)
	assert.InDelta(t, 17.611, avg, 0.001)
	assert.InDelta(t, 22.559, max, 0.001)
	assert.InDelta(t, -1.0, stddev, 0.001)
}

// Test that processPingOutput returns an error when 'ping' fails to run, such
@@ -106,7 +128,7 @@ func TestArgs(t *testing.T) {
			"12000.0", "www.google.com"}
	default:
		expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W",
			"12.0", "www.google.com"}
			"12", "www.google.com"}
	}

	p.Deadline = 24
@@ -117,7 +139,7 @@ func TestArgs(t *testing.T) {
			"12000.0", "-t", "24", "www.google.com"}
	default:
		expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W",
			"12.0", "-w", "24", "www.google.com"}
			"12", "-w", "24", "www.google.com"}
	}

	sort.Strings(actual)
@@ -133,7 +155,7 @@ func TestArgs(t *testing.T) {
			"12000.0", "-t", "24", "-i", "1.2", "www.google.com"}
	default:
		expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W",
			"12.0", "-w", "24", "-i", "1.2", "www.google.com"}
			"12", "-w", "24", "-i", "1.2", "www.google.com"}
	}
	sort.Strings(actual)
	sort.Strings(expected)

@@ -141,6 +141,9 @@ OID to get. May be a numeric or textual OID.
* `oid_index_suffix`:
The OID sub-identifier to strip off so that the index can be matched against other fields in the table.

* `oid_index_length`:
Specifies the length of the index after the supplied table OID (in OID path segments). Truncates the index after this point to remove non-fixed-value or variable-length index suffixes (see the worked example after this list).

* `name`:
Output field/tag name.
If not specified, it defaults to the value of `oid`. If `oid` is numeric, an attempt to translate the numeric OID into a textual OID will be made.

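A worked example (not from the original README): with a table OID of `.1.0.0.2.1.5`, a walked entry at `.1.0.0.2.1.5.11.9.9` has the index `.11.9.9`; `oid_index_length = 1` keeps only the first sub-identifier, truncating the index to `.11`. This is the behaviour exercised by the `myfield5` test case later in this changeset:

```toml
[[inputs.snmp.table.field]]
  name = "myfield5"
  oid = ".1.0.0.2.1.5"
  ## Keep only the first sub-identifier of the row index.
  oid_index_length = 1
```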
@@ -218,20 +218,10 @@ func (t *Table) initBuild() error {
	if err != nil {
		return err
	}

	if t.Name == "" {
		t.Name = oidText
	}

	knownOIDs := map[string]bool{}
	for _, f := range t.Fields {
		knownOIDs[f.Oid] = true
	}
	for _, f := range fields {
		if !knownOIDs[f.Oid] {
			t.Fields = append(t.Fields, f)
		}
	}
	t.Fields = append(t.Fields, fields...)

	return nil
}
@@ -247,6 +237,8 @@ type Field struct {
	Oid string
	// OidIndexSuffix is the trailing sub-identifier on a table record OID that will be stripped off to get the record's index.
	OidIndexSuffix string
	// OidIndexLength specifies the length of the index in OID path segments. It can be used to remove sub-identifiers that vary in content or length.
	OidIndexLength int
	// IsTag controls whether this OID is output as a tag or a value.
	IsTag bool
	// Conversion controls any type conversion that is done on the value.
@@ -472,6 +464,18 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) {
			}
			idx = idx[:len(idx)-len(f.OidIndexSuffix)]
		}
		if f.OidIndexLength != 0 {
			i := f.OidIndexLength + 1 // leading separator
			idx = strings.Map(func(r rune) rune {
				if r == '.' {
					i -= 1
				}
				if i < 1 {
					return -1
				}
				return r
			}, idx)
		}

		fv, err := fieldConvert(f.Conversion, ent.Value)
		if err != nil {

@@ -21,7 +21,7 @@ var mockedCommands = [][]string{
	{"snmptranslate", "-Td", "-Ob", "-m", "all", "1.0.0.1.1"},
	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.1"},
	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.1.0"},
	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.5"},
	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.4"},
	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.2.3"},
	{"snmptranslate", "-Td", "-Ob", ".iso.2.3"},
	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".999"},
@@ -30,7 +30,6 @@ var mockedCommands = [][]string{
	{"snmptranslate", "-Td", "-Ob", "TEST::testTable"},
	{"snmptranslate", "-Td", "-Ob", "TEST::connections"},
	{"snmptranslate", "-Td", "-Ob", "TEST::latency"},
	{"snmptranslate", "-Td", "-Ob", "TEST::description"},
	{"snmptranslate", "-Td", "-Ob", "TEST::hostname"},
	{"snmptranslate", "-Td", "-Ob", "IF-MIB::ifPhysAddress.1"},
	{"snmptranslate", "-Td", "-Ob", "BRIDGE-MIB::dot1dTpFdbAddress.1"},

@@ -67,7 +67,7 @@ var mockedCommandResults = map[string]mockedCommandResult{
	"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x001.0.0.1.1": mockedCommandResult{stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false},
	"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1": mockedCommandResult{stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false},
	"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1.0": mockedCommandResult{stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false},
	"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.5": mockedCommandResult{stdout: "TEST::testTableEntry.5\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 5 }\n", stderr: "", exitError: false},
	"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.4": mockedCommandResult{stdout: "TEST::testTableEntry.4\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 4 }\n", stderr: "", exitError: false},
	"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.2.3": mockedCommandResult{stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false},
	"snmptranslate\x00-Td\x00-Ob\x00.iso.2.3": mockedCommandResult{stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false},
	"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.999": mockedCommandResult{stdout: ".999\n [TRUNCATED]\n", stderr: "", exitError: false},
@@ -76,11 +76,10 @@ var mockedCommandResults = map[string]mockedCommandResult{
	"snmptranslate\x00-Td\x00-Ob\x00TEST::testTable": mockedCommandResult{stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n", stderr: "", exitError: false},
	"snmptranslate\x00-Td\x00-Ob\x00TEST::connections": mockedCommandResult{stdout: "TEST::connections\nconnections OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tINTEGER\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 2 }\n", stderr: "", exitError: false},
	"snmptranslate\x00-Td\x00-Ob\x00TEST::latency": mockedCommandResult{stdout: "TEST::latency\nlatency OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 3 }\n", stderr: "", exitError: false},
	"snmptranslate\x00-Td\x00-Ob\x00TEST::description": mockedCommandResult{stdout: "TEST::description\ndescription OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 4 }\n", stderr: "", exitError: false},
	"snmptranslate\x00-Td\x00-Ob\x00TEST::hostname": mockedCommandResult{stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false},
	"snmptranslate\x00-Td\x00-Ob\x00IF-MIB::ifPhysAddress.1": mockedCommandResult{stdout: "IF-MIB::ifPhysAddress.1\nifPhysAddress OBJECT-TYPE\n -- FROM\tIF-MIB\n -- TEXTUAL CONVENTION PhysAddress\n SYNTAX\tOCTET STRING\n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"The interface's address at its protocol sub-layer. For\n example, for an 802.x interface, this object normally\n contains a MAC address. The interface's media-specific MIB\n must define the bit and byte ordering and the format of the\n value of this object. For interfaces which do not have such\n an address (e.g., a serial line), this object should contain\n an octet string of zero length.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) interfaces(2) ifTable(2) ifEntry(1) ifPhysAddress(6) 1 }\n", stderr: "", exitError: false},
	"snmptranslate\x00-Td\x00-Ob\x00BRIDGE-MIB::dot1dTpFdbAddress.1": mockedCommandResult{stdout: "BRIDGE-MIB::dot1dTpFdbAddress.1\ndot1dTpFdbAddress OBJECT-TYPE\n -- FROM\tBRIDGE-MIB\n -- TEXTUAL CONVENTION MacAddress\n SYNTAX\tOCTET STRING (6) \n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"A unicast MAC address for which the bridge has\n forwarding and/or filtering information.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) dot1dBridge(17) dot1dTp(4) dot1dTpFdbTable(3) dot1dTpFdbEntry(1) dot1dTpFdbAddress(1) 1 }\n", stderr: "", exitError: false},
	"snmptranslate\x00-Td\x00-Ob\x00TCP-MIB::tcpConnectionLocalAddress.1": mockedCommandResult{stdout: "TCP-MIB::tcpConnectionLocalAddress.1\ntcpConnectionLocalAddress OBJECT-TYPE\n -- FROM\tTCP-MIB\n -- TEXTUAL CONVENTION InetAddress\n SYNTAX\tOCTET STRING (0..255) \n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n DESCRIPTION\t\"The local IP address for this TCP connection. The type\n of this address is determined by the value of\n tcpConnectionLocalAddressType.\n\n As this object is used in the index for the\n tcpConnectionTable, implementors should be\n careful not to create entries that would result in OIDs\n with more than 128 subidentifiers; otherwise the information\n cannot be accessed by using SNMPv1, SNMPv2c, or SNMPv3.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) tcp(6) tcpConnectionTable(19) tcpConnectionEntry(1) tcpConnectionLocalAddress(2) 1 }\n", stderr: "", exitError: false},
	"snmptranslate\x00-Td\x00TEST::testTable.1": mockedCommandResult{stdout: "TEST::testTableEntry\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) 1 }\n", stderr: "", exitError: false},
	"snmptable\x00-Ch\x00-Cl\x00-c\x00public\x00127.0.0.1\x00TEST::testTable": mockedCommandResult{stdout: "server connections latency description \nTEST::testTable: No entries\n", stderr: "", exitError: false},
	"snmptable\x00-Ch\x00-Cl\x00-c\x00public\x00127.0.0.1\x00TEST::testTable": mockedCommandResult{stdout: "server connections latency \nTEST::testTable: No entries\n", stderr: "", exitError: false},
}

@@ -72,7 +72,7 @@ var tsc = &testSNMPConnection{
		".1.0.0.0.1.3.1": "0.456",
		".1.0.0.0.1.3.2": "0.000",
		".1.0.0.0.1.3.3": "9.999",
		".1.0.0.0.1.5.0": 123456,
		".1.0.0.0.1.4.0": 123456,
		".1.0.0.1.1":     "baz",
		".1.0.0.1.2":     234,
		".1.0.0.1.3":     []byte("byte slice"),
@@ -159,23 +159,19 @@ func TestFieldInit(t *testing.T) {

func TestTableInit(t *testing.T) {
	tbl := Table{
		Oid: ".1.0.0.0",
		Fields: []Field{
			{Oid: ".999", Name: "foo"},
			{Oid: "TEST::description", Name: "description", IsTag: true},
		},
		Oid:    ".1.0.0.0",
		Fields: []Field{{Oid: ".999", Name: "foo"}},
	}
	err := tbl.init()
	require.NoError(t, err)

	assert.Equal(t, "testTable", tbl.Name)

	assert.Len(t, tbl.Fields, 5)
	assert.Len(t, tbl.Fields, 4)
	assert.Contains(t, tbl.Fields, Field{Oid: ".999", Name: "foo", initialized: true})
	assert.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.1", Name: "server", IsTag: true, initialized: true})
	assert.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.2", Name: "connections", initialized: true})
	assert.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.3", Name: "latency", initialized: true})
	assert.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.4", Name: "description", IsTag: true, initialized: true})
}

func TestSnmpInit(t *testing.T) {
@@ -191,11 +187,10 @@ func TestSnmpInit(t *testing.T) {
	err := s.init()
	require.NoError(t, err)

	assert.Len(t, s.Tables[0].Fields, 4)
	assert.Len(t, s.Tables[0].Fields, 3)
	assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.1", Name: "server", IsTag: true, initialized: true})
	assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.2", Name: "connections", initialized: true})
	assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.3", Name: "latency", initialized: true})
	assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.4", Name: "description", initialized: true})

	assert.Equal(t, Field{
		Oid: ".1.0.0.1.1",
@@ -458,6 +453,11 @@ func TestTableBuild_walk(t *testing.T) {
				Oid:            ".1.0.0.2.1.5",
				OidIndexSuffix: ".9.9",
			},
			{
				Name:           "myfield5",
				Oid:            ".1.0.0.2.1.5",
				OidIndexLength: 1,
			},
		},
	}

@@ -474,6 +474,7 @@ func TestTableBuild_walk(t *testing.T) {
			"myfield2": 1,
			"myfield3": float64(0.123),
			"myfield4": 11,
			"myfield5": 11,
		},
	}
	rtr2 := RTableRow{
@@ -485,6 +486,7 @@ func TestTableBuild_walk(t *testing.T) {
			"myfield2": 2,
			"myfield3": float64(0.456),
			"myfield4": 22,
			"myfield5": 22,
		},
	}
	rtr3 := RTableRow{
@@ -577,7 +579,7 @@ func TestGather(t *testing.T) {
			Fields: []Field{
				{
					Name: "myOtherField",
					Oid:  ".1.0.0.0.1.5",
					Oid:  ".1.0.0.0.1.4",
				},
			},
		},

7  plugins/inputs/snmp/testdata/test.mib  vendored
@@ -22,7 +22,6 @@ TestTableEntry ::=
    server       OCTET STRING,
    connections  INTEGER,
    latency      OCTET STRING,
    description  OCTET STRING,
}

server OBJECT-TYPE
@@ -43,12 +42,6 @@ latency OBJECT-TYPE
    STATUS      current
    ::= { testTableEntry 3 }

description OBJECT-TYPE
    SYNTAX      OCTET STRING
    MAX-ACCESS  read-only
    STATUS      current
    ::= { testTableEntry 4 }

hostname OBJECT-TYPE
    SYNTAX      OCTET STRING
    MAX-ACCESS  read-only

@@ -35,6 +35,13 @@ This is a sample configuration for the plugin.
  ## 0 (default) is unlimited.
  # read_timeout = "30s"

  ## Optional TLS configuration.
  ## Only applies to stream sockets (e.g. TCP).
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Enables client authentication if set.
  # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]

  ## Maximum socket buffer size in bytes.
  ## For stream sockets, once the buffer fills up, the sender will start backing up.
  ## For datagram sockets, once the buffer fills up, metrics will start dropping.

@@ -12,6 +12,8 @@ import (

	"time"

	"crypto/tls"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
@@ -122,9 +124,9 @@ func (ssl *streamSocketListener) read(c net.Conn) {
	}

	if err := scnr.Err(); err != nil {
		if err, ok := err.(net.Error); ok && err.Timeout() {
		if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
			log.Printf("D! Timeout in plugin [input.socket_listener]: %s", err)
		} else if !strings.HasSuffix(err.Error(), ": use of closed network connection") {
		} else if netErr != nil && !strings.HasSuffix(err.Error(), ": use of closed network connection") {
			ssl.AddError(err)
		}
	}
@@ -159,11 +161,14 @@ func (psl *packetSocketListener) listen() {
	}

type SocketListener struct {
	ServiceAddress  string
	MaxConnections  int
	ReadBufferSize  int
	ReadTimeout     *internal.Duration
	KeepAlivePeriod *internal.Duration
	ServiceAddress    string             `toml:"service_address"`
	MaxConnections    int                `toml:"max_connections"`
	ReadBufferSize    int                `toml:"read_buffer_size"`
	ReadTimeout       *internal.Duration `toml:"read_timeout"`
	TLSAllowedCACerts []string           `toml:"tls_allowed_cacerts"`
	TLSCert           string             `toml:"tls_cert"`
	TLSKey            string             `toml:"tls_key"`
	KeepAlivePeriod   *internal.Duration `toml:"keep_alive_period"`

	parsers.Parser
	telegraf.Accumulator
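Given the toml tags above, a matching configuration would look like the following sketch (the listen address is an assumed example; the TLS paths reuse the sample values from this plugin's README):

```toml
[[inputs.socket_listener]]
  service_address = "tcp://:8094"
  read_timeout = "30s"
  tls_cert = "/etc/telegraf/cert.pem"
  tls_key = "/etc/telegraf/key.pem"
  ## Client certificates signed by this CA are required when set.
  tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
```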
@@ -198,6 +203,13 @@ func (sl *SocketListener) SampleConfig() string {
  ## 0 (default) is unlimited.
  # read_timeout = "30s"

  ## Optional TLS configuration.
  ## Only applies to stream sockets (e.g. TCP).
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Enables client authentication if set.
  # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]

  ## Maximum socket buffer size in bytes.
  ## For stream sockets, once the buffer fills up, the sender will start backing up.
  ## For datagram sockets, once the buffer fills up, metrics will start dropping.

@@ -242,7 +254,21 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error {

	switch spl[0] {
	case "tcp", "tcp4", "tcp6", "unix", "unixpacket":
		l, err := net.Listen(spl[0], spl[1])
		var (
			err error
			l   net.Listener
		)

		tlsCfg, err := internal.GetServerTLSConfig(sl.TLSCert, sl.TLSKey, sl.TLSAllowedCACerts)
		if err != nil {
			// Propagate TLS configuration errors instead of silently dropping them.
			return err
		}

		if tlsCfg == nil {
			l, err = net.Listen(spl[0], spl[1])
		} else {
			l, err = tls.Listen(spl[0], spl[1], tlsCfg)
		}
		if err != nil {
			return err
		}

@@ -2,12 +2,14 @@ package socket_listener

import (
	"bytes"
	"crypto/tls"
	"log"
	"net"
	"os"
	"testing"
	"time"

	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
@@ -25,6 +27,52 @@ func testEmptyLog(t *testing.T) func() {
	}
}

func TestSocketListener_tcp_tls(t *testing.T) {
	defer testEmptyLog(t)()

	sl := newSocketListener()
	sl.ServiceAddress = "tcp://127.0.0.1:0"
	sl.TLSCert = "testdata/server.pem"
	sl.TLSKey = "testdata/server.key"
	sl.TLSAllowedCACerts = []string{"testdata/ca.pem"}

	acc := &testutil.Accumulator{}
	err := sl.Start(acc)
	require.NoError(t, err)
	defer sl.Stop()

	tlsCfg, err := internal.GetTLSConfig("testdata/client.pem", "testdata/client.key", "testdata/ca.pem", true)
	require.NoError(t, err)

	secureClient, err := tls.Dial("tcp", sl.Closer.(net.Listener).Addr().String(), tlsCfg)
	require.NoError(t, err)

	testSocketListener(t, sl, secureClient)
}

func TestSocketListener_unix_tls(t *testing.T) {
	defer testEmptyLog(t)()

	sl := newSocketListener()
	sl.ServiceAddress = "unix:///tmp/telegraf_test.sock"
	sl.TLSCert = "testdata/server.pem"
	sl.TLSKey = "testdata/server.key"
	sl.TLSAllowedCACerts = []string{"testdata/ca.pem"}

	acc := &testutil.Accumulator{}
	err := sl.Start(acc)
	require.NoError(t, err)
	defer sl.Stop()

	tlsCfg, err := internal.GetTLSConfig("testdata/client.pem", "testdata/client.key", "testdata/ca.pem", true)
	require.NoError(t, err)

	secureClient, err := tls.Dial("unix", "/tmp/telegraf_test.sock", tlsCfg)
	require.NoError(t, err)

	testSocketListener(t, sl, secureClient)
}

func TestSocketListener_tcp(t *testing.T) {
	defer testEmptyLog(t)()

plugins/inputs/socket_listener/testdata/ca.pem (new vendored file, 31 lines)
@@ -0,0 +1,31 @@
-----BEGIN CERTIFICATE-----
MIIFVTCCAz2gAwIBAgIJAOhLvwv6zUf+MA0GCSqGSIb3DQEBCwUAMEExCzAJBgNV
BAYTAlVTMQswCQYDVQQIDAJDQTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsG
A1UECgwEVGVzdDAeFw0xODA0MTcwNDIwNDZaFw0yMTAyMDQwNDIwNDZaMEExCzAJ
BgNVBAYTAlVTMQswCQYDVQQIDAJDQTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzEN
MAsGA1UECgwEVGVzdDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKwE
Xy814CDH03G3Fg2/XSpYZXVMzwp6oq/bUe3iLhkOpA6C4+j07AxAAa22qEPlvYkb
W7oxVJiL0ih1od2FeAxvroBTmjG54j/Syb8OeQsZaJLNp1rRmwYGBIVi284ScaIc
dn+2bfmfpSLjK3SbU5XygtwIE3gh/B7x02UJRNJmJ1faRT2CfTeg/56xnTE4bcR5
HRrlojoN5laJngowLWAEAvWljCR8oge+ciNYB3xoK8Hgc9+WgTy95G1RBCNkaFFI
73nrcHl6dGOH9UgIqfbHJYxNEarI3o/JAr8DIBS0W4r8r4aY4JQ4LoN3bg4mLHQq
THKkVW5hyBeWe47qmlL0m4F6/+mzVi95NAWG2BQDCZJAWJNc+PbSRHi81838m7ff
O4rixd/F53LUUas8/zVca3vtv+XjOHZzIQLIy1bM4MhzpHlRcSmS9kqxxZ3S70e3
ZIWFdM0iRrtlBbJeoHIJRDpgPRYIWdRc6XotljTTi6/lN4Bj/0NK4E3iONcDsscN
kiqEHRAWZ4ptCqdVPgYR0S096Fx6OaC3ASODE0Cjb18ylZQRsQi8TiYSihGzuoio
wJwSLdIifDbbSUkjT1384cA/HsOjFQ9xHXYa6cQnAg3TUZyG1lAMJyFWYke+rxmG
srfL/EtIzgbzmEOC5anQjA2pdgUO9Pk2SinJaMApAgMBAAGjUDBOMB0GA1UdDgQW
BBQNJctDLjj8bVKNCYANaOcboPQnmzAfBgNVHSMEGDAWgBQNJctDLjj8bVKNCYAN
aOcboPQnmzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQATSr26Kc8g
3l2zuccoKWM57DQcgRmzSYwEOKA2jn3FWmrAdwozEIkLaTK0OXz0zh2dZxh9V3GR
w0WFCynbGNy/9s33MSi+zIWJOU/MZvt6zGE5CTcTgZ+u5IZyvSubMkPcwQi3Yvcg
AHmWzpF42kT2J5C5MfrSU65hrhPX7hT/CUoV3gN7oxFzj+/ED4kgNorO8SUUJCmq
DJNFbjgsD63EhnvAhn1AeM35GmKdl2enEKqcZsRkE4ZLpU7ibrThEm1aOQuJUtHk
gDAx49QMdQpWnxWxnfoiwpLu7ufR7ls8O9oA8ZJux/SVHEmtkOdRsuMtY5MElFZg
dANlQsdFWDko4ixaxFYzppuPNnRlqjGNnaEFJrNc2KR0Dxgmp28Yh2VyLd4r3fLT
nLVBYF8KzFchUdXYYPNBXwAf/N52jGfugDx8snLxOfzxoUZ4y64qMCpYhntGgBJ1
Rrk2trcn3Dw19gi8p3ylbdoz/Ch1INDDrO35pd0bZpcwASc/UNU72W5v2kGL0H7o
nJzgtrqeHcoIzNBmBhHlMlnTF5GMfrYGsf5d30KyKv7UL6qJTvT641dpKpB/FFrk
y3AQbKmKRDI+aVzeOlwdy/eJAwt7FikD4bR9GZ4PBX9n9jd4u/PHZNfxtgzplqo1
oy7kJv0cB/vRKOblmn/vPUfTFtAX7M3GkQ==
-----END CERTIFICATE-----
plugins/inputs/socket_listener/testdata/client.key (new vendored file, 27 lines)
@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAmRuY+9Gg5V4e9hCd2mYek1jKeoaZijz89EPvox78XzoGdxPf
RoukUcTVS9VWN7HyJBjRA9P+KuHI9dX47skxyxH53uXZvRmGQAJBY4cE07JHvGkZ
eK1heXoWlBzYtivckha7bLBfn1ttAzcFCblUfJdzsn9XDuC4Jfn4oSaKn1o8Rzy1
KRvyLgvsYxMA/XzhyBzVMyoUOulye7EZx4f+AwSNmNHD4OgtxxPofrrMOtXZ2tC6
xNOexIZXbsB9dyrUW+4pWXYaadU7fl2V+arAJj+NVxV+3tmGGjmd1MiIypPx6BbP
g7xH20nJ/Y0U6V7gklZpYO1i84RbtR/kqBgi9QIDAQABAoIBAEONJJM+KyHnw/tG
246HbcgO7c7fYhDW1bgj3S/4NNsC6+VP1Dv40nftQzphFtgd37rDZDyvJL3gvlyQ
mnMoO5rgBIGuocHH6C6HkDgMUznft7zOFhnjTVVeY2XX0FmXwoqGEw1iR940ZUV8
2fEvXrJV1AsWGeALj9PZlTPsoE6rv5sUk9Lh3wCD73m7GSg7DzBRE+6bBze8Lmwn
ZzTvmimhgPJw8LR5rRpYbDbhAJLAfgA7/yPgYEPxA/ffry6Ba4epj8tVNUNOAcOf
PURF+uuIF7RceI2PkdvoNuQyVR5oxQUPUfidfVK5ClUmnHECSgb/FFnYC+nU2vSi
IAnmC6ECgYEAyrUFHyxxuIQAiinjBxa0OQ3ynvMxDnF/+zvWe8536Y61lz9dblKb
0xvFhpOEMfiG/zFdZdWJ+xdq7VQVNMHu4USoskG8sZs5zImMTu50kuDNln7xYqVf
SUuN1U7cp7JouI1qkZAOsytPfAgZN/83hLObd07lAvL44jKYaHVeMmkCgYEAwVxZ
wKXpboHwQawA+4ubsnZ36IlOk21/+FlGJiDg/LB643BS+QhgVNxuB2gL1gOCYkhl
6BBcIhWMvZOIIo5uwnv4fQ+WfFwntU9POFViZgbZvkitQtorB7MXc/NU2BDrNYx2
TBCiRn/9BaZ4fziW8I3Fx3xQ3rKDBXrexmrJQq0CgYEAvYGQYT12r47Qxlo0gcsL
AA/3E/y9jwgzItglQ6eZ2ULup5C4s0wNm8Zp2s+Mlf8HjgpDi9Gf5ptU/r1N+f2Y
awd6QvRMCSraVUr+Xkh1uV7rNNhGqPd75pT460OH7EtRtb+XsrAf3gcOjyEvGnfC
GpCjNl4OobwvS6ELdRTM1IkCgYAHUGX4uo3k5zdeVJJI8ZP3ITIR8retLfQsQbw8
jvvTsx1C4ynQT7fNHfVvhEkGVGWnMBPivlOt2mDTfvQkUnzwEF5q5J8NnzLFUfWu
LNSnBVVRNFCRec0s4mJduXOZJLKw+No0sGBjCE5a21wte8eB2+sCS7qHYftAxtAM
c1eflQKBgQDGTFsMvpM8BEPTreinTllFBdjeYchcdY/Ov9DZ3mMVopjAWRD81MKM
zM1RCqwLkgv9FvF79B1FLJ1Inr8e/XIGdcrhE1a4sZdIWdqTWQ4xFrlDgxCquq66
da09WVBRdvq2kVLAMaBViH2/GP1G4ZV9a8+JHuWKj+Arrr52Qeazjw==
-----END RSA PRIVATE KEY-----
plugins/inputs/socket_listener/testdata/client.pem (new vendored file, 24 lines)
@@ -0,0 +1,24 @@
-----BEGIN CERTIFICATE-----
MIIEEjCCAfoCCQCmcronmMSqXTANBgkqhkiG9w0BAQsFADBBMQswCQYDVQQGEwJV
UzELMAkGA1UECAwCQ0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoM
BFRlc3QwHhcNMTgwNDE3MDQyNDMwWhcNNDUwOTAyMDQyNDMwWjBVMQswCQYDVQQG
EwJVUzELMAkGA1UECAwCQ0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xITAfBgNV
BAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQAD
ggEPADCCAQoCggEBAJkbmPvRoOVeHvYQndpmHpNYynqGmYo8/PRD76Me/F86BncT
30aLpFHE1UvVVjex8iQY0QPT/irhyPXV+O7JMcsR+d7l2b0ZhkACQWOHBNOyR7xp
GXitYXl6FpQc2LYr3JIWu2ywX59bbQM3BQm5VHyXc7J/Vw7guCX5+KEmip9aPEc8
tSkb8i4L7GMTAP184cgc1TMqFDrpcnuxGceH/gMEjZjRw+DoLccT6H66zDrV2drQ
usTTnsSGV27AfXcq1FvuKVl2GmnVO35dlfmqwCY/jVcVft7Zhho5ndTIiMqT8egW
z4O8R9tJyf2NFOle4JJWaWDtYvOEW7Uf5KgYIvUCAwEAATANBgkqhkiG9w0BAQsF
AAOCAgEACJkccOvBavtagiMQc9OLsbo0PkHv7Qk9uTm5Sg9+LjLGUsu+3WLjAAmj
YScHyGbvQzXlwpgo8JuwY0lMNoPfwGuydlJPfOBCbaoAqFp6Vpc/E49J9YovCsqa
2HJUJeuxpf6SiH1Vc1SECjzwzKo03t8ul7t7SNVqA0r9fV4I936FlJOeQ4d5U+Wv
H7c2LmAqbHi2Mwf+m+W6ziOvzp+szspcP2gJDX7hsKEtIlqmHYm2bzZ4fsCuU9xN
3quewBVQUOuParO632yaLgzpGmfzzxLmCPO84lxarJKCxjHG2Q2l30TO/wA44m+r
Wd17HpCT3PkCDG5eSNCSnYqfLm8DE1hLGfHiXxKmrgU94q4wvwVGOlcYa+CQeP9Q
ZW3Tj0Axz0Mqlg1iLLo12+Z/yocSY2nFnFntBFT4qBKNCeD0xH3PxC0HJdK66xBv
MVDE/OE2hBtTTts+vC9yjx4W8thtMSA4VCOgtt5sHjt3ZekiYYh5VZK47Bx/a0uc
8CouRdyppWyPp/cNC+PcGW3YnXpAkxe/bSY/qgfK5kmbeOf+HzvZAIwAH/d9VK0g
AoLNp46eP6U2E2lVvtc/HJ1C/gsiC/1TSIq/kBbYtuIJjhhH3u6IVet7WSD22Akv
o5gOpcoKwy8IPDRC5lJEAAVYUKt7ORo2en3OVg6I4FaQmeBFp5s=
-----END CERTIFICATE-----
plugins/inputs/socket_listener/testdata/server.key (new vendored file, 27 lines)
@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAzkEDLijGOqXNQPAqUjOz5TLuM28SENauknLtcfIyEN/N6PwZ
re5DjokxtDPp+c9yP/9qtn7+dBfdUXg2Mu7HQz8lAKniir2ZH+axkjp5LUE6vYJd
I1W8lOOc0kXDjozBetgriE0jkgc3v9oDBbLhN5waKR86jpQaNkfnI7/4U3yrlymK
yaT3uD6L1ldUJubdQ/xc1HxdmX8VewBnkK1urYyiRbju2iL9YmtSM72yWXvFsD1O
I4fP/XuiaymicBmXKL4cu6KYdfn1qeLAV3U35xG597M031WmR5o67rc63sqs+Q//
V3dbGqnFXRMkLhoOnuKK0DD28ujY1kctbNQWVQIDAQABAoIBAHFxFJy41H7BXulO
rxhTU6jGoHktqBQW4CGwkKTRf3QEhK6WqlEd8Y5eKzZgL1q1HLPSehEyPCYCUjpT
EgxlhLeZ7XI1/mIs8iG3swconimj7Pj60Nt0dqq1njWRJYQsKua0Kw1m0B+rVKBy
+qKRxondlA32HTD6iIg+eAUTuzO/KzimZcyL9hiT/g6aN9k0H5+qURi8dO7VV8fD
zvP8Y+oOGLwW2ccp+ZjFQizjTOkL4lgldr0hsGQXZJNHL94fA7jPdAxAUbnTicMJ
oXM++L3eCwIVabipGxxlqCMj9Dn8yfbQvRGzP2e76QDeROYZHX4osH6vLcZEjx9i
tJ4J+ekCgYEA82kKzkSKmFo4gZxnqAywlfZ2X2PADuMmHdqdiDFwt54orlMlKf/b
wVSvN/djLXwvFHuyzFmJeMFSHKFkYVTOsh8kPSETAIGkcJEMHD3viYn7DwjkQudY
vB/FpBWSiDT0T7qDUCzW3iMbx/JvTUSp7uO4ZuwOu6t6v3PEZwIChQ8CgYEA2Ov9
FXHmm7sS54HgvZd6Wk8zLMLIDnyMmECjtYOasJ9c40yQHpRlXsb+Dzn/2xhMMwth
Bln2hIiJ/e+G0bzFu4x0cItRPOQeRNyz5Pal8EsATeUwcX4KRKOZaUpDkV6XV1L0
r/HSk/wed+90B74sGoJY1qsFflOATIUVs7SIllsCgYEAwhGSB/sl9WqZet1U1+um
LyqeHlfNnREGJu9Sgm/Iyt1S2gp4qw/QCkiWmyym6nEEqHQnjj4lGR4pdaJIAkI3
ulSR9BsWp2S10voSicHn5eUZQld4hs8lNHiwf66jce2mjJrMb3QQrHOZhsWIcDa6
tjjhoU28QWzrJRIMGYTEtYkCgYA17NSJlDsj06mra5oXB6Ue9jlekz1wfH3nC4qn
AQRfi/5ncw0QzQs2OHnIBz8XlD69IcMI9SxXXioPuo/la+wr54q6v6d+X6c2rzb5
YGd4CO0WcDdOv2qGDbWBezi41q8AwlqZsqAKsc5ROnG5ywjjviufkfxXnyJx41O1
zNd3qQKBgGEy+EwUXD5iGeQxdCDnd6iVu14SoBscHO5SpIeDu3DIhnu+7gPq2VMg
Vp9j/iNVtEA3HyYCOeXc2rz9Di1wwt3YijED4birLAkC5YW6YB9rmLMfCNc1EyLh
BKAkUQN3D+XCN4pXdbKvbkOcfYRUHoD+pPBjRYH020OtPBUc6Wkl
-----END RSA PRIVATE KEY-----
plugins/inputs/socket_listener/testdata/server.pem (new vendored file, 25 lines)
@@ -0,0 +1,25 @@
-----BEGIN CERTIFICATE-----
MIIEJjCCAg4CCQCmcronmMSqXDANBgkqhkiG9w0BAQsFADBBMQswCQYDVQQGEwJV
UzELMAkGA1UECAwCQ0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoM
BFRlc3QwHhcNMTgwNDE3MDQyNDAwWhcNNDUwOTAyMDQyNDAwWjBpMQswCQYDVQQG
EwJVUzELMAkGA1UECAwCQ0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xITAfBgNV
BAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDESMBAGA1UEAwwJMTI3LjAuMC4x
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzkEDLijGOqXNQPAqUjOz
5TLuM28SENauknLtcfIyEN/N6PwZre5DjokxtDPp+c9yP/9qtn7+dBfdUXg2Mu7H
Qz8lAKniir2ZH+axkjp5LUE6vYJdI1W8lOOc0kXDjozBetgriE0jkgc3v9oDBbLh
N5waKR86jpQaNkfnI7/4U3yrlymKyaT3uD6L1ldUJubdQ/xc1HxdmX8VewBnkK1u
rYyiRbju2iL9YmtSM72yWXvFsD1OI4fP/XuiaymicBmXKL4cu6KYdfn1qeLAV3U3
5xG597M031WmR5o67rc63sqs+Q//V3dbGqnFXRMkLhoOnuKK0DD28ujY1kctbNQW
VQIDAQABMA0GCSqGSIb3DQEBCwUAA4ICAQCVgzqFrehoRAMFLMEL8avfokYtsSYc
50Yug4Es0ISo/PRWGeUnv8k1inyE3Y1iR/gbN5n/yjLXJKEflan6BuqGuukfr2eA
fRdDCyPvzQLABdxCx2n6ByQFxj92z82tizf35R2OMuHHWzTckta+7s5EvxwIiUsd
rUuXp+0ltJzlYYW9xTGFiJO9hAbRgMgZiwL8F7ayic8GmLQ1eRK/DfKDCOH3afeX
MNN5FulgjqNyhXHF33vwgIJynGDg2JEhkWjB1DkUAxll0+SMQoYyVGZVrQSGbGw1
JhOLc8C8bTzfK3qcJDuyldvjiut+To+lpu76R0u0+sn+wxQFL1uCWuAbMJgGsJgM
ARavu2XDeae9X+e8MgJuN1FYS3tihBplPjMJD3UYRybRvHAvQh26BZ7Ch3JNSNST
AL2l5T7JKU+XaWWeo+crV+AnGIJyqyh9Su/n97PEoZoEMGH4Kcl/n/w2Jms60+5s
K0FK2OGNL42ddUfQiVL9CwYQQo70hydjsIo1x8S6+tSFLMAAysQEToSjfAA6qxDu
fgGVMuIYHo0rSkpTVsHVwru08Z5o4m+XDAK0iHalZ4knKsO0lJ+9l7vFnQHlzwt7
JTjDhnyOKWPIANeWf3PrHPWE7kKpFVBqFBzOvWLJuxDu5NlgLo1PFahsahTqB9bz
qwUyMg/oYWnwqw==
-----END CERTIFICATE-----
@@ -59,7 +59,7 @@ func TestConcurrentConns(t *testing.T) {
 	require.NoError(t, listener.Start(acc))
 	defer listener.Stop()

-	time.Sleep(time.Millisecond * 25)
+	time.Sleep(time.Millisecond * 250)
 	_, err := net.Dial("tcp", "127.0.0.1:8125")
 	assert.NoError(t, err)
 	_, err = net.Dial("tcp", "127.0.0.1:8125")
@@ -72,7 +72,7 @@ func TestConcurrentConns(t *testing.T) {
 	assert.NoError(t, err)
 	_, err = conn.Write([]byte(testMsg))
 	assert.NoError(t, err)
-	time.Sleep(time.Millisecond * 10)
+	time.Sleep(time.Millisecond * 100)
 	assert.Zero(t, acc.NFields())
 }

@@ -89,7 +89,7 @@ func TestConcurrentConns1(t *testing.T) {
 	require.NoError(t, listener.Start(acc))
 	defer listener.Stop()

-	time.Sleep(time.Millisecond * 25)
+	time.Sleep(time.Millisecond * 250)
 	_, err := net.Dial("tcp", "127.0.0.1:8125")
 	assert.NoError(t, err)

@@ -100,7 +100,7 @@ func TestConcurrentConns1(t *testing.T) {
 	assert.NoError(t, err)
 	_, err = conn.Write([]byte(testMsg))
 	assert.NoError(t, err)
-	time.Sleep(time.Millisecond * 10)
+	time.Sleep(time.Millisecond * 100)
 	assert.Zero(t, acc.NFields())
 }

@@ -116,7 +116,7 @@ func TestCloseConcurrentConns(t *testing.T) {
 	acc := &testutil.Accumulator{}
 	require.NoError(t, listener.Start(acc))

-	time.Sleep(time.Millisecond * 25)
+	time.Sleep(time.Millisecond * 250)
 	_, err := net.Dial("tcp", "127.0.0.1:8125")
 	assert.NoError(t, err)
 	_, err = net.Dial("tcp", "127.0.0.1:8125")
@@ -141,7 +141,7 @@ func BenchmarkUDP(b *testing.B) {
 		panic(err)
 	}

-	time.Sleep(time.Millisecond * 25)
+	time.Sleep(time.Millisecond * 250)
 	conn, err := net.Dial("udp", "127.0.0.1:8125")
 	if err != nil {
 		panic(err)
@@ -172,7 +172,7 @@ func BenchmarkTCP(b *testing.B) {
 		panic(err)
 	}

-	time.Sleep(time.Millisecond * 25)
+	time.Sleep(time.Millisecond * 250)
 	conn, err := net.Dial("tcp", "127.0.0.1:8125")
 	if err != nil {
 		panic(err)
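The bumped sleeps above give the listener time to come up before the test dials in. As an alternative sketch only (not what the commit does): polling the port avoids both flakiness and over-long fixed waits. `waitForListener` is a hypothetical helper.

```go
package main

import (
	"fmt"
	"net"
	"time"
)

// waitForListener polls an address until it accepts a TCP connection or the
// deadline passes, instead of sleeping a fixed interval.
func waitForListener(addr string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if c, err := net.DialTimeout("tcp", addr, 100*time.Millisecond); err == nil {
			c.Close()
			return nil
		}
		time.Sleep(10 * time.Millisecond)
	}
	return fmt.Errorf("nothing listening on %s after %s", addr, timeout)
}

func main() {
	fmt.Println(waitForListener("127.0.0.1:8125", time.Second))
}
```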
@@ -33,6 +33,9 @@ type Sysstat struct {
 	// Sadc represents the path to the sadc collector utility.
 	Sadc string `toml:"sadc_path"`

+	// Force the execution time of sadc
+	SadcInterval internal.Duration `toml:"sadc_interval"`
+
 	// Sadf represents the path to the sadf cmd.
 	Sadf string `toml:"sadf_path"`

@@ -136,6 +139,11 @@ func (*Sysstat) SampleConfig() string {
 }

 func (s *Sysstat) Gather(acc telegraf.Accumulator) error {
+	if s.SadcInterval.Duration != 0 {
+		// Collect interval is calculated as interval - parseInterval
+		s.interval = int(s.SadcInterval.Duration.Seconds()) + parseInterval
+	}
+
 	if s.interval == 0 {
 		if firstTimestamp.IsZero() {
 			firstTimestamp = time.Now()
@@ -20,7 +20,7 @@ This plugin gathers stats from [Varnish HTTP Cache](https://varnish-cache.org/)

   ## Optional name for the varnish instance (or working directory) to query
   ## Usually appended after -n in varnish cli
-  # instance_name = instanceName
+  #name = instanceName
 ```

 ### Measurements & Fields:

@@ -48,7 +48,7 @@ var sampleConfig = `

   ## Optional name for the varnish instance (or working directory) to query
   ## Usually appended after -n in varnish cli
-  # instance_name = instanceName
+  #name = instanceName
 `

 func (s *Varnish) Description() string {
@@ -192,6 +192,14 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error {
 	var size uint32 = uint32(unsafe.Sizeof(PDH_FMT_COUNTERVALUE_ITEM_DOUBLE{}))
 	var emptyBuf [1]PDH_FMT_COUNTERVALUE_ITEM_DOUBLE // need at least 1 addressable null ptr.

+	type InstanceGrouping struct {
+		name       string
+		instance   string
+		objectname string
+	}
+
+	var collectFields = make(map[InstanceGrouping]map[string]interface{})
+
 	// Iterate over the known metrics and get the samples.
 	for _, metric := range m.itemCache {
 		// collect
@@ -231,20 +239,22 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error {
 			}

 			if add {
-				fields := make(map[string]interface{})
 				tags := make(map[string]string)
 				if s != "" {
 					tags["instance"] = s
 				}
 				tags["objectname"] = metric.objectName
-				fields[sanitizedChars.Replace(metric.counter)] =
-					float32(c.FmtValue.DoubleValue)

 				measurement := sanitizedChars.Replace(metric.measurement)
 				if measurement == "" {
 					measurement = "win_perf_counters"
 				}
-				acc.AddFields(measurement, fields, tags)
+				var instance = InstanceGrouping{measurement, s, metric.objectName}
+
+				if collectFields[instance] == nil {
+					collectFields[instance] = make(map[string]interface{})
+				}
+				collectFields[instance][sanitizedChars.Replace(metric.counter)] = float32(c.FmtValue.DoubleValue)
 			}
 		}

@@ -257,6 +267,14 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error {
 		}
 	}

+	for instance, fields := range collectFields {
+		var tags = map[string]string{
+			"instance":   instance.instance,
+			"objectname": instance.objectname,
+		}
+		acc.AddFields(instance.name, fields, tags)
+	}
+
 	return nil
 }
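The change above stops emitting one point per counter and instead accumulates every counter that shares a (measurement, instance, objectname) triple into a single field map, flushing once at the end of Gather. A self-contained sketch of that grouping technique (type and sample values here are illustrative, not the plugin's data):

```go
package main

import "fmt"

// instanceKey mirrors the InstanceGrouping in the hunk: one output point per
// (measurement, instance, object name) triple.
type instanceKey struct {
	name, instance, objectname string
}

func main() {
	// fake (key, counter, value) samples, standing in for the PDH read loop
	samples := []struct {
		key     instanceKey
		counter string
		value   float64
	}{
		{instanceKey{"win_cpu", "0", "Processor"}, "Percent_Idle_Time", 95.2},
		{instanceKey{"win_cpu", "0", "Processor"}, "Percent_User_Time", 3.1},
		{instanceKey{"win_cpu", "1", "Processor"}, "Percent_Idle_Time", 88.7},
	}

	// collect every counter for the same instance into one field map, so the
	// accumulator gets a single point per instance instead of one per counter
	collect := make(map[instanceKey]map[string]interface{})
	for _, s := range samples {
		if collect[s.key] == nil {
			collect[s.key] = make(map[string]interface{})
		}
		collect[s.key][s.counter] = s.value
	}
	for k, fields := range collect {
		fmt.Println(k.name, k.instance, fields) // stand-in for acc.AddFields
	}
}
```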
@@ -29,6 +29,11 @@ const (
 	DefaultShutdownTimeout = 5
 )

+var (
+	// DefaultNetwork is the network to listen on; use only in tests.
+	DefaultNetwork = "tcp"
+)
+
 // Recorder represents a type which can record zipkin trace data as well as
 // any accompanying errors, and process that data.
 type Recorder interface {
@@ -94,7 +99,7 @@ func (z *Zipkin) Start(acc telegraf.Accumulator) error {
 	}

 	addr := ":" + strconv.Itoa(z.Port)
-	ln, err := net.Listen("tcp", addr)
+	ln, err := net.Listen(DefaultNetwork, addr)
 	if err != nil {
 		return err
 	}

@@ -557,6 +557,10 @@ func TestZipkinPlugin(t *testing.T) {
 		},
 	}

+	// Workaround for Go 1.8
+	// https://github.com/golang/go/issues/18806
+	DefaultNetwork = "tcp4"
+
 	z := &Zipkin{
 		Path: "/api/v1/spans",
 		Port: 0,
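The test pins `DefaultNetwork` to `tcp4` as a Go 1.8 workaround; the details are in the linked golang/go issue, and the practical effect (our reading) is that binding an explicit IPv4 loopback listener yields an address the test can dial back verbatim. A minimal stdlib-only illustration of that shape:

```go
package main

import (
	"log"
	"net"
)

func main() {
	// Binding the "tcp4" network on 127.0.0.1 yields a plainly dialable
	// address such as 127.0.0.1:PORT; the zipkin test achieves the same
	// effect by overriding DefaultNetwork before Start.
	ln, err := net.Listen("tcp4", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	log.Println("dialable address:", ln.Addr())
}
```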
@@ -177,8 +177,6 @@ func BuildMetricDatum(point telegraf.Metric) []*cloudwatch.MetricDatum {
 		value = float64(t)
 	case int64:
 		value = float64(t)
-	case uint64:
-		value = float64(t)
 	case float64:
 		value = t
 	case bool:
@@ -7,7 +7,6 @@ import (
 	"encoding/binary"
 	"fmt"
 	"sort"
-	"strconv"
 	"strings"
 	"time"

@@ -17,8 +16,6 @@ import (
 	_ "github.com/jackc/pgx/stdlib"
 )

-const MaxInt64 = int64(^uint64(0) >> 1)
-
 type CrateDB struct {
 	URL     string
 	Timeout internal.Duration
@@ -118,19 +115,11 @@ func escapeValue(val interface{}) (string, error) {
 	switch t := val.(type) {
 	case string:
 		return escapeString(t, `'`), nil
-	case int64, float64:
+	// We don't handle uint, uint32 and uint64 here because CrateDB doesn't
+	// seem to support unsigned types. But it seems like input plugins don't
+	// produce those types, so it's hopefully ok.
+	case int, int32, int64, float32, float64:
 		return fmt.Sprint(t), nil
-	case uint64:
-		// The long type is the largest integer type in CrateDB and is the
-		// size of a signed int64. If our value is too large send the largest
-		// possible value.
-		if t <= uint64(MaxInt64) {
-			return strconv.FormatInt(int64(t), 10), nil
-		} else {
-			return strconv.FormatInt(MaxInt64, 10), nil
-		}
-	case bool:
-		return strconv.FormatBool(t), nil
 	case time.Time:
 		// see https://crate.io/docs/crate/reference/sql/data_types.html#timestamp
 		return escapeValue(t.Format("2006-01-02T15:04:05.999-0700"))
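The uint64 branch that this hunk touches clamps values to the largest signed 64-bit integer, since CrateDB's widest integer type is a signed `long`. Isolated for reference (`clampUint64` is our name, and it uses `math.MaxInt64` in place of the file's local `MaxInt64` constant):

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

// clampUint64 formats a uint64 for a signed 64-bit column: values that fit
// convert directly, anything larger is capped at the maximum signed value.
func clampUint64(v uint64) string {
	if v <= uint64(math.MaxInt64) {
		return strconv.FormatInt(int64(v), 10)
	}
	return strconv.FormatInt(math.MaxInt64, 10)
}

func main() {
	fmt.Println(clampUint64(123))                   // 123
	fmt.Println(clampUint64(uint64(math.MaxInt64))) // 9223372036854775807
	fmt.Println(clampUint64(math.MaxUint64))        // also 9223372036854775807
}
```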
@@ -111,12 +111,12 @@ func Test_escapeValue(t *testing.T) {
 		{`foo`, `'foo'`},
 		{`foo'bar 'yeah`, `'foo''bar ''yeah'`},
+		// int types
 		{123, `123`}, // int
 		{int64(123), `123`},
-		{uint64(123), `123`},
-		{uint64(MaxInt64) + 1, `9223372036854775807`},
-		{true, `true`},
-		{false, `false`},
+		{int32(123), `123`},
+		// float types
 		{123.456, `123.456`},
+		{float32(123.456), `123.456`}, // floating point SNAFU
 		{float64(123.456), `123.456`},
 		// time.Time
 		{time.Date(2017, 8, 7, 16, 44, 52, 123*1000*1000, time.FixedZone("Dreamland", 5400)), `'2017-08-07T16:44:52.123+0130'`},
@@ -179,9 +179,13 @@ func verifyValue(v interface{}) bool {

 func (p *Point) setValue(v interface{}) error {
 	switch d := v.(type) {
+	case int:
+		p[1] = float64(int(d))
+	case int32:
+		p[1] = float64(int32(d))
 	case int64:
 		p[1] = float64(d)
 	case uint64:
 		p[1] = float64(int64(d))
 	case float32:
 		p[1] = float64(d)
 	case float64:
 		p[1] = float64(d)
@@ -191,7 +195,7 @@ func (p *Point) setValue(v interface{}) error {
 		p[1] = float64(1)
 	}
 	default:
-		return fmt.Errorf("undeterminable field type: %T", v)
+		return fmt.Errorf("undeterminable type")
 	}
 	return nil
 }
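The widened type switch above coerces every supported field type to float64, routing uint64 through int64 and bools to 1/0. The same logic as a standalone helper (`toFloat64` is illustrative, not the plugin's function):

```go
package main

import "fmt"

// toFloat64 converts the telegraf field types the hunk handles into float64;
// anything else is reported as an undeterminable type.
func toFloat64(v interface{}) (float64, error) {
	switch d := v.(type) {
	case int:
		return float64(d), nil
	case int32:
		return float64(d), nil
	case int64:
		return float64(d), nil
	case uint64:
		return float64(int64(d)), nil // as in the hunk: uint64 goes via int64
	case float32:
		return float64(d), nil
	case float64:
		return d, nil
	case bool:
		if d {
			return 1, nil
		}
		return 0, nil
	default:
		return 0, fmt.Errorf("undeterminable type %T", v)
	}
}

func main() {
	for _, v := range []interface{}{int32(7), uint64(9), true, "nope"} {
		f, err := toFloat64(v)
		fmt.Println(f, err)
	}
}
```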
@@ -168,30 +168,6 @@ func TestBuildPoint(t *testing.T) {
 			},
 			nil,
 		},
-		{
-			testutil.TestMetric(int64(0), "test int64"),
-			Point{
-				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
-				0.0,
-			},
-			nil,
-		},
-		{
-			testutil.TestMetric(uint64(0), "test uint64"),
-			Point{
-				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
-				0.0,
-			},
-			nil,
-		},
-		{
-			testutil.TestMetric(true, "test bool"),
-			Point{
-				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
-				1.0,
-			},
-			nil,
-		},
 	}
 	for _, tt := range tagtests {
 		pt, err := buildMetrics(tt.ptIn)
@@ -13,7 +13,7 @@ import (
 type File struct {
 	Files []string

-	writer  io.Writer
+	writers []io.Writer
 	closers []io.Closer

 	serializer serializers.Serializer
@@ -35,15 +35,13 @@ func (f *File) SetSerializer(serializer serializers.Serializer) {
 }

 func (f *File) Connect() error {
-	writers := []io.Writer{}
-
 	if len(f.Files) == 0 {
 		f.Files = []string{"stdout"}
 	}

 	for _, file := range f.Files {
 		if file == "stdout" {
-			writers = append(writers, os.Stdout)
+			f.writers = append(f.writers, os.Stdout)
 		} else {
 			var of *os.File
 			var err error
@@ -56,11 +54,10 @@ func (f *File) Connect() error {
 			if err != nil {
 				return err
 			}
-			writers = append(writers, of)
+			f.writers = append(f.writers, of)
 			f.closers = append(f.closers, of)
 		}
 	}
-	f.writer = io.MultiWriter(writers...)
 	return nil
 }

@@ -90,17 +87,21 @@ func (f *File) Write(metrics []telegraf.Metric) error {
 		return nil
 	}

+	var writeErr error = nil
 	for _, metric := range metrics {
 		b, err := f.serializer.Serialize(metric)
 		if err != nil {
 			return fmt.Errorf("failed to serialize message: %s", err)
 		}
-		_, err = f.writer.Write(b)
-		if err != nil {
-			return fmt.Errorf("failed to write message: %s, %s", b, err)
+
+		for _, writer := range f.writers {
+			_, err = writer.Write(b)
+			if err != nil && writer != os.Stdout {
+				writeErr = fmt.Errorf("E! failed to write message: %s, %s", b, err)
+			}
 		}
 	}
-	return nil
+	return writeErr
 }

 func init() {
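The rewritten Write loop trades the single io.MultiWriter for per-writer writes, so one failing file no longer aborts the whole batch and errors on stdout are deliberately ignored. A minimal sketch of that policy (`writeAll` is a hypothetical stand-in for the plugin's method):

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// writeAll writes the payload to each sink individually, remembers the last
// file error, and never fails the batch because a stdout write failed.
func writeAll(writers []io.Writer, b []byte) error {
	var writeErr error
	for _, w := range writers {
		if _, err := w.Write(b); err != nil && w != os.Stdout {
			writeErr = fmt.Errorf("failed to write message: %s, %s", b, err)
		}
	}
	return writeErr
}

func main() {
	err := writeAll([]io.Writer{os.Stdout}, []byte("cpu usage_idle=99\n"))
	fmt.Println("err:", err)
}
```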
@@ -11,6 +11,7 @@ This InfluxDB output plugin writes metrics to the [InfluxDB](https://github.com/
 ##
 ## Multiple URLs can be specified for a single cluster, only ONE of the
 ## urls will be written to each interval.
+# urls = ["unix:///var/run/influxdb.sock"]
 # urls = ["udp://127.0.0.1:8089"]
 # urls = ["http://127.0.0.1:8086"]

@@ -23,10 +24,11 @@ This InfluxDB output plugin writes metrics to the [InfluxDB](https://github.com/
 # skip_database_creation = false

 ## Name of existing retention policy to write to. Empty string writes to
-## the default retention policy.
+## the default retention policy. Only takes effect when using HTTP.
 # retention_policy = ""

-## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
+## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
+## Only takes effect when using HTTP.
 # write_consistency = "any"

 ## Timeout for HTTP messages.
@@ -42,7 +44,7 @@ This InfluxDB output plugin writes metrics to the [InfluxDB](https://github.com/
 ## UDP payload size is the maximum packet size to send.
 # udp_payload = 512

-## Optional SSL Config
+## Optional SSL Config for use on HTTP connections.
 # ssl_ca = "/etc/telegraf/ca.pem"
 # ssl_cert = "/etc/telegraf/cert.pem"
 # ssl_key = "/etc/telegraf/key.pem"

@@ -84,10 +84,11 @@ var sampleConfig = `
 # skip_database_creation = false

 ## Name of existing retention policy to write to. Empty string writes to
-## the default retention policy.
+## the default retention policy. Only takes effect when using HTTP.
 # retention_policy = ""

-## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
+## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
+## Only takes effect when using HTTP.
 # write_consistency = "any"

 ## Timeout for HTTP messages.
@@ -103,7 +104,7 @@ var sampleConfig = `
 ## UDP payload size is the maximum packet size to send.
 # udp_payload = 512

-## Optional SSL Config
+## Optional SSL Config for use on HTTP connections.
 # ssl_ca = "/etc/telegraf/ca.pem"
 # ssl_cert = "/etc/telegraf/cert.pem"
 # ssl_key = "/etc/telegraf/key.pem"
@@ -137,15 +137,12 @@ func TestConnectHTTPConfig(t *testing.T) {
 }

 func TestWriteRecreateDatabaseIfDatabaseNotFound(t *testing.T) {
-	var createDatabaseCalled bool
-
 	output := influxdb.InfluxDB{
 		URLs: []string{"http://localhost:8086"},

 		CreateHTTPClientF: func(config *influxdb.HTTPConfig) (influxdb.Client, error) {
 			return &MockClient{
 				CreateDatabaseF: func(ctx context.Context) error {
-					createDatabaseCalled = true
 					return nil
 				},
 				WriteF: func(ctx context.Context, metrics []telegraf.Metric) error {
@@ -178,7 +175,6 @@ func TestWriteRecreateDatabaseIfDatabaseNotFound(t *testing.T) {
 	require.NoError(t, err)
 	metrics := []telegraf.Metric{m}

-	createDatabaseCalled = false
 	err = output.Write(metrics)
 	// We only have one URL, so we expect an error
 	require.Error(t, err)

@@ -197,7 +197,7 @@ func TestUDP_SerializeError(t *testing.T) {
 }

 func TestUDP_WriteWithRealConn(t *testing.T) {
-	conn, err := net.ListenPacket("udp", ":0")
+	conn, err := net.ListenPacket("udp", "127.0.0.0:0")
 	require.NoError(t, err)

 	metrics := []telegraf.Metric{
@@ -1,9 +1,8 @@
-# Kafka Output Plugin
+# Kafka Producer Output Plugin

 This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.html) acting as a Kafka Producer.

 ### Configuration:
-```toml
+```
 [[outputs.kafka]]
 ## URLs of kafka brokers
 brokers = ["localhost:9092"]
@@ -46,7 +45,7 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm
   ## 0 : No compression
   ## 1 : Gzip compression
   ## 2 : Snappy compression
-  # compression_codec = 0
+  compression_codec = 0

   ## RequiredAcks is used in Produce Requests to tell the broker how many
   ## replica acknowledgements it must see before responding
@@ -62,11 +61,10 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm
   ## received the data. This option provides the best durability, we
   ## guarantee that no messages will be lost as long as at least one in
   ## sync replica remains.
-  # required_acks = -1
+  required_acks = -1

-  ## The maximum number of times to retry sending a metric before failing
-  ## until the next flush.
-  # max_retry = 3
+  ## The total number of times to retry sending a message
+  max_retry = 3

   ## Optional SSL Config
   # ssl_ca = "/etc/telegraf/ca.pem"
@@ -79,21 +77,24 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm
   # sasl_username = "kafka"
   # sasl_password = "secret"

   ## Data format to output.
   ## Each data format has its own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
-  # data_format = "influx"
+  data_format = "influx"
 ```

-#### `max_retry`
+### Required parameters:

-This option controls the number of retries before a failure notification is
-displayed for each message when no acknowledgement is received from the
-broker. When the setting is greater than `0`, message latency can be reduced,
-duplicate messages can occur in cases of transient errors, and broker loads
-can increase during downtime.
+* `brokers`: List of strings, this is for speaking to a cluster of `kafka` brokers. On each flush interval, Telegraf will randomly choose one of the urls to write to. Each URL should just include host and port e.g. -> `["{host}:{port}","{host2}:{port2}"]`
+* `topic`: The `kafka` topic to publish to.

-The option is similar to the
-[retries](https://kafka.apache.org/documentation/#producerconfigs) Producer
-option in the Java Kafka Producer.
+### Optional parameters:

+* `routing_tag`: If this tag exists, its value will be used as the routing key
+* `compression_codec`: What level of compression to use: `0` -> no compression, `1` -> gzip compression, `2` -> snappy compression
+* `required_acks`: a setting for how many `acks` required from the `kafka` broker cluster.
+* `max_retry`: Max number of times to retry failed write
+* `ssl_ca`: SSL CA
+* `ssl_cert`: SSL CERT
+* `ssl_key`: SSL key
+* `insecure_skip_verify`: Use SSL but skip chain & host verification (default: false)
+* `data_format`: [About Telegraf data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md)
+* `topic_suffix`: Which, if any, method of calculating `kafka` topic suffix to use.
 For examples, please refer to sample configuration.
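For context on `max_retry` and `required_acks`: this output is built on the sarama Kafka client, where the two options correspond naturally to the producer config. A hedged sketch using sarama's public API (that the plugin passes the values through exactly this way is our assumption here, not something this diff shows):

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Retry.Max = 3                    // analogous to max_retry = 3
	cfg.Producer.RequiredAcks = sarama.WaitForAll // analogous to required_acks = -1
	fmt.Println(cfg.Producer.Retry.Max, int16(cfg.Producer.RequiredAcks))
}
```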
@@ -113,7 +113,7 @@ var sampleConfig = `
   ## 0 : No compression
   ## 1 : Gzip compression
   ## 2 : Snappy compression
-  # compression_codec = 0
+  compression_codec = 0

   ## RequiredAcks is used in Produce Requests to tell the broker how many
   ## replica acknowledgements it must see before responding
@@ -129,11 +129,10 @@ var sampleConfig = `
   ## received the data. This option provides the best durability, we
   ## guarantee that no messages will be lost as long as at least one in
   ## sync replica remains.
-  # required_acks = -1
+  required_acks = -1

-  ## The maximum number of times to retry sending a metric before failing
-  ## until the next flush.
-  # max_retry = 3
+  ## The total number of times to retry sending a message
+  max_retry = 3

   ## Optional SSL Config
   # ssl_ca = "/etc/telegraf/ca.pem"
@@ -150,7 +149,7 @@ var sampleConfig = `
   ## Each data format has its own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
-  # data_format = "influx"
+  data_format = "influx"
 `

 func ValidateTopicSuffixMethod(method string) error {
@@ -107,6 +107,7 @@ func (l *Librato) Write(metrics []telegraf.Metric) error {
 			for _, gauge := range gauges {
 				tempGauges = append(tempGauges, gauge)
+				log.Printf("D! Got a gauge: %v\n", gauge)

 			}
 		} else {
 			log.Printf("I! unable to build Gauge for %s, skipping\n", m.Name())
@@ -233,18 +234,16 @@ func verifyValue(v interface{}) bool {

 func (g *Gauge) setValue(v interface{}) error {
 	switch d := v.(type) {
 	case int:
 		g.Value = float64(int(d))
 	case int32:
 		g.Value = float64(int32(d))
 	case int64:
 		g.Value = float64(int64(d))
-	case uint64:
+	case float32:
 		g.Value = float64(d)
 	case float64:
 		g.Value = float64(d)
 	case bool:
 		if d {
 			g.Value = float64(1.0)
 		} else {
 			g.Value = float64(0.0)
 		}
 	default:
 		return fmt.Errorf("undeterminable type %+v", d)
 	}
Some files were not shown because too many files have changed in this diff.