Compare commits

...

185 Commits

Author SHA1 Message Date
Gunnar Aasen
4b8d0ad35d Additional code cleanup 2018-05-24 09:45:45 -07:00
Gunnar Aasen
78e5f52966 Revert Godeps changes 2018-05-24 09:45:45 -07:00
Gunnar Aasen
79b6edadd2 Refactor to use AggregatingOutput 2018-05-24 09:44:16 -07:00
Gunnar Aasen
9490a22aeb Output: Azure Monitor: Cleanup and add README 2018-05-24 09:44:16 -07:00
Gunnar Aasen
17093efad5 Output: Azure Monitor: Initial aggregated metric implementation 2018-05-24 09:44:16 -07:00
Mark Simms
d077f5dbc7 Starting on azure monitor metrics integration with MSI auth 2018-05-24 09:42:31 -07:00
Daniel Nelson
6cea487bfc Add idea for an output that aggregates before adding to metric buffer 2018-05-24 09:42:31 -07:00
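The aggregate-before-buffer idea can be sketched as follows; the types, names, and the min/max/sum/count statistics are illustrative assumptions, not the commit's actual code:

```go
package main

import "fmt"

// Hypothetical sketch of the idea above: an output that keeps running
// min/max/sum/count aggregates per metric name instead of buffering
// every raw point, and emits one aggregated metric per flush.
type aggregate struct {
	min, max, sum float64
	count         int64
}

type aggregatingOutput struct {
	cache map[string]*aggregate
}

// Add folds a value into the aggregate for its metric name.
func (o *aggregatingOutput) Add(name string, value float64) {
	a, ok := o.cache[name]
	if !ok {
		o.cache[name] = &aggregate{min: value, max: value, sum: value, count: 1}
		return
	}
	if value < a.min {
		a.min = value
	}
	if value > a.max {
		a.max = value
	}
	a.sum += value
	a.count++
}

// Push emits the aggregates and resets the cache for the next interval.
func (o *aggregatingOutput) Push() {
	for name, a := range o.cache {
		fmt.Printf("%s min=%g max=%g sum=%g count=%d\n", name, a.min, a.max, a.sum, a.count)
	}
	o.cache = make(map[string]*aggregate)
}

func main() {
	o := &aggregatingOutput{cache: make(map[string]*aggregate)}
	o.Add("cpu_usage", 1.5)
	o.Add("cpu_usage", 3.0)
	o.Push()
}
```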
Daniel Nelson
14d97e5416 Add special syslog timestamp parser that uses current year (#4190)
Previously it was impossible to parse syslog timestamps without the date
being reported as year 0, due to the year not being specified
2018-05-23 16:37:14 -07:00
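The year-0 behavior is easy to reproduce with Go's standard library; a minimal sketch of the problem and the fix described above (not Telegraf's actual parser):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Classic syslog (RFC 3164) timestamps carry no year at all.
	t, err := time.Parse("Jan 2 15:04:05", "May 23 16:37:14")
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Year()) // 0 — Go's zero year, since none was given

	// The fix described above: assume the current year instead.
	t = t.AddDate(time.Now().Year(), 0, 0)
	fmt.Println(t.Year()) // e.g. 2018
}
```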
Daniel Nelson
44e3b9bee3 Update changelog 2018-05-23 14:30:55 -07:00
Daniel Nelson
7f93911f43 Add converter processor (#4178) 2018-05-23 14:29:57 -07:00
Daniel Nelson
f417cec036 Update changelog 2018-05-23 14:28:59 -07:00
Daniel Nelson
dbd02ebb74 Add support for TLS and username/password auth to aerospike input (#4183) 2018-05-23 14:28:17 -07:00
Daniel Nelson
fbf09409e9 Update changelog 2018-05-23 12:26:17 -07:00
Daniel Nelson
54728f54c6 Update unbound README 2018-05-23 12:22:25 -07:00
Rodrigo Pereira
83b03ecb18 Add option to unbound module to use threads as tags (#3969) 2018-05-23 12:03:49 -07:00
Daniel Nelson
8826cdc423 Revert "Add tengine input plugin (#4160)"
This reverts commit 697d8ceae5.
2018-05-23 11:58:22 -07:00
arterforyou
697d8ceae5 Add tengine input plugin (#4160) 2018-05-23 11:19:50 -07:00
Daniel Nelson
089eb2c8d6 Update changelog 2018-05-22 14:53:21 -07:00
Arkady Emelyanov
fd22b1ef1f Add burrow input plugin (#3489) 2018-05-22 14:10:41 -07:00
Daniel Nelson
a86c2d54ad Update changelog 2018-05-22 14:00:52 -07:00
Daniel Nelson
daacfc6368 Add timeout option to sensors input (#4162) 2018-05-22 13:59:59 -07:00
Daniel Nelson
795c8057ad Update changelog 2018-05-21 16:40:30 -07:00
Daniel Nelson
6a21e23bcc Update graphite output data format docs 2018-05-21 16:39:33 -07:00
Daniel Nelson
0d21296aed Expose graphite_tag_support option in graphite output data format 2018-05-21 16:39:15 -07:00
Pavel Boev
7660315e45 Add support for Graphite 1.1.x tags (#4165) 2018-05-21 15:59:56 -07:00
Daniel Nelson
703be4f124 Add regex processor to readme and changelog 2018-05-21 15:48:22 -07:00
Alexander Shepelin
ccc4a85fd6 Add regex processor plugin (#3839) 2018-05-21 15:46:10 -07:00
Daniel Nelson
3be9cad309 Set release date for 1.6.3 2018-05-21 12:43:52 -07:00
Daniel Nelson
45c1a45f4a Add aurora input to changelog and readme 2018-05-21 12:01:58 -07:00
Daniel Nelson
1a407ceaf9 Add aurora input plugin (#4158) 2018-05-21 11:59:39 -07:00
Daniel Nelson
61a0e500a8 Update changelog 2018-05-21 10:43:57 -07:00
Arkady Emelyanov
7f46aafcd6 Fix waitgroup deadlock if url is incorrect in apache input (#4176) 2018-05-21 10:43:02 -07:00
Daniel Nelson
3072b5a493 Update mqtt output docs and changelog 2018-05-18 19:03:00 -07:00
jvrahav
81f5a41bc9 Add batch mode to mqtt output (#4094) 2018-05-18 18:55:02 -07:00
Daniel Nelson
a688eefd1c Update changelog 2018-05-18 18:52:32 -07:00
Feliksas The Lion
1a8786712c Added 3 important elasticsearch cluster health metrics (#4167) 2018-05-18 18:49:23 -07:00
Daniel Nelson
339cebbc21 Use -parallel=false in gdm to avoid issues on appveyor 2018-05-17 15:19:17 -07:00
Daniel Nelson
b62f7a3c68 Return to using latest image file on appveyor 2018-05-17 14:38:21 -07:00
Daniel Nelson
cce4f520bd Update changelog 2018-05-17 14:25:35 -07:00
Leszek Charkiewicz
6d73cb85cc Add consul service tags to metric (#4155) 2018-05-17 14:24:51 -07:00
Daniel Nelson
2948dec6f5 Update changelog and docs for application_insights plugin 2018-05-15 16:42:56 -07:00
Karol Zadora-Przylecki
863af9d1d4 Add Microsoft Application Insights output plugin (#4010) 2018-05-15 16:05:59 -07:00
Daniel Nelson
99033241c4 Update changelog 2018-05-15 15:55:38 -07:00
Daniel Nelson
e45822e2e2 Fix librato output support for uint and bool (#4151) 2018-05-15 15:54:20 -07:00
Daniel Nelson
0eba72d2c0 Add http output to changelog/readme 2018-05-14 17:19:49 -07:00
Daniel Nelson
d5f57715dc Add method, basic auth, and tls support to http output 2018-05-14 17:18:07 -07:00
Dark
190a4128c5 Add HTTP output plugin (#2491) 2018-05-14 17:15:40 -07:00
Daniel Nelson
d19a33dd6f Update changelog 2018-05-14 11:01:24 -07:00
Daniel Nelson
0af40a8a5d Fix dropwizard parsing error for metrics that need escaped (#4142)
If the dropwizard parser cannot convert the metric name into a valid
line protocol series then we will accept the name as is.
2018-05-14 11:00:03 -07:00
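A rough sketch of that fallback, with hypothetical helper names (not the parser's real code): attempt the line-protocol interpretation, and keep the verbatim name when it fails:

```go
package main

import (
	"fmt"
	"strings"
)

// seriesName tries to split a dropwizard metric name into a
// measurement plus tags, the way line protocol would, and falls back
// to the verbatim name when the parse fails.
func seriesName(name string) (measurement string, tags map[string]string) {
	parts := strings.Split(name, ",")
	tags = map[string]string{}
	for _, p := range parts[1:] {
		kv := strings.SplitN(p, "=", 2)
		if len(kv) != 2 || kv[0] == "" || kv[1] == "" {
			// Not valid line protocol: accept the whole name as-is.
			return name, map[string]string{}
		}
		tags[kv[0]] = kv[1]
	}
	return parts[0], tags
}

func main() {
	fmt.Println(seriesName("jvm.memory,region=heap"))      // parses into tags
	fmt.Println(seriesName("metric,needs=escaping,oops"))  // kept verbatim
}
```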
Daniel Nelson
558caf57de Update sample config 2018-05-11 18:18:53 -07:00
Daniel Nelson
18db718d7f Add jti_openconfig_telemetry to docs 2018-05-11 18:16:52 -07:00
Ajay Kumar Chintala
fdd899e9d4 Add service input plugin for OpenConfig streaming telemetry (#2292) 2018-05-11 17:58:19 -07:00
Daniel Nelson
7e0e664860 Update changelog 2018-05-11 17:50:46 -07:00
Daniel Nelson
5030373a4c Reuse transport on next interval in jolokia agent (#4137) 2018-05-11 17:48:27 -07:00
Daniel Nelson
5b599337a3 Use internal.Duration for jolokia timeouts (#4136) 2018-05-11 17:47:38 -07:00
Daniel Nelson
2add516eee Update changelog 2018-05-09 11:56:59 -07:00
Oleksandr Vilchynskyy
1f29612918 Update aerospike-client-go version to latest release (#4128) 2018-05-09 11:54:00 -07:00
Daniel Nelson
851efc9ca0 Update changelog 2018-05-08 16:40:42 -07:00
Daniel Nelson
fa04e539ff Merge branch 'update_net_response' 2018-05-08 16:17:56 -07:00
Daniel Nelson
3ef28e332f Use result and result_code in net_response 2018-05-08 16:17:22 -07:00
Randy Coburn
5953db88df Add tag/integer pair for result to net_response (#3455) 2018-05-08 16:07:15 -07:00
Daniel Nelson
2bf2b51039 Skip fields that report "not supported" in nvidia-smi (#4123) 2018-05-08 13:11:12 -07:00
Daniel Nelson
2a2cc3212f Update changelog 2018-05-08 12:12:03 -07:00
Daniel Nelson
b11468757c Add uint/bool support to cratedb output (#4117) 2018-05-08 12:10:25 -07:00
Daniel Nelson
339c5d0312 Add instructions on how to repair windows performance counters 2018-05-07 18:41:05 -07:00
Daniel Nelson
1c6cfcfbab Update changelog 2018-05-07 18:19:55 -07:00
Daniel Nelson
c16ecaa124 Don't report 0ms on timeout in dns_query (#4118) 2018-05-07 18:18:01 -07:00
Daniel Nelson
ce58926feb Run apt-get update in release.sh 2018-05-07 15:12:01 -07:00
Daniel Nelson
cff2aa1863 Update changelog 2018-05-07 15:01:40 -07:00
Jake Champlin
4790a21c04 Add cursor metrics to mongodb input (#4114) 2018-05-07 15:00:24 -07:00
Daniel Nelson
21167a6232 Remove combined issue template 2018-05-07 11:43:23 -07:00
Daniel Nelson
2fe167b8a7 Update issue templates (#4116) 2018-05-07 11:38:09 -07:00
Daniel Nelson
d96bcac3ec Update changelog 2018-05-04 18:42:36 -07:00
Germán Jaber
ac9b308cee Add topk processor plugin (#4096) 2018-05-04 18:40:05 -07:00
Daniel Nelson
4c35a56edd Update changelog 2018-05-04 18:31:45 -07:00
Daniel Nelson
73c22a8189 Add SerializeBatch method to the Serializer interface (#4107) 2018-05-04 18:27:31 -07:00
Daniel Nelson
de355b76d6 Simplify testing with TLS (#4095) 2018-05-04 16:33:23 -07:00
Daniel Nelson
b2bb44363a Update kafka readme 2018-05-04 14:39:31 -07:00
Daniel Nelson
8b687a8e21 Only lowercase mysql slave metrics with metric_version = 2 2018-05-04 14:31:16 -07:00
Nicolas Steinmetz
81620c69c5 Fix name_override example in mysql readme (#4100) 2018-05-04 14:20:34 -07:00
Mauro Murari
3ae0c20200 Fix platform not supported error in build.py (#4102) 2018-05-04 14:18:59 -07:00
Daniel Nelson
7c0754ebe5 Move usage string to internal to fix go run 2018-05-04 14:16:21 -07:00
Daniel Nelson
757e23a5f2 Remove -i flag from make telegraf 2018-05-04 14:08:23 -07:00
Daniel Nelson
fd63591b15 Fix grammar 2018-05-03 17:26:01 -07:00
Daniel Nelson
2108582b43 Clarify max_retry option in kafka output 2018-05-03 17:22:49 -07:00
Daniel Nelson
c125cb1d27 Update gopsutil version 2018-05-03 12:32:53 -07:00
Daniel Nelson
2fb3f7a585 Update changelog 2018-05-03 11:41:18 -07:00
Daniel Meiners
9647ea88ea Ignore UTF8 BOM in JSON parser (#4099) 2018-05-03 11:40:28 -07:00
Daniel Nelson
c1d4b0b154 Update telegraf.conf 2018-05-02 11:50:11 -07:00
Daniel Nelson
239333ad90 Remove dead link from logparser sample config 2018-05-02 11:49:51 -07:00
Daniel Nelson
fd64487be5 Update changelog 2018-05-01 18:57:26 -07:00
Daniel Nelson
cff7ee8edf Fix handling of uint64 in datadog output (#4091) 2018-05-01 18:56:39 -07:00
Daniel Nelson
c03e8918a2 Update changelog, add mcrouter to README 2018-05-01 12:01:08 -07:00
Craig Thayer
83345ec2b3 Add input plugin for McRouter (#4077) 2018-05-01 11:58:15 -07:00
Daniel Nelson
f094f83da5 Update changelog 2018-04-30 19:21:12 -07:00
Mariusz Brzeski
0768022240 Support busybox ping in the ping input (#3877) 2018-04-30 19:20:13 -07:00
Daniel Nelson
92956104d6 Update changelog 2018-04-30 17:51:04 -07:00
Daniel Nelson
964856eb5f Fix win_perf_counters to collect counters per instance (#4036) 2018-04-30 17:48:45 -07:00
Daniel Nelson
377547aa4c Document one field per line requirement in logparser 2018-04-30 16:15:51 -07:00
Grégoire Bellon-Gervais
1662b6feb9 Metrics values have same names as old cassandra plugin (#4080) 2018-04-27 15:12:59 -07:00
Daniel Nelson
908170b207 Update changelog 2018-04-27 14:56:31 -07:00
Vincent Caron
ec47cab950 Use same timestamp for fields in system input (#4078) 2018-04-27 14:55:10 -07:00
Daniel Nelson
06671777e9 Update changelog 2018-04-25 19:02:00 -07:00
Adrián López
46a8bdbfe5 Add parameter to force the interval of gather for sysstat (#4068) 2018-04-25 18:59:42 -07:00
Daniel Nelson
abdff033cc Note options that only work with influxdb HTTP 2018-04-25 13:47:16 -07:00
Daniel Nelson
535e9e9a68 Update changelog 2018-04-25 13:47:16 -07:00
Jack Zampolin
c256f17870 Fix timeout parsing error in nvidia_smi (#4070) 2018-04-24 14:40:19 -07:00
Yosuke Hara
b8d5df2076 Add support for LeoFS v1.4 to leofs input (#4044) 2018-04-24 14:14:31 -07:00
Daniel Nelson
538baee8a4 Fix nightly build 2018-04-24 13:42:42 -07:00
Daniel Nelson
d3d8d52e2f Fix links to jolokia example configs 2018-04-24 12:46:40 -07:00
Daniel Nelson
286f14f730 Update changelog 2018-04-23 15:15:08 -07:00
Daniel Nelson
9f4752ba12 Add docker input server version (#4035) 2018-04-23 15:09:04 -07:00
Daniel Nelson
f639f994b5 Ignore writer error in file output (#4055) 2018-04-23 15:08:04 -07:00
Daniel Nelson
911f0e4b57 Deprecate the cassandra input plugin (#4050) 2018-04-23 15:06:26 -07:00
Daniel Nelson
86a3b8cad4 Update changelog 2018-04-23 14:01:38 -07:00
Daniel Nelson
a3500cc33a Fix handling of floats with multiple leading zeroes (#4065) 2018-04-23 13:29:49 -07:00
Daniel Nelson
bf0c59f56c Return errors in mongodb SSL/TLS configuration (#4066) 2018-04-23 13:29:12 -07:00
Fred Cox
c7b3667ac4 Add server argument as first argument in unbound input (#4062) 2018-04-23 13:27:29 -07:00
Daniel Nelson
638853be05 Update changelog 2018-04-20 18:49:55 -07:00
Daniel Nelson
ee9a2f73a1 Fix duplicate tags when overriding tag (#4056) 2018-04-20 18:39:31 -07:00
Daniel Nelson
648d7ae922 Run 32-bit tests in CircleCI 2018-04-20 15:10:22 -07:00
Daniel Nelson
13937d511d Update changelog 2018-04-20 15:05:39 -07:00
Daniel Nelson
fe4d3cd117 Fix ints being capped at 32-bits on 32-bit archs (#4054) 2018-04-20 14:56:28 -07:00
Leandro Piccilli
eacf11fcd8 Update gopsutils to include fixes for #4037 and #3750 (#4045) 2018-04-20 14:32:19 -07:00
Daniel Nelson
3a8ca4d08d Update changelog 2018-04-19 16:58:59 -07:00
Daniel Nelson
00e3363d45 Add only valid field types in cassandra input (#4048) 2018-04-19 16:56:46 -07:00
Daniel Nelson
29b37e67c2 Allow metrics to be unserializable in influx.Reader (#4047)
Metrics that are unserializable will be logged at debug level, but the
rest of the batch will be sent.  Unserializable metrics can occur during
normal operation such as if you remove all fields from a metric or the
metric cannot fit within the line size limit.
2018-04-19 16:24:31 -07:00
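That behavior amounts to a per-metric try/skip loop; a minimal sketch under assumed types, where `serialize` stands in for the influx serializer:

```go
package main

import (
	"fmt"
	"log"
)

type metric struct {
	name   string
	fields map[string]interface{}
}

// serialize stands in for the influx serializer; a metric with no
// fields is one way to be unserializable, as the commit notes.
func serialize(m metric) ([]byte, error) {
	if len(m.fields) == 0 {
		return nil, fmt.Errorf("no serializable fields in %q", m.name)
	}
	return []byte(m.name + " ...\n"), nil
}

func main() {
	batch := []metric{
		{name: "cpu", fields: map[string]interface{}{"usage_idle": 98.09}},
		{name: "empty"}, // e.g. all fields removed by a processor
	}
	var out []byte
	for _, m := range batch {
		b, err := serialize(m)
		if err != nil {
			// Logged at debug level; the rest of the batch still goes out.
			log.Printf("D! %v", err)
			continue
		}
		out = append(out, b...)
	}
	fmt.Print(string(out))
}
```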
Daniel Nelson
42fee824f8 Update changelog 2018-04-18 16:57:15 -07:00
Daniel Nelson
120be7e87b Report available fields if utmp is unreadable (#4043) 2018-04-18 16:55:18 -07:00
Daniel Nelson
9e4a330ee5 Update github.com/gorilla/mux version (#4042) 2018-04-18 16:55:02 -07:00
Daniel Nelson
78d4a95ce6 Test using Go 1.8-1.10; official builds with 1.10 (#4041) 2018-04-18 16:14:06 -07:00
Daniel Nelson
571ce86d10 Update changelog 2018-04-18 12:14:58 -07:00
Daniel Nelson
dd2c60e620 Fix graphite serialization of unsigned ints (#4033) 2018-04-18 12:13:25 -07:00
Daniel Nelson
1486ae25c0 Tidy up last change to socket listener/writer 2018-04-17 17:48:30 -07:00
Daniel Nelson
da5b46e770 Update changelog 2018-04-17 17:36:35 -07:00
Matt
9ef902f4a1 Add snmp input option to strip non fixed length index suffixes (#4025) 2018-04-17 17:34:39 -07:00
Daniel Nelson
058510464c Update changelog 2018-04-17 17:03:18 -07:00
Bob Shannon
0b4f4b089f Add TLS support to socket_writer and socket_listener plugins (#4021) 2018-04-17 17:02:04 -07:00
Daniel Nelson
7c592558d8 Update changelog 2018-04-17 15:45:49 -07:00
James Maidment
1e1d9e8acb Update mem values to gauge (#4034) 2018-04-17 15:43:10 -07:00
Daniel Nelson
3b3d16273d Update changelog adding nvidia_smi 2018-04-17 13:43:36 -07:00
Jack Zampolin
3046f957d5 Add nvidia_smi input to monitor nvidia GPUs (#4026) 2018-04-17 13:40:55 -07:00
Daniel Nelson
bcf1cf59c1 Fix docs about outputs and fieldpass/fielddrop
This has been allowed since 1.1.0
2018-04-17 13:35:27 -07:00
Daniel Nelson
c8d2ba2bc8 Remove RateLimiter tests due to race conditions
These tests are fundamentally racy, removing to improve reliability of
test cases.
2018-04-16 18:52:52 -07:00
Daniel Nelson
04ab9a4fe4 Set 1.6 release date in changelog 2018-04-16 12:04:31 -07:00
Daniel Nelson
e4009234e9 Fix HashID conflicts in pathological cases
Use "\n" as delimiter as it cannot occur in the series name.
2018-04-12 18:09:31 -07:00
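Why the delimiter matters: without one, different name/tag combinations can concatenate to the same string. A sketch of the joined-with-"\n" idea (illustrative hashing, not Telegraf's actual HashID):

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
	"strings"
)

// hashID joins the series name and sorted tag key/value pairs with "\n",
// which cannot appear inside a series name, so distinct series can no
// longer produce the same concatenated input.
func hashID(name string, tags map[string]string) [32]byte {
	parts := []string{name}
	keys := make([]string, 0, len(tags))
	for k := range tags {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		parts = append(parts, k, tags[k])
	}
	return sha256.Sum256([]byte(strings.Join(parts, "\n")))
}

func main() {
	a := hashID("cpu", map[string]string{"host": "tars"})
	b := hashID("cpuhost", map[string]string{"": "tars"}) // no longer collides
	fmt.Println(a == b) // false
}
```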
Daniel Nelson
8d516d26e9 Fix MQTT sample config 2018-04-12 14:34:55 -07:00
Daniel Nelson
0a02363c03 Update changelog 2018-04-11 16:52:40 -07:00
jvassev
2c19d74829 Prevent loading config twice in K8S (#3999)
When config dir is mounted from configmap, filepath.Walk() finds the same
.conf file twice as 20-acme.conf is a link to ..data/20-acme.conf for example.

This patch skips all folder names starting with '..' which is pretty
uncommon and mainly used by Kubernetes mounts.
2018-04-11 16:51:19 -07:00
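A minimal sketch of that walk (the directory path is an assumption; the skip condition mirrors the patch description):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// walkConfDir loads *.conf files. Kubernetes configmap mounts create a
// hidden "..data" symlinked directory, so the same 20-acme.conf would be
// visited twice unless directories named "..*" are skipped.
func walkConfDir(dir string) error {
	return filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() && strings.HasPrefix(info.Name(), "..") {
			return filepath.SkipDir
		}
		if strings.HasSuffix(info.Name(), ".conf") {
			fmt.Println("loading", path)
		}
		return nil
	})
}

func main() {
	_ = walkConfDir("/etc/telegraf/telegraf.d")
}
```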
Daniel Nelson
3f4e1af222 Add --console and --service to usage message in Windows (#3993) 2018-04-11 16:44:55 -07:00
Daniel Nelson
10c7324d74 Update changelog 2018-04-10 18:18:27 -07:00
Daniel Nelson
55cfc383f3 Allow grok pattern to contain newlines (#4005) 2018-04-10 18:16:21 -07:00
Daniel Nelson
7b8f12b377 Update changelog 2018-04-10 18:15:02 -07:00
Daniel Nelson
15f19375e7 Typesetting changes to fibaro README 2018-04-10 18:14:27 -07:00
Pierrick Brossin
93e2381f42 Add Fibaro input plugin (#2741) 2018-04-10 18:04:58 -07:00
Daniel Nelson
387bae9b9f Fix host ordering in mongodb unit tests 2018-04-10 17:24:40 -07:00
Daniel Nelson
34416e0da8 Updated changelog 2018-04-10 17:11:25 -07:00
Jake Champlin
32f56140a3 Add per-host shard metrics in mongodb input (#3819) 2018-04-10 17:10:29 -07:00
Boris Schrijver
64a23c0b18 Fix make test-ci run (#4002) 2018-04-10 15:35:58 -07:00
Daniel Nelson
af68975e2f Document that InfluxDB input metrics vary with version 2018-04-09 19:30:18 -07:00
Daniel Nelson
0223b22b3e Update changelog 2018-04-09 17:06:34 -07:00
Daniel Nelson
1890efbb70 Rename repl_oplog_window_s to repl_oplog_window_sec
To match existing metric style.
2018-04-09 17:05:45 -07:00
Daniel Nelson
e4f8a82ee6 Fix newline escaping in line protocol (#3992) 2018-04-09 15:29:52 -07:00
Daniel Nelson
a28de4b5cd Update changelog 2018-04-06 16:45:07 -07:00
Daniel Nelson
caac224276 Add details about MongoDB permissions 2018-04-06 16:43:03 -07:00
Daniel Nelson
fe31ce9d7d Modernize mongodb docs 2018-04-06 16:36:03 -07:00
Matvey Kruglov
01ede2ea0b Add repl_oplog_window_s metric to mongodb input (#3964) 2018-04-06 16:34:47 -07:00
alekseyp
fb6390e7ab Fix typo in phpfpm README (#3985) 2018-04-06 16:20:36 -07:00
Mark Wilkinson - m82labs
ff40da6019 Use explicit casts to avoid datatype issues (#3980) 2018-04-06 14:58:33 -07:00
Daniel Nelson
43a044542e Update changelog 2018-04-06 13:19:02 -07:00
Daniel Nelson
00203fa889 Export all vars defined in /etc/default/telegraf (#3981)
This keeps the format of this file the same between systemd and
sysvinit.
2018-04-06 13:17:24 -07:00
Daniel Nelson
7177e0473f Fix conversion of unsigned ints in prometheus output (#3978) 2018-04-05 16:38:41 -07:00
Daniel Nelson
252101b7c6 Update changelog 2018-04-05 11:19:01 -07:00
Daniel Nelson
efdf36746c Update gosnmp revision (#3973) 2018-04-05 11:15:20 -07:00
Daniel Nelson
df78133bf3 Log error if scheme is unsupported 2018-04-05 11:08:31 -07:00
Jeff Ashton
bf915fa79c Fix https in InfluxDB output (#3976) 2018-04-05 10:50:32 -07:00
Daniel Nelson
c160b56229 Fix build.py next_version 2018-04-04 21:53:20 -07:00
Daniel Nelson
627f0e5d9d Use automatic extension naming when running go build 2018-04-04 19:00:28 -07:00
Daniel Nelson
4551b4c5d2 Enable ntpq tests on Windows (#3972) 2018-04-04 18:35:05 -07:00
Daniel Nelson
a9afd2f030 Add config-directory documentation for Windows service 2018-04-04 16:30:22 -07:00
Daniel Nelson
caf860bc88 Don't print name of plugin or interval size during --test 2018-04-04 16:30:22 -07:00
Daniel Nelson
beeab2c509 Sort field names when running --test 2018-04-04 16:30:22 -07:00
Scott Anderson
a50acadc44 Add details about why not all logstash patterns are supported (#3971) 2018-04-04 14:42:58 -07:00
Daniel Nelson
265d0e6d84 Fix bug preventing database from being recreated (#3962) 2018-04-02 16:18:33 -07:00
Daniel Nelson
413cf6dd23 Set next version to 1.7 on master 2018-04-02 14:44:09 -07:00
258 changed files with 32505 additions and 17457 deletions


@@ -1,49 +1,105 @@
---
defaults: &defaults
docker:
- image: 'circleci/golang:1.9.4'
working_directory: '/go/src/github.com/influxdata/telegraf'
defaults:
defaults: &defaults
working_directory: '/go/src/github.com/influxdata/telegraf'
go-1_8: &go-1_8
docker:
- image: 'circleci/golang:1.8.7'
go-1_9: &go-1_9
docker:
- image: 'circleci/golang:1.9.5'
go-1_10: &go-1_10
docker:
- image: 'circleci/golang:1.10.1'
version: 2
jobs:
build:
<<: *defaults
deps:
<<: [ *defaults, *go-1_10 ]
steps:
- checkout
- run: 'make deps'
- run: 'make test-ci'
release:
<<: *defaults
- persist_to_workspace:
root: '/go/src'
paths:
- '*'
test-go-1.8:
<<: [ *defaults, *go-1_8 ]
steps:
- checkout
- attach_workspace:
at: '/go/src'
- run: 'make test-ci'
test-go-1.9:
<<: [ *defaults, *go-1_9 ]
steps:
- attach_workspace:
at: '/go/src'
- run: 'make test-ci'
test-go-1.10:
<<: [ *defaults, *go-1_10 ]
steps:
- attach_workspace:
at: '/go/src'
- run: 'make test-ci'
- run: 'GOARCH=386 make test-ci'
release:
<<: [ *defaults, *go-1_10 ]
steps:
- attach_workspace:
at: '/go/src'
- run: './scripts/release.sh'
- store_artifacts:
path: './artifacts'
destination: '.'
nightly:
<<: *defaults
<<: [ *defaults, *go-1_10 ]
steps:
- checkout
- attach_workspace:
at: '/go/src'
- run: './scripts/release.sh'
- store_artifacts:
path: './artifacts'
destination: '.'
workflows:
version: 2
build_and_release:
jobs:
- 'build'
- 'deps'
- 'test-go-1.8':
requires:
- 'deps'
- 'test-go-1.9':
requires:
- 'deps'
- 'test-go-1.10':
requires:
- 'deps'
- 'release':
requires:
- 'build'
- 'test-go-1.8'
- 'test-go-1.9'
- 'test-go-1.10'
nightly:
jobs:
- 'build'
- 'deps'
- 'test-go-1.8':
requires:
- 'deps'
- 'test-go-1.9':
requires:
- 'deps'
- 'test-go-1.10':
requires:
- 'deps'
- 'nightly':
requires:
- 'build'
- 'test-go-1.8'
- 'test-go-1.9'
- 'test-go-1.10'
triggers:
- schedule:
cron: "0 18 * * *"
cron: "0 7 * * *"
filters:
branches:
only:


@@ -1,44 +0,0 @@
## Directions
GitHub Issues are reserved for actionable bug reports and feature requests.
General questions should be asked at the [InfluxData Community](https://community.influxdata.com) site.
Before opening an issue, search for similar bug reports or feature requests on GitHub Issues.
If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below.
Erase the other section and everything on and above this line.
*Please note, the quickest way to fix a bug is to open a Pull Request.*
## Bug report
### Relevant telegraf.conf:
### System info:
[Include Telegraf version, operating system name, and other relevant details]
### Steps to reproduce:
1. ...
2. ...
### Expected behavior:
### Actual behavior:
### Additional info:
[Include gist of relevant config, logs, etc.]
## Feature Request
Opening a feature request kicks off a discussion.
### Proposal:
### Current behavior:
### Desired behavior:
### Use case: [Why is this important (helps with prioritizing requests)]

.github/ISSUE_TEMPLATE/Bug_report.md

@@ -0,0 +1,24 @@
---
name: Bug report
about: Create a report to help us improve
---
### Relevant telegraf.conf:
### System info:
[Include Telegraf version, operating system name, and other relevant details]
### Steps to reproduce:
1. ...
2. ...
### Expected behavior:
### Actual behavior:
### Additional info:
[Include gist of relevant config, logs, etc.]


@@ -0,0 +1,17 @@
---
name: Feature request
about: Suggest an idea for this project
---
## Feature Request
Opening a feature request kicks off a discussion.
### Proposal:
### Current behavior:
### Desired behavior:
### Use case: [Why is this important (helps with prioritizing requests)]

.gitignore

@@ -1,3 +1,4 @@
/build
/telegraf
/telegraf.exe
/telegraf.gz


@@ -1,4 +1,106 @@
## v1.6 [unreleased]
## v1.7 [unreleased]
### Release Notes
- The `cassandra` input plugin has been deprecated in favor of the `jolokia2`
input plugin which is much more configurable and more performant. There is
an [example configuration](./plugins/inputs/jolokia2/examples) to help you
get started.
- For plugins supporting TLS, you can now specify the certificate and keys
using `tls_ca`, `tls_cert`, `tls_key`. These options behave the same as
the, now deprecated, `ssl` forms.
### New Inputs
- [aurora](./plugins/inputs/aurora/README.md) - Contributed by @influxdata
- [burrow](./plugins/inputs/burrow/README.md) - Contributed by @arkady-emelyanov
- [fibaro](./plugins/inputs/fibaro/README.md) - Contributed by @dynek
- [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry/README.md) - Contributed by @ajhai
- [mcrouter](./plugins/inputs/mcrouter/README.md) - Contributed by @cthayer
- [nvidia_smi](./plugins/inputs/nvidia_smi/README.md) - Contributed by @jackzampolin
### New Processors
- [converter](./plugins/processors/converter/README.md) - Contributed by @influxdata
- [regex](./plugins/processors/regex/README.md) - Contributed by @44px
- [topk](./plugins/processors/topk/README.md) - Contributed by @mirath
### New Outputs
- [http](./plugins/outputs/http/README.md) - Contributed by @Dark0096
- [application_insights](./plugins/outputs/application_insights/README.md): Contributed by @karolz-ms
### Features
- [#3964](https://github.com/influxdata/telegraf/pull/3964): Add repl_oplog_window_sec metric to mongodb input.
- [#3819](https://github.com/influxdata/telegraf/pull/3819): Add per-host shard metrics in mongodb input.
- [#3999](https://github.com/influxdata/telegraf/pull/3999): Skip files with leading `..` in config directory.
- [#4021](https://github.com/influxdata/telegraf/pull/4021): Add TLS support to socket_writer and socket_listener plugins.
- [#4025](https://github.com/influxdata/telegraf/pull/4025): Add snmp input option to strip non fixed length index suffixes.
- [#4035](https://github.com/influxdata/telegraf/pull/4035): Add server version tag to docker input.
- [#4044](https://github.com/influxdata/telegraf/pull/4044): Add support for LeoFS 1.4 to leofs input.
- [#4068](https://github.com/influxdata/telegraf/pull/4068): Add parameter to force the interval of gather for sysstat.
- [#3877](https://github.com/influxdata/telegraf/pull/3877): Support busybox ping in the ping input.
- [#4077](https://github.com/influxdata/telegraf/pull/4077): Add input plugin for McRouter.
- [#4096](https://github.com/influxdata/telegraf/pull/4096): Add topk processor plugin.
- [#4114](https://github.com/influxdata/telegraf/pull/4114): Add cursor metrics to mongodb input.
- [#3455](https://github.com/influxdata/telegraf/pull/3455): Add tag/integer pair for result to net_response.
- [#4010](https://github.com/influxdata/telegraf/pull/4010): Add application_insights output plugin.
- [#4167](https://github.com/influxdata/telegraf/pull/4167): Added several important elasticsearch cluster health metrics.
- [#4094](https://github.com/influxdata/telegraf/pull/4094): Add batch mode to mqtt output.
- [#4158](https://github.com/influxdata/telegraf/pull/4158): Add aurora input plugin.
- [#3839](https://github.com/influxdata/telegraf/pull/3839): Add regex processor plugin.
- [#4165](https://github.com/influxdata/telegraf/pull/4165): Add support for Graphite 1.1 tags.
- [#4162](https://github.com/influxdata/telegraf/pull/4162): Add timeout option to sensors input.
- [#3489](https://github.com/influxdata/telegraf/pull/3489): Add burrow input plugin.
- [#3969](https://github.com/influxdata/telegraf/pull/3969): Add option to unbound module to use threads as tags.
- [#4183](https://github.com/influxdata/telegraf/pull/4183): Add support for TLS and username/password auth to aerospike input.
### Bugfixes
- [#4018](https://github.com/influxdata/telegraf/pull/4018): Write to working file outputs if any files are not writeable.
- [#4036](https://github.com/influxdata/telegraf/pull/4036): Add all win_perf_counters fields for a series in a single metric.
- [#4118](https://github.com/influxdata/telegraf/pull/4118): Report results of dns_query instead of 0ms on timeout.
- [#4155](https://github.com/influxdata/telegraf/pull/4155): Add consul service tags to metric.
## v1.6.3 [2018-05-21]
### Bugfixes
- [#4127](https://github.com/influxdata/telegraf/issues/4127): Fix intermittent panic in aerospike input.
- [#4130](https://github.com/influxdata/telegraf/issues/4130): Fix connection leak in jolokia2_agent.
- [#4136](https://github.com/influxdata/telegraf/pull/4136): Fix jolokia2 timeout parsing.
- [#4142](https://github.com/influxdata/telegraf/pull/4142): Fix error parsing dropwizard metrics.
- [#4149](https://github.com/influxdata/telegraf/issues/4149): Fix librato output support for uint and bool.
- [#4176](https://github.com/influxdata/telegraf/pull/4176): Fix waitgroup deadlock if url is incorrect in apache input.
## v1.6.2 [2018-05-08]
### Bugfixes
- [#4078](https://github.com/influxdata/telegraf/pull/4078): Use same timestamp for fields in system input.
- [#4091](https://github.com/influxdata/telegraf/pull/4091): Fix handling of uint64 in datadog output.
- [#4099](https://github.com/influxdata/telegraf/pull/4099): Ignore UTF8 BOM in JSON parser.
- [#4104](https://github.com/influxdata/telegraf/issues/4104): Fix case for slave metrics in mysql input.
- [#4110](https://github.com/influxdata/telegraf/issues/4110): Fix uint support in cratedb output.
## v1.6.1 [2018-04-23]
### Bugfixes
- [#3835](https://github.com/influxdata/telegraf/issues/3835): Report mem input fields as gauges instead of counters.
- [#4030](https://github.com/influxdata/telegraf/issues/4030): Fix graphite outputs unsigned integers in wrong format.
- [#4043](https://github.com/influxdata/telegraf/issues/4043): Report available fields if utmp is unreadable.
- [#4039](https://github.com/influxdata/telegraf/issues/4039): Fix potential "no fields" error writing to outputs.
- [#4037](https://github.com/influxdata/telegraf/issues/4037): Fix uptime reporting in system input when run inside docker.
- [#3750](https://github.com/influxdata/telegraf/issues/3750): Fix mem input "cannot allocate memory" error on FreeBSD based systems.
- [#4056](https://github.com/influxdata/telegraf/pull/4056): Fix duplicate tags when overriding an existing tag.
- [#4062](https://github.com/influxdata/telegraf/pull/4062): Add server argument as first argument in unbound input.
- [#4063](https://github.com/influxdata/telegraf/issues/4063): Fix handling of floats with multiple leading zeroes.
- [#4064](https://github.com/influxdata/telegraf/issues/4064): Return errors in mongodb SSL/TLS configuration.
## v1.6 [2018-04-16]
### Release Notes
@@ -105,6 +207,9 @@
- [#3648](https://github.com/influxdata/telegraf/issues/3648): Fix InfluxDB output not able to reconnect when server address changes.
- [#3957](https://github.com/influxdata/telegraf/issues/3957): Fix parsing of dos line endings in the smart input.
- [#3754](https://github.com/influxdata/telegraf/issues/3754): Fix precision truncation when no timestamp included.
- [#3655](https://github.com/influxdata/telegraf/issues/3655): Fix SNMPv3 connection with Cisco ASA 5515 in snmp input.
- [#3981](https://github.com/influxdata/telegraf/pull/3981): Export all vars defined in /etc/default/telegraf.
- [#4004](https://github.com/influxdata/telegraf/issues/4004): Allow grok pattern to contain newlines.
## v1.5.3 [2018-03-14]

Godeps

@@ -1,5 +1,7 @@
code.cloudfoundry.org/clock e9dc86bbf0e5bbe6bf7ff5a6f71e048959b61f71
collectd.org 2ce144541b8903101fb8f1483cc0497a68798122
github.com/aerospike/aerospike-client-go 95e1ad7791bdbca44707fedbb29be42024900d9c
github.com/aerospike/aerospike-client-go 9701404f4c60a6ea256595d24bf318f721a7e8b8
github.com/Azure/go-autorest 9ad9326b278af8fa5cc67c30c0ce9a58cc0862b2
github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
github.com/apache/thrift 4aaa92ece8503a6da9bc6701604f69acf2b99d07
github.com/aws/aws-sdk-go c861d27d0304a79f727e9a8a4e2ac1e74602fdc0
@@ -26,11 +28,11 @@ github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
github.com/google/go-cmp f94e52cad91c65a63acc1e75d4be223ea22e99bc
github.com/gorilla/mux 392c28fe23e1c45ddba891b0320b3b5df220beea
github.com/gorilla/mux 53c1911da2b537f792e7cafcb446b05ffe33b996
github.com/go-redis/redis 73b70592cdaa9e6abdfcfbf97b4a90d80728c836
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
github.com/hashicorp/consul 5174058f0d2bda63fa5198ab96c33d9a909c58ed
github.com/influxdata/tail c43482518d410361b6c383d7aebce33d0471d7bc
github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
@@ -41,6 +43,7 @@ github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893
github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
github.com/Microsoft/ApplicationInsights-Go 3612f58550c1de70f1a110c78c830e55f29aa65d
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/miekg/dns 99f84ae56e75126dd77e5de4fae2ea034a468ca1
github.com/mitchellh/mapstructure d0303fe809921458f417bcf828397a65db30a7e4
@@ -66,11 +69,11 @@ github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
github.com/shirou/gopsutil fc04d2dd9a512906a2604242b35275179e250eda
github.com/shirou/gopsutil c95755e4bcd7a62bb8bd33f3a597a7c7f35e2cf3
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
github.com/soniah/gosnmp f15472a4cd6f6ea7929e4c7d9f163c49f059924f
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
github.com/stretchr/objx facf9a85c22f48d2f52f2380e4efce1768749a89
@@ -83,9 +86,11 @@ github.com/wvanbergen/kazoo-go 968957352185472eacb69215fa3dbfcfdbac1096
github.com/yuin/gopher-lua 66c871e454fcf10251c61bf8eff02d0978cae75a
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto dc137beb6cce2043eb6b5f223ab8bf51c32459f4
golang.org/x/net f2499483f923065a842d38eb4c7f1927e6fc6e6d
golang.org/x/net a337091b0525af65de94df2eb7e98bd9962dcbe2
golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
golang.org/x/text 506f9d5c962f284575e88337e7d9296d27e729d3
google.golang.org/genproto 11c7f9e547da6db876260ce49ea7536985904c9b
google.golang.org/grpc de2209a968d48e8970546c8a710189f7461370f7
gopkg.in/asn1-ber.v1 4e86f4367175e39f69d9358a5f17b4dda270378d
gopkg.in/fatih/pool.v2 6e328e67893eb46323ad06f0e92cb9536babbabc
gopkg.in/gorethink/gorethink.v3 7ab832f7b65573104a555d84a27992ae9ea1f659


@@ -4,7 +4,7 @@ BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
COMMIT := $(shell git rev-parse --short HEAD)
GOFILES ?= $(shell git ls-files '*.go')
GOFMT ?= $(shell gofmt -l $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)))
BUILDFLAGS ?=
BUILDFLAGS ?=
ifdef GOBIN
PATH := $(GOBIN):$(PATH)
@@ -12,8 +12,6 @@ else
PATH := $(subst :,/bin:,$(GOPATH))/bin:$(PATH)
endif
TELEGRAF := telegraf$(shell go tool dist env | grep -q 'GOOS=.windows.' && echo .exe)
LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)
ifdef VERSION
LDFLAGS += -X main.version=$(VERSION)
@@ -26,10 +24,10 @@ all:
deps:
go get -u github.com/golang/lint/golint
go get github.com/sparrc/gdm
gdm restore
gdm restore --parallel=false
telegraf:
go build -i -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go
go build -ldflags "$(LDFLAGS)" ./cmd/telegraf
go-install:
go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf
@@ -46,7 +44,7 @@ fmt:
fmtcheck:
@echo '[INFO] running gofmt to identify incorrectly formatted code...'
@if [ ! -z $(GOFMT) ]; then \
@if [ ! -z "$(GOFMT)" ]; then \
echo "[ERROR] gofmt has found errors in the following files:" ; \
echo "$(GOFMT)" ; \
echo "" ;\
@@ -60,12 +58,13 @@ test-windows:
go test ./plugins/inputs/win_perf_counters/...
go test ./plugins/inputs/win_services/...
go test ./plugins/inputs/procstat/...
go test ./plugins/inputs/ntpq/...
# vet runs the Go source code static analysis tool `vet` to find
# any common errors.
vet:
@echo 'go vet $$(go list ./... | grep -v ./plugins/parsers/influx)'
@go vet $$(go list ./... | grep -v ./plugins/parsers/influx) ; if [ $$? -eq 1 ]; then \
@go vet $$(go list ./... | grep -v ./plugins/parsers/influx) ; if [ $$? -ne 0 ]; then \
echo ""; \
echo "go vet has found suspicious constructs. Please remediate any reported errors"; \
echo "to fix them before submitting code for review."; \
@@ -73,7 +72,7 @@ vet:
fi
test-ci: fmtcheck vet
go test -short./...
go test -short ./...
test-all: fmtcheck vet
go test ./...


@@ -5,7 +5,7 @@ and writing metrics.
Design goals are to have a minimal memory footprint with a plugin system so
that developers in the community can easily add support for collecting metrics
from local or remote services.
from local or remote services. For an example configuration reference…
Telegraf is plugin-driven and has the concept of 4 distinct plugins:
@@ -127,10 +127,12 @@ configuration options.
* [aerospike](./plugins/inputs/aerospike)
* [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq)
* [apache](./plugins/inputs/apache)
* [aurora](./plugins/inputs/aurora)
* [aws cloudwatch](./plugins/inputs/cloudwatch)
* [bcache](./plugins/inputs/bcache)
* [bond](./plugins/inputs/bond)
* [cassandra](./plugins/inputs/cassandra)
* [cassandra](./plugins/inputs/cassandra) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
* [burrow](./plugins/inputs/burrow)
* [ceph](./plugins/inputs/ceph)
* [cgroup](./plugins/inputs/cgroup)
* [chrony](./plugins/inputs/chrony)
@@ -147,6 +149,7 @@ configuration options.
* [elasticsearch](./plugins/inputs/elasticsearch)
* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
* [fail2ban](./plugins/inputs/fail2ban)
* [fibaro](./plugins/inputs/fibaro)
* [filestat](./plugins/inputs/filestat)
* [fluentd](./plugins/inputs/fluentd)
* [graylog](./plugins/inputs/graylog)
@@ -162,12 +165,14 @@ configuration options.
* [iptables](./plugins/inputs/iptables)
* [ipset](./plugins/inputs/ipset)
* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
* [jolokia2](./plugins/inputs/jolokia2)
* [jolokia2](./plugins/inputs/jolokia2) (java, cassandra, kafka)
- [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry)
* [kapacitor](./plugins/inputs/kapacitor)
* [kubernetes](./plugins/inputs/kubernetes)
* [leofs](./plugins/inputs/leofs)
* [lustre2](./plugins/inputs/lustre2)
* [mailchimp](./plugins/inputs/mailchimp)
* [mcrouter](./plugins/inputs/mcrouter)
* [memcached](./plugins/inputs/memcached)
* [mesos](./plugins/inputs/mesos)
* [minecraft](./plugins/inputs/minecraft)
@@ -180,6 +185,7 @@ configuration options.
* [nsq](./plugins/inputs/nsq)
* [nstat](./plugins/inputs/nstat)
* [ntpq](./plugins/inputs/ntpq)
* [nvidia_smi](./plugins/inputs/nvidia_smi)
* [openldap](./plugins/inputs/openldap)
* [opensmtpd](./plugins/inputs/opensmtpd)
* [pf](./plugins/inputs/pf)
@@ -263,8 +269,11 @@ formats may be used with input plugins supporting the `data_format` option:
## Processor Plugins
* [printer](./plugins/processors/printer)
* [converter](./plugins/processors/converter)
* [override](./plugins/processors/override)
* [printer](./plugins/processors/printer)
* [regex](./plugins/processors/regex)
* [topk](./plugins/processors/topk)
## Aggregator Plugins
@@ -277,6 +286,7 @@ formats may be used with input plugins supporting the `data_format` option:
* [influxdb](./plugins/outputs/influxdb)
* [amon](./plugins/outputs/amon)
* [amqp](./plugins/outputs/amqp) (rabbitmq)
* [application_insights](./plugins/outputs/application_insights)
* [aws kinesis](./plugins/outputs/kinesis)
* [aws cloudwatch](./plugins/outputs/cloudwatch)
* [cratedb](./plugins/outputs/cratedb)
@@ -286,6 +296,7 @@ formats may be used with input plugins supporting the `data_format` option:
* [file](./plugins/outputs/file)
* [graphite](./plugins/outputs/graphite)
* [graylog](./plugins/outputs/graylog)
* [http](./plugins/outputs/http)
* [instrumental](./plugins/outputs/instrumental)
* [kafka](./plugins/outputs/kafka)
* [librato](./plugins/outputs/librato)


@@ -203,11 +203,6 @@ func (a *Agent) Test() error {
input.SetTrace(true)
input.SetDefaultTags(a.Config.Tags)
fmt.Printf("* Plugin: %s, Collection 1\n", input.Name())
if input.Config.Interval != 0 {
fmt.Printf("* Internal: %s\n", input.Config.Interval)
}
if err := input.Input.Gather(acc); err != nil {
return err
}
@@ -217,7 +212,6 @@ func (a *Agent) Test() error {
switch input.Name() {
case "inputs.cpu", "inputs.mongodb", "inputs.procstat":
time.Sleep(500 * time.Millisecond)
fmt.Printf("* Plugin: %s, Collection 2\n", input.Name())
if err := input.Input.Gather(acc); err != nil {
return err
}


@@ -1,4 +1,3 @@
image: Previous Visual Studio 2015
version: "{build}"
cache:
@@ -13,11 +12,11 @@ platform: x64
install:
- IF NOT EXIST "C:\Cache" mkdir C:\Cache
- IF NOT EXIST "C:\Cache\go1.9.4.msi" curl -o "C:\Cache\go1.9.4.msi" https://storage.googleapis.com/golang/go1.9.4.windows-amd64.msi
- IF NOT EXIST "C:\Cache\go1.10.1.msi" curl -o "C:\Cache\go1.10.1.msi" https://storage.googleapis.com/golang/go1.10.1.windows-amd64.msi
- IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip
- IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip
- IF EXIST "C:\Go" rmdir /S /Q C:\Go
- msiexec.exe /i "C:\Cache\go1.9.4.msi" /quiet
- msiexec.exe /i "C:\Cache\go1.10.1.msi" /quiet
- 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y
- 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
- go version


@@ -13,6 +13,7 @@ import (
"syscall"
"github.com/influxdata/telegraf/agent"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/config"
"github.com/influxdata/telegraf/logger"
_ "github.com/influxdata/telegraf/plugins/aggregators/all"
@@ -57,7 +58,7 @@ var fService = flag.String("service", "",
var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)")
var (
nextVersion = "1.6.0"
nextVersion = "1.7.0"
version string
commit string
branch string
@@ -73,48 +74,6 @@ func init() {
}
}
const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.
Usage:
telegraf [commands|flags]
The commands & flags are:
config print out full sample configuration to stdout
version print the version to stdout
--config <file> configuration file to load
--test gather metrics once, print them to stdout, and exit
--config-directory directory containing additional *.conf files
--input-filter filter the input plugins to enable, separator is :
--output-filter filter the output plugins to enable, separator is :
--usage print usage for a plugin, ie, 'telegraf --usage mysql'
--debug print metrics as they're generated to stdout
--pprof-addr pprof address to listen on, format: localhost:6060 or :6060
--quiet run in quiet mode
Examples:
# generate a telegraf config file:
telegraf config > telegraf.conf
# generate config with only cpu input & influxdb output plugins defined
telegraf --input-filter cpu --output-filter influxdb config
# run a single telegraf collection, outputting metrics to stdout
telegraf --config telegraf.conf --test
# run telegraf with all plugins defined in config file
telegraf --config telegraf.conf
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
# run telegraf with pprof
telegraf --config telegraf.conf --pprof-addr localhost:6060
`
var stop chan struct{}
func reloadLoop(
@@ -234,7 +193,7 @@ func reloadLoop(
}
func usageExit(rc int) {
fmt.Println(usage)
fmt.Println(internal.Usage)
os.Exit(rc)
}
@@ -365,7 +324,7 @@ func main() {
DisplayName: "Telegraf Data Collector Service",
Description: "Collects data using a series of plugins and publishes it to" +
"another series of plugins.",
Arguments: []string{"-config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
Arguments: []string{"--config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
}
prg := &program{
@@ -378,14 +337,14 @@ func main() {
if err != nil {
log.Fatal("E! " + err.Error())
}
// Handle the -service flag here to prevent any issues with tooling that
// Handle the --service flag here to prevent any issues with tooling that
// may not have an interactive session, e.g. installing from Ansible.
if *fService != "" {
if *fConfig != "" {
(*svcConfig).Arguments = []string{"-config", *fConfig}
(*svcConfig).Arguments = []string{"--config", *fConfig}
}
if *fConfigDirectory != "" {
(*svcConfig).Arguments = append((*svcConfig).Arguments, "-config-directory", *fConfigDirectory)
(*svcConfig).Arguments = append((*svcConfig).Arguments, "--config-directory", *fConfigDirectory)
}
err := service.Control(s, *fService)
if err != nil {


@@ -79,15 +79,15 @@ services:
- "389:389"
- "636:636"
crate:
image: crate/crate
ports:
- "4200:4200"
- "4230:4230"
command:
- crate
- -Cnetwork.host=0.0.0.0
- -Ctransport.host=localhost
- -Clicense.enterprise=false
environment:
- CRATE_HEAP_SIZE=128m
- JAVA_OPTS='-Xms256m -Xmx256m'
image: crate/crate
ports:
- "4200:4200"
- "4230:4230"
- "5432:5432"
command:
- crate
- -Cnetwork.host=0.0.0.0
- -Ctransport.host=localhost
- -Clicense.enterprise=false
environment:
- CRATE_HEAP_SIZE=128m


@@ -153,11 +153,11 @@ The inverse of `namepass`. If a match is found the point is discarded. This
is tested on points after they have passed the `namepass` test.
* **fieldpass**:
An array of glob pattern strings. Only fields whose field key matches a
pattern in this list are emitted. Not available for outputs.
pattern in this list are emitted.
* **fielddrop**:
The inverse of `fieldpass`. Fields with a field key matching one of the
patterns will be discarded from the point. This is tested on points after
they have passed the `fieldpass` test. Not available for outputs.
they have passed the `fieldpass` test.
* **tagpass**:
A table mapping tag keys to arrays of glob pattern strings. Only points
that contain a tag key in the table and a tag value matching one of its


@@ -1,35 +1,15 @@
# Telegraf Output Data Formats
# Output Data Formats
Telegraf is able to serialize metrics into the following output data formats:
In addition to output specific data formats, Telegraf supports a set of
standard data formats that may be selected from when configuring many output
plugins.
1. [InfluxDB Line Protocol](#influx)
1. [JSON](#json)
1. [Graphite](#graphite)
Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/latest/concepts/glossary/#point),
are a combination of four basic parts:
1. Measurement Name
1. Tags
1. Fields
1. Timestamp
In InfluxDB line protocol, these 4 parts are easily defined in textual form:
```
measurement_name[,tag1=val1,...] field1=val1[,field2=val2,...] [timestamp]
```
For Telegraf outputs that write textual data (such as `kafka`, `mqtt`, and `file`),
InfluxDB line protocol was originally the only available output format. But now
we are normalizing telegraf metric "serializers" into a
[plugin-like interface](https://github.com/influxdata/telegraf/tree/master/plugins/serializers)
across all output plugins that can support it.
You will be able to identify a plugin that supports different data formats
by the presence of a `data_format`
config option, for example, in the `file` output plugin:
You will be able to identify the plugins with support by the presence of a
`data_format` config option, for example, in the `file` output plugin:
```toml
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
@@ -40,22 +20,16 @@ config option, for example, in the `file` output plugin:
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
## Additional configuration options go here
```
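Behind every `data_format` option sits a small serializer interface; a sketch with a stand-in metric type (the real one takes `telegraf.Metric`; `SerializeBatch` is the method added in #4107 in the commit list above):

```go
package serializers

// Metric stands in for telegraf.Metric in this sketch.
type Metric interface{}

// Serializer is roughly the interface each data_format implements.
type Serializer interface {
	// Serialize encodes a single metric into the chosen format.
	Serialize(m Metric) ([]byte, error)
	// SerializeBatch encodes a whole batch into one payload, used by
	// formats with a batch mode such as the JSON format shown below.
	SerializeBatch(metrics []Metric) ([]byte, error)
}
```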
Each data_format has an additional set of configuration options available, which
I'll go over below.
## Influx
# Influx:
The `influx` format outputs data as
The `influx` data format outputs metrics using
[InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/latest/write_protocols/line_protocol_tutorial/).
This is the recommended format to use unless another format is required for
This is the recommended format unless another format is required for
interoperability.
### Influx Configuration:
### Influx Configuration
```toml
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
@@ -82,11 +56,17 @@ interoperability.
# influx_uint_support = false
```
# Graphite:
## Graphite
The Graphite data format translates Telegraf metrics into _dot_ buckets. A
template can be specified for the output of Telegraf metrics into Graphite
buckets. The default template is:
The Graphite data format is translated from Telegraf Metrics using either the
template pattern or tag support method. You can select between the two
methods using the [`graphite_tag_support`](#graphite-tag-support) option. When set, the tag support
method is used, otherwise the [`template` pattern](#template-pattern) is used.
#### Template Pattern
The `template` option describes how Telegraf translates metrics into _dot_
buckets. The default template is:
```
template = "host.tags.measurement.field"
@@ -103,7 +83,7 @@ tag keys are filled.
1. _measurement_ is a special keyword that outputs the measurement name.
1. _field_ is a special keyword that outputs the field name.
Which means the following influx metric -> graphite conversion would happen:
**Example Conversion**:
```
cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758
@@ -115,7 +95,25 @@ tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
Fields with string values will be skipped. Boolean fields will be converted
to 1 (true) or 0 (false).
### Graphite Configuration:
#### Graphite Tag Support
When the `graphite_tag_support` option is enabled, the template pattern is not
used. Instead, tags are encoded using
[Graphite tag support](http://graphite.readthedocs.io/en/latest/tags.html)
added in Graphite 1.1. The `metric_path` is a combination of the optional
`prefix` option, measurement name, and field name.
The tag `name` is reserved by Graphite; any conflicting tags will be encoded as `_name`.
**Example Conversion**:
```
cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758
=>
cpu.usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320690
cpu.usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320690
```
### Graphite Configuration
```toml
[[outputs.file]]
@@ -128,33 +126,72 @@ to 1 (true) or 0 (false).
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "graphite"
# prefix each graphite bucket
## Prefix added to each graphite bucket
prefix = "telegraf"
# graphite template
## Graphite template pattern
template = "host.tags.measurement.field"
## Support Graphite tags, recommended to enable when using Graphite 1.1 or later.
# graphite_tag_support = false
```
# JSON:
The JSON data format serializes Telegraf metrics in JSON format. The format is:
## JSON
The JSON output data format for a single metric is in the
form:
```json
{
"fields":{
"field_1":30,
"field_2":4,
"field_N":59,
"n_images":660
},
"name":"docker",
"tags":{
"host":"raynor"
},
"timestamp":1458229140
"fields": {
"field_1": 30,
"field_2": 4,
"field_N": 59,
"n_images": 660
},
"name": "docker",
"tags": {
"host": "raynor"
},
"timestamp": 1458229140
}
```
### JSON Configuration:
When an output plugin needs to emit multiple metrics at one time, it may use
the batch format. The use of batch format is determined by the plugin;
reference the documentation for the specific plugin.
```json
{
"metrics": [
{
"fields": {
"field_1": 30,
"field_2": 4,
"field_N": 59,
"n_images": 660
},
"name": "docker",
"tags": {
"host": "raynor"
},
"timestamp": 1458229140
},
{
"fields": {
"field_1": 30,
"field_2": 4,
"field_N": 59,
"n_images": 660
},
"name": "docker",
"tags": {
"host": "raynor"
},
"timestamp": 1458229140
}
]
}
```
### JSON Configuration
```toml
[[outputs.file]]
@@ -166,14 +203,9 @@ The JSON data format serialized Telegraf metrics in json format. The format is:
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "json"
json_timestamp_units = "1ns"
```
By default, the timestamp that is output in JSON data format serialized Telegraf
metrics is in seconds. The precision of this timestamp can be adjusted for any output
by adding the optional `json_timestamp_units` parameter to the configuration for
that output. This parameter can be used to set the timestamp units to nanoseconds (`ns`),
microseconds (`us` or `µs`), milliseconds (`ms`), or seconds (`s`). Note that this
parameter will be truncated to the nearest power of 10, so if the `json_timestamp_units`
are set to `15ms` the timestamps for the JSON format serialized Telegraf metrics will be
output in hundredths of a second (`10ms`).
## The resolution to use for the metric timestamp. Must be a duration string
## such as "1ns", "1us", "1ms", "10ms", "1s". Durations are truncated to
## the power of 10 less than the specified units.
json_timestamp_units = "1s"
```
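The truncation rule in that comment can be sketched with a hypothetical helper:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// truncateToPowerOf10 reduces a duration to the nearest power of ten
// at or below it, matching the rule described above: 15ms becomes 10ms.
func truncateToPowerOf10(d time.Duration) time.Duration {
	exp := math.Floor(math.Log10(float64(d.Nanoseconds())))
	return time.Duration(math.Pow(10, exp))
}

func main() {
	fmt.Println(truncateToPowerOf10(15 * time.Millisecond)) // 10ms
	fmt.Println(truncateToPowerOf10(1 * time.Second))       // 1s
}
```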


@@ -3,6 +3,7 @@
When distributed in a binary form, Telegraf may contain portions of the
following works:
- code.cloudfoundry.org/clock [APACHE](https://github.com/cloudfoundry/clock/blob/master/LICENSE)
- collectd.org [MIT](https://github.com/collectd/go-collectd/blob/master/LICENSE)
- github.com/aerospike/aerospike-client-go [APACHE](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE)
- github.com/amir/raidman [PUBLIC DOMAIN](https://github.com/amir/raidman/blob/master/UNLICENSE)
@@ -51,6 +52,7 @@ following works:
- github.com/kballard/go-shellquote [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
- github.com/lib/pq [MIT](https://github.com/lib/pq/blob/master/LICENSE.md)
- github.com/matttproud/golang_protobuf_extensions [APACHE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/Microsoft/ApplicationInsights-Go [APACHE](https://github.com/Microsoft/ApplicationInsights-Go/blob/master/LICENSE)
- github.com/Microsoft/go-winio [MIT](https://github.com/Microsoft/go-winio/blob/master/LICENSE)
- github.com/miekg/dns [BSD](https://github.com/miekg/dns/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
@@ -97,6 +99,8 @@ following works:
- golang.org/x/net [BSD](https://go.googlesource.com/net/+/master/LICENSE)
- golang.org/x/text [BSD](https://go.googlesource.com/text/+/master/LICENSE)
- golang.org/x/sys [BSD](https://go.googlesource.com/sys/+/master/LICENSE)
- google.golang.org/grpc [APACHE](https://github.com/google/grpc-go/blob/master/LICENSE)
- google.golang.org/genproto [APACHE](https://github.com/google/go-genproto/blob/master/LICENSE)
- gopkg.in/asn1-ber.v1 [MIT](https://github.com/go-asn1-ber/asn1-ber/blob/v1.2/LICENSE)
- gopkg.in/dancannon/gorethink.v1 [APACHE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/fatih/pool.v2 [MIT](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)


@@ -5,7 +5,7 @@ the general steps to set it up.
1. Obtain the telegraf windows distribution
2. Create the directory `C:\Program Files\Telegraf` (if you install in a different
location simply specify the `-config` parameter with the desired location)
location simply specify the `--config` parameter with the desired location)
3. Place the telegraf.exe and the telegraf.conf config file into `C:\Program Files\Telegraf`
4. To install the service into the Windows Service Manager, run the following in PowerShell as an administrator (If necessary, you can wrap any spaces in the file paths in double quotes ""):
@@ -26,6 +26,15 @@ the general steps to set it up.
> net start telegraf
```
## Config Directory
You can also specify a `--config-directory` for the service to use:
1. Create a directory for config snippets: `C:\Program Files\Telegraf\telegraf.d`
2. Include the `--config-directory` option when registering the service:
```
> C:\"Program Files"\Telegraf\telegraf.exe --service install --config C:\"Program Files"\Telegraf\telegraf.conf --config-directory C:\"Program Files"\Telegraf\telegraf.d
```
## Other supported operations
Telegraf can manage its own service through the --service flag:
@@ -37,7 +46,6 @@ Telegraf can manage its own service through the --service flag:
| `telegraf.exe --service start` | Start the telegraf service |
| `telegraf.exe --service stop` | Stop the telegraf service |
## Troubleshooting common error #1067
When installing as a service on Windows, always double check that the full path of the config file is specified, otherwise the Windows service will fail to start


@@ -101,10 +101,11 @@
# skip_database_creation = false
## Name of existing retention policy to write to. Empty string writes to
## the default retention policy.
## the default retention policy. Only takes effect when using HTTP.
# retention_policy = ""
## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
## Only takes effect when using HTTP.
# write_consistency = "any"
## Timeout for HTTP messages.
@@ -120,11 +121,11 @@
## UDP payload size is the maximum packet size to send.
# udp_payload = 512
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config for use on HTTP connections.
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## HTTP Proxy override, if unset values the standard proxy environment
@@ -183,11 +184,11 @@
# ## to 5s. 0s means no timeout (not recommended).
# # timeout = "5s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output.
@@ -197,6 +198,26 @@
# data_format = "influx"
# # Send metrics to Azure Application Insights
# [[outputs.application_insights]]
# ## Instrumentation key of the Application Insights resource.
# instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"
#
# ## Timeout for closing (default: 5s).
# # timeout = "5s"
#
# ## Enable additional diagnostic logging.
# # enable_diagnosic_logging = false
#
# ## Context Tag Sources add Application Insights context tags to a tag value.
# ##
# ## For list of allowed context tag keys see:
# ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go
# # [outputs.application_insights.context_tag_sources]
# # "ai.cloud.role" = "kubernetes_container_name"
# # "ai.cloud.roleInstance" = "kubernetes_pod_name"
# # Configuration for AWS CloudWatch output.
# [[outputs.cloudwatch]]
# ## Amazon REGION
@@ -283,11 +304,11 @@
# # default_tag_value = "none"
# index_name = "telegraf-%Y.%m.%d" # required.
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Template Config
@@ -326,11 +347,11 @@
# ## timeout in seconds for the write connection to graphite
# timeout = 2
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
@@ -340,6 +361,40 @@
# servers = ["127.0.0.1:12201", "192.168.1.1:12201"]
# # A plugin that can transmit metrics over HTTP
# [[outputs.http]]
# ## URL is the address to send metrics to
# url = "http://127.0.0.1:8080/metric"
#
# ## Timeout for HTTP message
# # timeout = "5s"
#
# ## HTTP method, one of: "POST" or "PUT"
# # method = "POST"
#
# ## HTTP Basic Auth credentials
# # username = "username"
# # password = "pa$$word"
#
# ## Additional HTTP headers
# # [outputs.http.headers]
# # # Should be set to "application/json" for json data_format
# # Content-Type = "text/plain; charset=utf-8"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# # data_format = "influx"
# # Configuration for sending metrics to an Instrumental project
# [[outputs.instrumental]]
# ## Project API Token (required)
@@ -398,7 +453,7 @@
# ## 0 : No compression
# ## 1 : Gzip compression
# ## 2 : Snappy compression
# compression_codec = 0
# # compression_codec = 0
#
# ## RequiredAcks is used in Produce Requests to tell the broker how many
# ## replica acknowledgements it must see before responding
@@ -414,16 +469,17 @@
# ## received the data. This option provides the best durability, we
# ## guarantee that no messages will be lost as long as at least one in
# ## sync replica remains.
# required_acks = -1
# # required_acks = -1
#
# ## The total number of times to retry sending a message
# max_retry = 3
# ## The maximum number of times to retry sending a metric before failing;
# ## failed metrics will be retried again on the next flush.
# # max_retry = 3
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Optional SASL Config
@@ -434,7 +490,7 @@
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
# # data_format = "influx"
# # Configuration for the AWS Kinesis output.
@@ -525,23 +581,33 @@
# ## ex: prefix/web01.example.com/mem
# topic_prefix = "telegraf"
#
# ## QoS policy for messages
# ## 0 = at most once
# ## 1 = at least once
# ## 2 = exactly once
# # qos = 2
#
# ## username and password to connect to the MQTT server.
# # username = "telegraf"
# # password = "metricsmetricsmetricsmetrics"
#
# ## Timeout for write operations. default: 5s
# # timeout = "5s"
#
# ## client ID, if not set a random ID is generated
# # client_id = ""
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Timeout for write operations. default: 5s
# # timeout = "5s"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## When true, metrics will be sent in one MQTT message per flush. Otherwise,
# ## metrics are written one metric per MQTT message.
# # batch = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -559,11 +625,11 @@
# ## NATS subject for producer messages
# subject = "telegraf"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output.
@@ -694,6 +760,13 @@
# # address = "unix:///tmp/telegraf.sock"
# # address = "unixgram:///tmp/telegraf.sock"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Period between keep alive probes.
# ## Only applies to TCP sockets.
# ## 0 disables keep alive probes.
@@ -768,6 +841,56 @@
# [[processors.printer]]
# # Print all metrics that pass through this filter.
# [[processors.topk]]
# ## How many seconds between aggregations
# # period = 10
#
# ## How many top metrics to return
# # k = 10
#
# ## Over which tags should the aggregation be done. Globs can be specified, in
# ## which case any tag matching the glob will be aggregated over. If set to an
# ## empty list, no aggregation over tags is done
# # group_by = ['*']
#
# ## Over which fields the top k are calculated
# # fields = ["value"]
#
# ## What aggregation to use. Options: sum, mean, min, max
# # aggregation = "mean"
#
# ## Instead of the top k largest metrics, return the bottom k lowest metrics
# # bottomk = false
#
# ## The plugin assigns each metric a GroupBy tag generated from its name and
# ## tags. If this setting is different from "" the plugin will add a
# ## tag (whose name will be the value of this setting) to each metric with
# ## the value of the calculated GroupBy tag. Useful for debugging
# # add_groupby_tag = ""
#
# ## These settings provide a way to know the position of each metric in
# ## the top k. The 'add_rank_fields' setting lets you specify for which
# ## fields the position is required. If the list is non-empty, then a field
# ## will be added to each and every metric for each string present in this
# ## setting. This field will contain the ranking of the group that
# ## the metric belonged to when aggregated over that field.
# ## The name of the field will be set to the name of the aggregation field,
# ## suffixed with the string '_topk_rank'
# # add_rank_fields = []
#
# ## These settings provide a way to know what values the plugin is generating
# ## when aggregating metrics. The 'add_aggregate_fields' setting lets you
# ## specify for which fields the final aggregation value is required. If the
# ## list is non-empty, then a field will be added to each and every metric for
# ## each field present in this setting. This field will contain
# ## the computed aggregation for the group that the metric belonged to when
# ## aggregated over that field.
# ## The name of the field will be set to the name of the aggregation field,
# ## suffixed with the string '_topk_aggregate'
# # add_aggregate_fields = []
###############################################################################
# AGGREGATOR PLUGINS #
@@ -920,11 +1043,11 @@
# ## Maximum time to receive response.
# # response_timeout = "5s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
@@ -952,24 +1075,6 @@
# # bond_interfaces = ["bond0"]
# # Read Cassandra metrics through Jolokia
# [[inputs.cassandra]]
# # This is the context root used to compose the jolokia url
# context = "/jolokia/read"
# ## List of cassandra servers exposing jolokia read service
# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
# ## List of metrics collected on above servers
# ## Each metric consists of a jmx path.
# ## This will collect all heap memory usage metrics from the jvm and
# ## ReadLatency metrics for all keyspaces and tables.
# ## "type=Table" in the query works with Cassandra3.0. Older versions might
# ## need to use "type=ColumnFamily"
# metrics = [
# "/java.lang:type=Memory/HeapMemoryUsage",
# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
# ]
# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
# [[inputs.ceph]]
# ## This is the recommended interval to poll. Too frequent and you will lose
@@ -1122,12 +1227,17 @@
# ## Data centre to query the health checks from
# # datacentre = ""
#
# ## SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## If false, skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = true
#
# ## Consul checks' tag splitting
# # When tags are formatted like "key:value" with ":" as a delimiter, then
# # they will be split and reported as proper key:value pairs in Telegraf
# # tag_delimiter = ":"
# # Read metrics from one or many couchbase clusters
@@ -1183,10 +1293,10 @@
# ## Maximum time to receive a response from cluster.
# # response_timeout = "20s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## If false, skip chain & host verification
# # insecure_skip_verify = true
#
@@ -1271,11 +1381,11 @@
# docker_label_include = []
# docker_label_exclude = []
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
@@ -1327,11 +1437,11 @@
# ## "breaker". Per default, all stats are gathered.
# # node_stats = ["jvm", "http"]
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
@@ -1363,6 +1473,20 @@
# use_sudo = false
# # Read devices value(s) from a Fibaro controller
# [[inputs.fibaro]]
# ## Required Fibaro controller address/hostname.
# ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available
# url = "http://<controller>:80"
#
# ## Required credentials to access the API (http://<controller>/api/<component>)
# username = "<username>"
# password = "<password>"
#
# ## Amount of time allowed to complete the HTTP request
# # timeout = "5s"
# # Read stats about given file(s)
# [[inputs.filestat]]
# ## Files to gather stats about.
@@ -1424,11 +1548,11 @@
# username = ""
# password = ""
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
@@ -1452,11 +1576,11 @@
# ## field names.
# # keep_field_names = false
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
@@ -1493,11 +1617,11 @@
# ## Tag all metrics with the url
# # tag_url = true
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Amount of time allowed to complete the HTTP request
@@ -1537,11 +1661,11 @@
# # response_string_match = "ok"
# # response_string_match = "\".*_status\".?:.?\"up\""
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## HTTP Request Headers (all values must be strings)
@@ -1577,11 +1701,11 @@
# # "my_tag_2"
# # ]
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## HTTP parameters (all values must be strings). For "GET" requests, data
@@ -1609,11 +1733,11 @@
# "http://localhost:8086/debug/vars"
# ]
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## http request & header timeout
@@ -1767,10 +1891,10 @@
# # password = ""
# # response_timeout = "5s"
#
# ## Optional SSL config
# # ssl_ca = "/var/private/ca.pem"
# # ssl_cert = "/var/private/client.pem"
# # ssl_key = "/var/private/client-key.pem"
# ## Optional TLS config
# # tls_ca = "/var/private/ca.pem"
# # tls_cert = "/var/private/client.pem"
# # tls_key = "/var/private/client-key.pem"
# # insecure_skip_verify = false
#
# ## Add metrics to read
@@ -1792,22 +1916,22 @@
# # password = ""
# # response_timeout = "5s"
#
# ## Optional SSL config
# # ssl_ca = "/var/private/ca.pem"
# # ssl_cert = "/var/private/client.pem"
# # ssl_key = "/var/private/client-key.pem"
# ## Optional TLS config
# # tls_ca = "/var/private/ca.pem"
# # tls_cert = "/var/private/client.pem"
# # tls_key = "/var/private/client-key.pem"
# # insecure_skip_verify = false
#
# ## Add proxy targets to query
# # default_target_username = ""
# # default_target_password = ""
# [[inputs.jolokia_proxy.target]]
# [[inputs.jolokia2_proxy.target]]
# url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
# # username = ""
# # password = ""
# # username = ""
# # password = ""
#
# ## Add metrics to read
# [[inputs.jolokia_proxy.metric]]
# [[inputs.jolokia2_proxy.metric]]
# name = "java_runtime"
# mbean = "java.lang:type=Runtime"
# paths = ["Uptime"]
@@ -1824,11 +1948,11 @@
# ## Time limit for http requests
# timeout = "5s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
@@ -1848,11 +1972,11 @@
# ## Set response_timeout (default 5 seconds)
# # response_timeout = "5s"
#
# ## Optional SSL Config
# # ssl_ca = /path/to/cafile
# # ssl_cert = /path/to/certfile
# # ssl_key = /path/to/keyfile
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = /path/to/cafile
# # tls_cert = /path/to/certfile
# # tls_key = /path/to/keyfile
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
@@ -1896,6 +2020,16 @@
# # campaign_id = ""
# # Read metrics from one or many mcrouter servers
# [[inputs.mcrouter]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
# ## with port, e.g. tcp://localhost:11211, tcp://10.0.0.1:11211, etc.
# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"]
#
# ## Timeout for metric collections from all servers. Minimum timeout is "1s".
# # timeout = "5s"
# # Read metrics from one or many memcached servers
# [[inputs.memcached]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
@@ -1934,11 +2068,11 @@
# # "messages",
# # ]
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
@@ -1960,13 +2094,15 @@
# ## mongodb://user:auth_key@10.10.3.30:27017,
# ## mongodb://10.10.3.33:18832,
# servers = ["mongodb://127.0.0.1:27017"]
# gather_perdb_stats = false
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## When true, collect per database stats
# # gather_perdb_stats = false
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
@@ -2045,10 +2181,12 @@
# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
# interval_slow = "30m"
#
# ## Optional SSL Config (will be used if tls=custom parameter specified in server uri)
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
# ## Optional TLS Config (will be used if tls=custom parameter specified in server uri)
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
# # Provides metrics about the state of a NATS server
@@ -2075,7 +2213,7 @@
# ##
# # TCP or UDP 'ping' given url and collect response time in seconds
# # Collect response time of a TCP or UDP connection
# [[inputs.net_response]]
# ## Protocol, must be "tcp" or "udp"
# ## NOTE: because the "udp" protocol does not respond to requests, it requires
@@ -2083,11 +2221,12 @@
# protocol = "tcp"
# ## Server address (default localhost)
# address = "localhost:80"
#
# ## Set timeout
# timeout = "1s"
# # timeout = "1s"
#
# ## Set read timeout (only used if expecting a response)
# read_timeout = "1s"
# # read_timeout = "1s"
#
# ## The following options are required for UDP checks. For TCP, they are
# ## optional. The plugin will send the given string to the server and then
@@ -2096,6 +2235,9 @@
# # send = "ssh"
# ## expected string in answer
# # expect = "ssh"
#
# ## Uncomment to remove deprecated fields
# # fieldexclude = ["result_type", "string_found"]
# # Read TCP metrics such as established, time wait and sockets counts.
@@ -2108,10 +2250,11 @@
# # An array of Nginx stub_status URI to gather stats.
# urls = ["http://localhost/server_status"]
#
# # TLS/SSL configuration
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.cer"
# ssl_key = "/etc/telegraf/key.key"
# ## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.cer"
# tls_key = "/etc/telegraf/key.key"
# ## Use TLS but skip chain & host verification
# insecure_skip_verify = false
#
# # HTTP response timeout (default: 5s)
@@ -2151,6 +2294,15 @@
# dns_lookup = true
# # Pulls statistics from nvidia GPUs attached to the host
# [[inputs.nvidia_smi]]
# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath
# # bin_path = /usr/bin/nvidia-smi
#
# ## Optional: timeout for GPU polling
# # timeout = 5s
# # OpenLDAP cn=Monitor plugin
# [[inputs.openldap]]
# host = "localhost"
@@ -2165,7 +2317,7 @@
# insecure_skip_verify = false
#
# # Path to PEM-encoded Root certificate to use to verify server certificate
# ssl_ca = "/etc/ssl/certs.pem"
# tls_ca = "/etc/ssl/certs.pem"
#
# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
# bind_dn = ""
@@ -2316,11 +2468,11 @@
# ## Specify timeout duration for slower prometheus clients (default is 3s)
# # response_timeout = "3s"
#
# ## Optional SSL Config
# # ssl_ca = /path/to/cafile
# # ssl_cert = /path/to/certfile
# # ssl_key = /path/to/keyfile
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = /path/to/cafile
# # tls_cert = /path/to/certfile
# # tls_key = /path/to/keyfile
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
@@ -2340,11 +2492,11 @@
# # username = "guest"
# # password = "guest"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Optional request timeouts
@@ -2773,11 +2925,11 @@
# ## Request timeout
# # timeout = "5s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
@@ -2861,11 +3013,11 @@
# ## Timeout for metric collections from all servers. Minimum timeout is "1s".
# # timeout = "5s"
#
# ## Optional SSL Config
# # enable_ssl = true
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Optional TLS Config
# # enable_tls = true
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## If false, skip chain & host verification
# # insecure_skip_verify = true
@@ -2894,11 +3046,11 @@
# ## described here: https://www.rabbitmq.com/plugins.html
# # auth_method = "PLAIN"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to consume.
@@ -2908,6 +3060,28 @@
# data_format = "influx"
# # Read Cassandra metrics through Jolokia
# [[inputs.cassandra]]
# ## DEPRECATED: The cassandra plugin has been deprecated. Please use the
# ## jolokia2 plugin instead.
# ##
# ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
#
# context = "/jolokia/read"
# ## List of cassandra servers exposing jolokia read service
# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
# ## List of metrics collected on above servers
# ## Each metric consists of a jmx path.
# ## This will collect all heap memory usage metrics from the jvm and
# ## ReadLatency metrics for all keyspaces and tables.
# ## "type=Table" in the query works with Cassandra3.0. Older versions might
# ## need to use "type=ColumnFamily"
# metrics = [
# "/java.lang:type=Memory/HeapMemoryUsage",
# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
# ]
# # Influx HTTP write listener
# [[inputs.http_listener]]
# ## Address and port to host HTTP listener on
@@ -2940,6 +3114,53 @@
# # basic_password = "barfoo"
# # Read JTI OpenConfig Telemetry from listed sensors
# [[inputs.jti_openconfig_telemetry]]
# ## List of device addresses to collect telemetry from
# servers = ["localhost:1883"]
#
# ## Authentication details. Username and password are required if the device
# ## expects authentication. The client ID must be unique when connecting from
# ## multiple instances of telegraf to the same device
# username = "user"
# password = "pass"
# client_id = "telegraf"
#
# ## Frequency to get data
# sample_frequency = "1000ms"
#
# ## Sensors to subscribe for
# ## An identifier for each sensor can be provided in the path by separating it with a space;
# ## otherwise the sensor path will be used as the identifier.
# ## When an identifier is used, a list of space-separated sensors can be provided.
# ## A single subscription will be created with all these sensors, and data will
# ## be saved to a measurement with this identifier name
# sensors = [
# "/interfaces/",
# "collection /components/ /lldp",
# ]
#
# ## We allow specifying a sensor-group-level reporting rate. To do this, specify the
# ## reporting rate as a Duration at the beginning of the sensor paths / collection
# ## name. For entries without a reporting rate, the configured sample frequency is used
# sensors = [
# "1000ms customReporting /interfaces /lldp",
# "2000ms collection /components",
# "/interfaces",
# ]
#
# ## x509 certificate to use with the TLS connection. If it is not provided, an insecure
# ## channel will be opened with the server
# ssl_cert = "/etc/telegraf/cert.pem"
#
# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
# ## Failed streams/calls will not be retried if 0 is provided
# retry_delay = "1000ms"
#
# ## To treat all string values as tags, set this to true
# str_as_tags = false
# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer]]
# ## kafka servers
@@ -2947,11 +3168,11 @@
# ## topic(s) to consume
# topics = ["telegraf"]
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Optional SASL Config
@@ -3017,7 +3238,6 @@
# # watch_method = "inotify"
#
# ## Parse logstash-style "grok" patterns:
# ## Telegraf built-in parsing patterns: https://goo.gl/dkay10
# [inputs.logparser.grok]
# ## This is a list of patterns to check the given log file(s) for.
# ## Note that adding patterns here increases processing time. The most
@@ -3078,11 +3298,11 @@
# # username = "telegraf"
# # password = "metricsmetricsmetricsmetrics"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to consume.
@@ -3257,6 +3477,13 @@
# ## 0 (default) is unlimited.
# # read_timeout = "30s"
#
# ## Optional TLS configuration.
# ## Only applies to stream sockets (e.g. TCP).
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Enables client authentication if set.
# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
# ## Maximum socket buffer size in bytes.
# ## For stream sockets, once the buffer fills up, the sender will start backing up.
# ## For datagram sockets, once the buffer fills up, metrics will start dropping.


@@ -519,7 +519,13 @@ func (c *Config) LoadDirectory(path string) error {
log.Printf("W! Telegraf is not permitted to read %s", thispath)
return nil
}
if info.IsDir() {
if strings.HasPrefix(info.Name(), "..") {
// skip Kubernetes mounts, preventing loading the same config twice
return filepath.SkipDir
}
return nil
}
name := info.Name()
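For context on the `..` check above: when Kubernetes mounts a ConfigMap as a volume, it writes the files into a hidden timestamped directory and exposes them through symlinks, so a naive directory walk would load each config twice. An illustrative layout (names are made up):
```
/etc/telegraf/telegraf.d/
├── ..2018_05_24_10_00_00.123456789/   # hidden timestamped data dir
│   └── inputs.conf
├── ..data -> ..2018_05_24_10_00_00.123456789
└── inputs.conf -> ..data/inputs.conf
```
Skipping any directory whose name starts with `..` leaves only the top-level symlinked files to be loaded.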
@@ -1403,6 +1409,18 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
}
}
if node, ok := tbl.Fields["graphite_tag_support"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
c.GraphiteTagSupport, err = b.Boolean()
if err != nil {
return nil, err
}
}
}
}
if node, ok := tbl.Fields["json_timestamp_units"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
@@ -1422,6 +1440,7 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
delete(tbl.Fields, "influx_max_line_bytes")
delete(tbl.Fields, "influx_sort_fields")
delete(tbl.Fields, "influx_uint_support")
delete(tbl.Fields, "graphite_tag_support")
delete(tbl.Fields, "data_format")
delete(tbl.Fields, "prefix")
delete(tbl.Fields, "template")


@@ -0,0 +1,4 @@
# This invalid config file should be skipped during testing
# as it is an ..data folder
[[outputs.influxdb


@@ -4,11 +4,7 @@ import (
"bufio"
"bytes"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io/ioutil"
"log"
"math/big"
"os"
@@ -112,49 +108,6 @@ func RandomString(n int) string {
return string(bytes)
}
// GetTLSConfig gets a tls.Config object from the given certs, key, and CA files.
// you must give the full path to the files.
// If all files are blank and InsecureSkipVerify=false, returns a nil pointer.
func GetTLSConfig(
SSLCert, SSLKey, SSLCA string,
InsecureSkipVerify bool,
) (*tls.Config, error) {
if SSLCert == "" && SSLKey == "" && SSLCA == "" && !InsecureSkipVerify {
return nil, nil
}
t := &tls.Config{
InsecureSkipVerify: InsecureSkipVerify,
}
if SSLCA != "" {
caCert, err := ioutil.ReadFile(SSLCA)
if err != nil {
return nil, errors.New(fmt.Sprintf("Could not load TLS CA: %s",
err))
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
t.RootCAs = caCertPool
}
if SSLCert != "" && SSLKey != "" {
cert, err := tls.LoadX509KeyPair(SSLCert, SSLKey)
if err != nil {
return nil, errors.New(fmt.Sprintf(
"Could not load TLS client key/certificate from %s:%s: %s",
SSLKey, SSLCert, err))
}
t.Certificates = []tls.Certificate{cert}
t.BuildNameToCertificate()
}
// will be nil by default if nothing is provided
return t, nil
}
// SnakeCase converts the given string to snake case following the Golang format:
// acronyms are converted to lower-case and preceded by an underscore.
func SnakeCase(in string) string {


@@ -1,54 +0,0 @@
package limiter
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestRateLimiter(t *testing.T) {
r := NewRateLimiter(5, time.Second)
ticker := time.NewTicker(time.Millisecond * 75)
// test that we can only get 5 receives from the rate limiter
counter := 0
outer:
for {
select {
case <-r.C:
counter++
case <-ticker.C:
break outer
}
}
assert.Equal(t, 5, counter)
r.Stop()
// verify that the Stop function closes the channel.
_, ok := <-r.C
assert.False(t, ok)
}
func TestRateLimiterMultipleIterations(t *testing.T) {
r := NewRateLimiter(5, time.Millisecond*50)
ticker := time.NewTicker(time.Millisecond * 250)
// test that we can get 15 receives from the rate limiter
counter := 0
outer:
for {
select {
case <-ticker.C:
break outer
case <-r.C:
counter++
}
}
assert.True(t, counter > 10)
r.Stop()
// verify that the Stop function closes the channel.
_, ok := <-r.C
assert.False(t, ok)
}


@@ -77,6 +77,7 @@ func (r *RunningInput) MakeMetric(
if r.trace && m != nil {
s := influx.NewSerializer()
s.SetFieldSortOrder(influx.SortFields)
octets, err := s.Serialize(m)
if err == nil {
fmt.Print("> " + string(octets))


@@ -113,6 +113,11 @@ func (ro *RunningOutput) AddMetric(m telegraf.Metric) {
m, _ = metric.New(name, tags, fields, t)
}
if output, ok := ro.Output.(telegraf.AggregatingOutput); ok {
output.Add(m)
return
}
ro.metrics.Add(m)
if ro.metrics.Len() == ro.MetricBatchSize {
batch := ro.metrics.Batch(ro.MetricBatchSize)
@@ -125,6 +130,12 @@ func (ro *RunningOutput) AddMetric(m telegraf.Metric) {
// Write writes all cached points to this output.
func (ro *RunningOutput) Write() error {
if output, ok := ro.Output.(telegraf.AggregatingOutput); ok {
metrics := output.Push()
ro.metrics.Add(metrics...)
output.Reset()
}
nFails, nMetrics := ro.failMetrics.Len(), ro.metrics.Len()
ro.BufferSize.Set(int64(nFails + nMetrics))
log.Printf("D! Output [%s] buffer fullness: %d / %d metrics. ",

internal/tls/config.go Normal file

@@ -0,0 +1,130 @@
package tls
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
)
// ClientConfig represents the standard client TLS config.
type ClientConfig struct {
TLSCA string `toml:"tls_ca"`
TLSCert string `toml:"tls_cert"`
TLSKey string `toml:"tls_key"`
InsecureSkipVerify bool `toml:"insecure_skip_verify"`
// Deprecated in 1.7; use TLS variables above
SSLCA string `toml:"ssl_ca"`
SSLCert string `toml:"ssl_cert"`
SSLKey string `toml:"ssl_key"`
}
// ServerConfig represents the standard server TLS config.
type ServerConfig struct {
TLSCert string `toml:"tls_cert"`
TLSKey string `toml:"tls_key"`
TLSAllowedCACerts []string `toml:"tls_allowed_cacerts"`
}
// TLSConfig returns a tls.Config, may be nil without error if TLS is not
// configured.
func (c *ClientConfig) TLSConfig() (*tls.Config, error) {
// Support deprecated variable names
if c.TLSCA == "" && c.SSLCA != "" {
c.TLSCA = c.SSLCA
}
if c.TLSCert == "" && c.SSLCert != "" {
c.TLSCert = c.SSLCert
}
if c.TLSKey == "" && c.SSLKey != "" {
c.TLSKey = c.SSLKey
}
// TODO: return default tls.Config; plugins should not call if they don't
// want TLS, this will require using another option to determine. In the
// case of an HTTP plugin, you could use `https`. Other plugins may need
// the dedicated option `TLSEnable`.
if c.TLSCA == "" && c.TLSKey == "" && c.TLSCert == "" && !c.InsecureSkipVerify {
return nil, nil
}
tlsConfig := &tls.Config{
InsecureSkipVerify: c.InsecureSkipVerify,
Renegotiation: tls.RenegotiateNever,
}
if c.TLSCA != "" {
pool, err := makeCertPool([]string{c.TLSCA})
if err != nil {
return nil, err
}
tlsConfig.RootCAs = pool
}
if c.TLSCert != "" && c.TLSKey != "" {
err := loadCertificate(tlsConfig, c.TLSCert, c.TLSKey)
if err != nil {
return nil, err
}
}
return tlsConfig, nil
}
// TLSConfig returns a tls.Config, may be nil without error if TLS is not
// configured.
func (c *ServerConfig) TLSConfig() (*tls.Config, error) {
if c.TLSCert == "" && c.TLSKey == "" && len(c.TLSAllowedCACerts) == 0 {
return nil, nil
}
tlsConfig := &tls.Config{}
if len(c.TLSAllowedCACerts) != 0 {
pool, err := makeCertPool(c.TLSAllowedCACerts)
if err != nil {
return nil, err
}
tlsConfig.ClientCAs = pool
tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
}
if c.TLSCert != "" && c.TLSKey != "" {
err := loadCertificate(tlsConfig, c.TLSCert, c.TLSKey)
if err != nil {
return nil, err
}
}
return tlsConfig, nil
}
func makeCertPool(certFiles []string) (*x509.CertPool, error) {
pool := x509.NewCertPool()
for _, certFile := range certFiles {
pem, err := ioutil.ReadFile(certFile)
if err != nil {
return nil, fmt.Errorf(
"could not read certificate %q: %v", certFile, err)
}
ok := pool.AppendCertsFromPEM(pem)
if !ok {
return nil, fmt.Errorf(
"could not parse any PEM certificates %q", certFile)
}
}
return pool, nil
}
func loadCertificate(config *tls.Config, certFile, keyFile string) error {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return fmt.Errorf(
"could not load keypair %s:%s: %v", certFile, keyFile, err)
}
config.Certificates = []tls.Certificate{cert}
config.BuildNameToCertificate()
return nil
}
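To show how a plugin is expected to consume this package, here is a minimal hypothetical sketch (the `Example` plugin is illustrative, not part of this changeset): embedding `tls.ClientConfig` adds the standard `tls_ca`/`tls_cert`/`tls_key`/`insecure_skip_verify` options, and `TLSConfig()` turns them into a `*tls.Config` for an HTTP client.
```go
package example

import (
	"net/http"
	"time"

	"github.com/influxdata/telegraf/internal/tls"
)

// Example is a hypothetical plugin; embedding ClientConfig gives it the
// standard tls_* configuration options.
type Example struct {
	URL string `toml:"url"`
	tls.ClientConfig

	client *http.Client
}

// initClient builds the HTTP client, enabling TLS only when configured.
func (e *Example) initClient() error {
	tlsCfg, err := e.ClientConfig.TLSConfig()
	if err != nil {
		return err
	}
	// tlsCfg is nil when no tls_* options are set; http.Transport treats
	// a nil TLSClientConfig as "use the defaults".
	e.client = &http.Client{
		Transport: &http.Transport{TLSClientConfig: tlsCfg},
		Timeout:   5 * time.Second,
	}
	return nil
}
```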

internal/tls/config_test.go Normal file

@@ -0,0 +1,226 @@
package tls_test
import (
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
var pki = testutil.NewPKI("../../testutil/pki")
func TestClientConfig(t *testing.T) {
tests := []struct {
name string
client tls.ClientConfig
expNil bool
expErr bool
}{
{
name: "unset",
client: tls.ClientConfig{},
expNil: true,
},
{
name: "success",
client: tls.ClientConfig{
TLSCA: pki.CACertPath(),
TLSCert: pki.ClientCertPath(),
TLSKey: pki.ClientKeyPath(),
},
},
{
name: "invalid ca",
client: tls.ClientConfig{
TLSCA: pki.ClientKeyPath(),
TLSCert: pki.ClientCertPath(),
TLSKey: pki.ClientKeyPath(),
},
expNil: true,
expErr: true,
},
{
name: "missing ca is okay",
client: tls.ClientConfig{
TLSCert: pki.ClientCertPath(),
TLSKey: pki.ClientKeyPath(),
},
},
{
name: "invalid cert",
client: tls.ClientConfig{
TLSCA: pki.CACertPath(),
TLSCert: pki.ClientKeyPath(),
TLSKey: pki.ClientKeyPath(),
},
expNil: true,
expErr: true,
},
{
name: "missing cert skips client keypair",
client: tls.ClientConfig{
TLSCA: pki.CACertPath(),
TLSKey: pki.ClientKeyPath(),
},
expNil: false,
expErr: false,
},
{
name: "missing key skips client keypair",
client: tls.ClientConfig{
TLSCA: pki.CACertPath(),
TLSCert: pki.ClientCertPath(),
},
expNil: false,
expErr: false,
},
{
name: "support deprecated ssl field names",
client: tls.ClientConfig{
SSLCA: pki.CACertPath(),
SSLCert: pki.ClientCertPath(),
SSLKey: pki.ClientKeyPath(),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tlsConfig, err := tt.client.TLSConfig()
if !tt.expNil {
require.NotNil(t, tlsConfig)
} else {
require.Nil(t, tlsConfig)
}
if !tt.expErr {
require.NoError(t, err)
} else {
require.Error(t, err)
}
})
}
}
func TestServerConfig(t *testing.T) {
tests := []struct {
name string
server tls.ServerConfig
expNil bool
expErr bool
}{
{
name: "unset",
server: tls.ServerConfig{},
expNil: true,
},
{
name: "success",
server: tls.ServerConfig{
TLSCert: pki.ServerCertPath(),
TLSKey: pki.ServerKeyPath(),
TLSAllowedCACerts: []string{pki.CACertPath()},
},
},
{
name: "invalid ca",
server: tls.ServerConfig{
TLSCert: pki.ServerCertPath(),
TLSKey: pki.ServerKeyPath(),
TLSAllowedCACerts: []string{pki.ServerKeyPath()},
},
expNil: true,
expErr: true,
},
{
name: "missing allowed ca is okay",
server: tls.ServerConfig{
TLSCert: pki.ServerCertPath(),
TLSKey: pki.ServerKeyPath(),
},
expNil: false,
expErr: false,
},
{
name: "invalid cert",
server: tls.ServerConfig{
TLSCert: pki.ServerKeyPath(),
TLSKey: pki.ServerKeyPath(),
TLSAllowedCACerts: []string{pki.CACertPath()},
},
expNil: true,
expErr: true,
},
{
name: "missing cert",
server: tls.ServerConfig{
TLSKey: pki.ServerKeyPath(),
TLSAllowedCACerts: []string{pki.CACertPath()},
},
expNil: true,
expErr: true,
},
{
name: "missing key",
server: tls.ServerConfig{
TLSCert: pki.ServerCertPath(),
TLSAllowedCACerts: []string{pki.CACertPath()},
},
expNil: true,
expErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tlsConfig, err := tt.server.TLSConfig()
if !tt.expNil {
require.NotNil(t, tlsConfig)
}
if !tt.expErr {
require.NoError(t, err)
}
})
}
}
func TestConnect(t *testing.T) {
clientConfig := tls.ClientConfig{
TLSCA: pki.CACertPath(),
TLSCert: pki.ClientCertPath(),
TLSKey: pki.ClientKeyPath(),
}
serverConfig := tls.ServerConfig{
TLSCert: pki.ServerCertPath(),
TLSKey: pki.ServerKeyPath(),
TLSAllowedCACerts: []string{pki.CACertPath()},
}
serverTLSConfig, err := serverConfig.TLSConfig()
require.NoError(t, err)
ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}))
ts.TLS = serverTLSConfig
ts.StartTLS()
defer ts.Close()
clientTLSConfig, err := clientConfig.TLSConfig()
require.NoError(t, err)
client := http.Client{
Transport: &http.Transport{
TLSClientConfig: clientTLSConfig,
},
Timeout: 10 * time.Second,
}
resp, err := client.Get(ts.URL)
require.NoError(t, err)
require.Equal(t, 200, resp.StatusCode)
}

internal/usage.go Normal file

@@ -0,0 +1,45 @@
// +build !windows
package internal
const Usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.
Usage:
telegraf [commands|flags]
The commands & flags are:
config print out full sample configuration to stdout
version print the version to stdout
--config <file> configuration file to load
--test gather metrics once, print them to stdout, and exit
--config-directory directory containing additional *.conf files
--input-filter filter the input plugins to enable, separator is :
--output-filter filter the output plugins to enable, separator is :
--usage print usage for a plugin, ie, 'telegraf --usage mysql'
--debug print metrics as they're generated to stdout
--pprof-addr pprof address to listen on, format: localhost:6060 or :6060
--quiet run in quiet mode
Examples:
# generate a telegraf config file:
telegraf config > telegraf.conf
# generate config with only cpu input & influxdb output plugins defined
telegraf --input-filter cpu --output-filter influxdb config
# run a single telegraf collection, outputting metrics to stdout
telegraf --config telegraf.conf --test
# run telegraf with all plugins defined in config file
telegraf --config telegraf.conf
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
# run telegraf with pprof
telegraf --config telegraf.conf --pprof-addr localhost:6060
`

internal/usage_windows.go Normal file

@@ -0,0 +1,54 @@
// +build windows
package internal
const Usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.
Usage:
telegraf [commands|flags]
The commands & flags are:
config print out full sample configuration to stdout
version print the version to stdout
--config <file> configuration file to load
--test gather metrics once, print them to stdout, and exit
--config-directory directory containing additional *.conf files
--input-filter filter the input plugins to enable, separator is :
--output-filter filter the output plugins to enable, separator is :
--usage print usage for a plugin, ie, 'telegraf --usage mysql'
--debug print metrics as they're generated to stdout
--pprof-addr pprof address to listen on, format: localhost:6060 or :6060
--quiet run in quiet mode
--console run as console application
--service operate on service, one of: install, uninstall, start, stop
Examples:
# generate a telegraf config file:
telegraf config > telegraf.conf
# generate config with only cpu input & influxdb output plugins defined
telegraf --input-filter cpu --output-filter influxdb config
# run a single telegraf collection, outputting metrics to stdout
telegraf --config telegraf.conf --test
# run telegraf with all plugins defined in config file
telegraf --config telegraf.conf
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
# run telegraf with pprof
telegraf --config telegraf.conf --pprof-addr localhost:6060
# run telegraf without service controller
telegraf --console --config "C:\Program Files\Telegraf\telegraf.conf"
# install telegraf service
telegraf --service install --config "C:\Program Files\Telegraf\telegraf.conf"
`


@@ -54,6 +54,8 @@ type Metric interface {
AddField(key string, value interface{})
RemoveField(key string)
SetTime(t time.Time)
// HashID returns a unique identifier for the series.
HashID() uint64


@@ -123,6 +123,7 @@ func (m *metric) AddTag(key, value string) {
if key == tag.Key {
tag.Value = value
return
}
m.tags = append(m.tags, nil)
@@ -201,6 +202,10 @@ func (m *metric) RemoveField(key string) {
}
}
func (m *metric) SetTime(t time.Time) {
m.tm = t
}
func (m *metric) Copy() telegraf.Metric {
m2 := &metric{
name: m.name,
@@ -232,9 +237,12 @@ func (m *metric) IsAggregate() bool {
func (m *metric) HashID() uint64 {
h := fnv.New64a()
h.Write([]byte(m.name))
h.Write([]byte("\n"))
for _, tag := range m.tags {
h.Write([]byte(tag.Key))
h.Write([]byte("\n"))
h.Write([]byte(tag.Value))
h.Write([]byte("\n"))
}
return h.Sum64()
}
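A standalone sketch of why the `\n` delimiters matter (the values are made up): without a separator, different tag sets can concatenate to the same byte stream and produce the same FNV hash.
```go
package main

import (
	"fmt"
	"hash/fnv"
)

// hash concatenates the parts, optionally delimiting them, and returns
// the 64-bit FNV-1a sum, mirroring the approach used by HashID.
func hash(parts []string, delimited bool) uint64 {
	h := fnv.New64a()
	for _, p := range parts {
		h.Write([]byte(p))
		if delimited {
			h.Write([]byte("\n"))
		}
	}
	return h.Sum64()
}

func main() {
	a := []string{"cpu", "a", "x", "b", "y"} // tags a=x, b=y
	b := []string{"cpu", "a", "xby"}         // tag a=xby
	fmt.Println(hash(a, false) == hash(b, false)) // true: collision
	fmt.Println(hash(a, true) == hash(b, true))   // false: delimited
}
```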


@@ -63,6 +63,7 @@ func TestAddTagOverwrites(t *testing.T) {
value, ok := m.GetTag("host")
require.True(t, ok)
require.Equal(t, "example.org", value)
require.Equal(t, 1, len(m.TagList()))
}
func TestRemoveTagNoEffectOnMissingTags(t *testing.T) {
@@ -267,6 +268,32 @@ func TestHashID_Consistency(t *testing.T) {
assert.Equal(t, m2.HashID(), m3.HashID())
}
func TestHashID_Delimiting(t *testing.T) {
m1, _ := New(
"cpu",
map[string]string{
"a": "x",
"b": "y",
"c": "z",
},
map[string]interface{}{
"value": float64(1),
},
time.Now(),
)
m2, _ := New(
"cpu",
map[string]string{
"a": "xbycz",
},
map[string]interface{}{
"value": float64(1),
},
time.Now(),
)
assert.NotEqual(t, m1.HashID(), m2.HashID())
}
func TestSetName(t *testing.T) {
m := baseMetric()
m.SetName("foo")


@@ -13,6 +13,12 @@ type Output interface {
Write(metrics []Metric) error
}
// AggregatingOutput is an Output that aggregates metrics internally:
// Add accepts each incoming metric, Push returns the aggregated metrics
// at flush time, and Reset clears state for the next aggregation period.
type AggregatingOutput interface {
Add(in Metric)
Push() []Metric
Reset()
}
type ServiceOutput interface {
// Connect to the Output
Connect() error
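A minimal hypothetical sketch of an output satisfying AggregatingOutput (the `CountingOutput` name and behavior are illustrative; a real plugin also implements the base Output methods): the running output feeds every metric to Add, then calls Push and Reset at each flush, so Write only ever sees the aggregated result.
```go
package example

import (
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
)

// CountingOutput aggregates a per-series count between flushes instead
// of buffering every incoming metric.
type CountingOutput struct {
	counts map[uint64]int64
	names  map[uint64]string
}

// Add records one occurrence of the metric's series.
func (c *CountingOutput) Add(in telegraf.Metric) {
	if c.counts == nil {
		c.Reset()
	}
	id := in.HashID()
	c.counts[id]++
	c.names[id] = in.Name()
}

// Push emits one count metric per series seen since the last Reset.
func (c *CountingOutput) Push() []telegraf.Metric {
	out := make([]telegraf.Metric, 0, len(c.counts))
	for id, n := range c.counts {
		m, _ := metric.New(c.names[id],
			map[string]string{},
			map[string]interface{}{"count": n},
			time.Now())
		out = append(out, m)
	}
	return out
}

// Reset clears the aggregation state for the next period.
func (c *CountingOutput) Reset() {
	c.counts = make(map[uint64]int64)
	c.names = make(map[uint64]string)
}
```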


@@ -9,6 +9,27 @@ The metric names, to make it less complicated in querying, have replaced all `-`
All metrics are attempted to be cast to integers, then booleans, then strings.
### Configuration:
```toml
# Read stats from aerospike server(s)
[[inputs.aerospike]]
## Aerospike servers to connect to (with port)
## This plugin will query all namespaces the aerospike
## server has configured and get stats for them.
servers = ["localhost:3000"]
# username = "telegraf"
# password = "pa$$word"
## Optional TLS Config
# enable_tls = false
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## If false, skip chain & host verification
# insecure_skip_verify = true
```
### Measurements:
The aerospike metrics are under two measurement names:


@@ -1,6 +1,7 @@
package aerospike
import (
"crypto/tls"
"errors"
"log"
"net"
@@ -10,13 +11,24 @@ import (
"time"
"github.com/influxdata/telegraf"
tlsint "github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs"
as "github.com/aerospike/aerospike-client-go"
)
type Aerospike struct {
Servers []string
Servers []string `toml:"servers"`
Username string `toml:"username"`
Password string `toml:"password"`
EnableTLS bool `toml:"enable_tls"`
EnableSSL bool `toml:"enable_ssl"` // deprecated in 1.7; use enable_tls
tlsint.ClientConfig
initialized bool
tlsConfig *tls.Config
}
var sampleConfig = `
@@ -24,6 +36,17 @@ var sampleConfig = `
## This plugin will query all namespaces the aerospike
## server has configured and get stats for them.
servers = ["localhost:3000"]
# username = "telegraf"
# password = "pa$$word"
## Optional TLS Config
# enable_tls = false
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## If false, skip chain & host verification
# insecure_skip_verify = true
`
func (a *Aerospike) SampleConfig() string {
@@ -35,6 +58,18 @@ func (a *Aerospike) Description() string {
}
func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
if !a.initialized {
tlsConfig, err := a.ClientConfig.TLSConfig()
if err != nil {
return err
}
if tlsConfig == nil && (a.EnableTLS || a.EnableSSL) {
tlsConfig = &tls.Config{}
}
a.tlsConfig = tlsConfig
a.initialized = true
}
if len(a.Servers) == 0 {
return a.gatherServer("127.0.0.1:3000", acc)
}
@@ -63,7 +98,11 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
iport = 3000
}
c, err := as.NewClient(host, iport)
policy := as.NewClientPolicy()
policy.User = a.Username
policy.Password = a.Password
policy.TlsConfig = a.tlsConfig
c, err := as.NewClientWithPolicy(policy, host, iport)
if err != nil {
return err
}


@@ -4,8 +4,10 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
_ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/apache"
_ "github.com/influxdata/telegraf/plugins/inputs/aurora"
_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
_ "github.com/influxdata/telegraf/plugins/inputs/bond"
_ "github.com/influxdata/telegraf/plugins/inputs/burrow"
_ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
_ "github.com/influxdata/telegraf/plugins/inputs/ceph"
_ "github.com/influxdata/telegraf/plugins/inputs/cgroup"
@@ -24,6 +26,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
_ "github.com/influxdata/telegraf/plugins/inputs/exec"
_ "github.com/influxdata/telegraf/plugins/inputs/fail2ban"
_ "github.com/influxdata/telegraf/plugins/inputs/fibaro"
_ "github.com/influxdata/telegraf/plugins/inputs/filestat"
_ "github.com/influxdata/telegraf/plugins/inputs/fluentd"
_ "github.com/influxdata/telegraf/plugins/inputs/graylog"
@@ -41,6 +44,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/iptables"
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia2"
_ "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry"
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer_legacy"
_ "github.com/influxdata/telegraf/plugins/inputs/kapacitor"
@@ -49,6 +53,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/logparser"
_ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
_ "github.com/influxdata/telegraf/plugins/inputs/mcrouter"
_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
_ "github.com/influxdata/telegraf/plugins/inputs/mesos"
_ "github.com/influxdata/telegraf/plugins/inputs/minecraft"
@@ -64,6 +69,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/nstat"
_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
_ "github.com/influxdata/telegraf/plugins/inputs/nvidia_smi"
_ "github.com/influxdata/telegraf/plugins/inputs/openldap"
_ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd"
_ "github.com/influxdata/telegraf/plugins/inputs/passenger"


@@ -32,11 +32,11 @@ The following defaults are known to work with RabbitMQ:
## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
## described here: https://www.rabbitmq.com/plugins.html
# auth_method = "PLAIN"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Data format to consume.


@@ -10,7 +10,7 @@ import (
"github.com/streadway/amqp"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
)
@@ -31,14 +31,7 @@ type AMQPConsumer struct {
// AMQP Auth method
AuthMethod string
// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
tls.ClientConfig
parser parsers.Parser
conn *amqp.Connection
@@ -78,11 +71,11 @@ func (a *AMQPConsumer) SampleConfig() string {
## described here: https://www.rabbitmq.com/plugins.html
# auth_method = "PLAIN"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Data format to consume.
@@ -108,8 +101,7 @@ func (a *AMQPConsumer) Gather(_ telegraf.Accumulator) error {
func (a *AMQPConsumer) createConfig() (*amqp.Config, error) {
// make new tls config
tls, err := internal.GetTLSConfig(
a.SSLCert, a.SSLKey, a.SSLCA, a.InsecureSkipVerify)
tls, err := a.ClientConfig.TLSConfig()
if err != nil {
return nil, err
}


@@ -21,11 +21,11 @@ Typically, the `mod_status` module is configured to expose a page at the `/serve
## Maximum time to receive response.
# response_timeout = "5s"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
```


@@ -13,6 +13,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -21,14 +22,7 @@ type Apache struct {
Username string
Password string
ResponseTimeout internal.Duration
// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
tls.ClientConfig
client *http.Client
}
@@ -46,11 +40,11 @@ var sampleConfig = `
## Maximum time to receive response.
# response_timeout = "5s"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
`
@@ -63,6 +57,8 @@ func (n *Apache) Description() string {
}
func (n *Apache) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
if len(n.Urls) == 0 {
n.Urls = []string{"http://localhost/server-status?auto"}
}
@@ -78,8 +74,6 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
n.client = client
}
var wg sync.WaitGroup
wg.Add(len(n.Urls))
for _, u := range n.Urls {
addr, err := url.Parse(u)
if err != nil {
@@ -87,6 +81,7 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
continue
}
wg.Add(1)
go func(addr *url.URL) {
defer wg.Done()
acc.AddError(n.gatherUrl(addr, acc))
@@ -98,8 +93,7 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
}
func (n *Apache) createHttpClient() (*http.Client, error) {
tlsCfg, err := internal.GetTLSConfig(
n.SSLCert, n.SSLKey, n.SSLCA, n.InsecureSkipVerify)
tlsCfg, err := n.ClientConfig.TLSConfig()
if err != nil {
return nil, err
}
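
The reordering above is the actual deadlock fix: the old code called `wg.Add(len(n.Urls))` before validating the URLs, so a `continue` on a parse error left the counter one ahead of the `wg.Done()` calls and `wg.Wait()` blocked forever. A minimal sketch of the corrected pattern, assuming a trivial stand-in for the gather work:

```go
package main

import (
	"fmt"
	"net/url"
	"sync"
)

func main() {
	urls := []string{"http://localhost/server-status?auto", "://not-a-url"}

	var wg sync.WaitGroup
	for _, u := range urls {
		addr, err := url.Parse(u)
		if err != nil {
			// Skipping before wg.Add leaves no orphaned counter
			// increment, so wg.Wait below cannot deadlock.
			fmt.Println("skipping invalid url:", err)
			continue
		}
		wg.Add(1) // count only goroutines that actually start
		go func(addr *url.URL) {
			defer wg.Done()
			fmt.Println("would gather from", addr) // stand-in for gatherUrl
		}(addr)
	}
	wg.Wait()
}
```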

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,280 @@
package aurora
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
type RoleType int
const (
Unknown RoleType = iota
Leader
Follower
)
func (r RoleType) String() string {
switch r {
case Leader:
return "leader"
case Follower:
return "follower"
default:
return "unknown"
}
}
var (
defaultTimeout = 5 * time.Second
defaultRoles = []string{"leader", "follower"}
)
type Vars map[string]interface{}
type Aurora struct {
Schedulers []string `toml:"schedulers"`
Roles []string `toml:"roles"`
Timeout internal.Duration `toml:"timeout"`
Username string `toml:"username"`
Password string `toml:"password"`
tls.ClientConfig
client *http.Client
urls []*url.URL
}
var sampleConfig = `
## Schedulers are the base addresses of your Aurora Schedulers
schedulers = ["http://127.0.0.1:8081"]
## Set of role types to collect metrics from.
##
## The scheduler roles are checked each interval by contacting the
## scheduler nodes; zookeeper is not contacted.
# roles = ["leader", "follower"]
## Timeout is the max time for total network operations.
# timeout = "5s"
## Username and password are sent using HTTP Basic Auth.
# username = "username"
# password = "pa$$word"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
`
func (a *Aurora) SampleConfig() string {
return sampleConfig
}
func (a *Aurora) Description() string {
return "Gather metrics from Apache Aurora schedulers"
}
func (a *Aurora) Gather(acc telegraf.Accumulator) error {
if a.client == nil {
err := a.initialize()
if err != nil {
return err
}
}
ctx, cancel := context.WithTimeout(context.Background(), a.Timeout.Duration)
defer cancel()
var wg sync.WaitGroup
for _, u := range a.urls {
wg.Add(1)
go func(u *url.URL) {
defer wg.Done()
role, err := a.gatherRole(ctx, u)
if err != nil {
acc.AddError(fmt.Errorf("%s: %v", u, err))
return
}
if !a.roleEnabled(role) {
return
}
err = a.gatherScheduler(ctx, u, role, acc)
if err != nil {
acc.AddError(fmt.Errorf("%s: %v", u, err))
}
}(u)
}
wg.Wait()
return nil
}
func (a *Aurora) initialize() error {
tlsCfg, err := a.ClientConfig.TLSConfig()
if err != nil {
return err
}
client := &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: tlsCfg,
},
}
urls := make([]*url.URL, 0, len(a.Schedulers))
for _, s := range a.Schedulers {
loc, err := url.Parse(s)
if err != nil {
return err
}
urls = append(urls, loc)
}
if a.Timeout.Duration < time.Second {
a.Timeout.Duration = defaultTimeout
}
if len(a.Roles) == 0 {
a.Roles = defaultRoles
}
a.client = client
a.urls = urls
return nil
}
func (a *Aurora) roleEnabled(role RoleType) bool {
if len(a.Roles) == 0 {
return true
}
for _, v := range a.Roles {
if role.String() == v {
return true
}
}
return false
}
func (a *Aurora) gatherRole(ctx context.Context, origin *url.URL) (RoleType, error) {
loc := *origin
loc.Path = "leaderhealth"
req, err := http.NewRequest("GET", loc.String(), nil)
if err != nil {
return Unknown, err
}
if a.Username != "" || a.Password != "" {
req.SetBasicAuth(a.Username, a.Password)
}
req.Header.Add("Accept", "text/plain")
resp, err := a.client.Do(req.WithContext(ctx))
if err != nil {
return Unknown, err
}
resp.Body.Close()
switch resp.StatusCode {
case http.StatusOK:
return Leader, nil
case http.StatusBadGateway:
fallthrough
case http.StatusServiceUnavailable:
return Follower, nil
default:
return Unknown, fmt.Errorf("%v", resp.Status)
}
}
func (a *Aurora) gatherScheduler(
ctx context.Context, origin *url.URL, role RoleType, acc telegraf.Accumulator,
) error {
loc := *origin
loc.Path = "vars.json"
req, err := http.NewRequest("GET", loc.String(), nil)
if err != nil {
return err
}
if a.Username != "" || a.Password != "" {
req.SetBasicAuth(a.Username, a.Password)
}
req.Header.Add("Accept", "application/json")
resp, err := a.client.Do(req.WithContext(ctx))
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("%v", resp.Status)
}
var vars Vars
decoder := json.NewDecoder(resp.Body)
decoder.UseNumber()
err = decoder.Decode(&vars)
if err != nil {
return fmt.Errorf("decoding response: %v", err)
}
var fields = make(map[string]interface{}, len(vars))
for k, v := range vars {
switch v := v.(type) {
case json.Number:
// Aurora encodes numbers as you would specify them as a literal,
// use this to determine if a value is a float or int.
if strings.ContainsAny(v.String(), ".eE") {
fv, err := v.Float64()
if err != nil {
acc.AddError(err)
continue
}
fields[k] = fv
} else {
fi, err := v.Int64()
if err != nil {
acc.AddError(err)
continue
}
fields[k] = fi
}
default:
continue
}
}
acc.AddFields("aurora",
fields,
map[string]string{
"scheduler": origin.String(),
"role": role.String(),
},
)
return nil
}
func init() {
inputs.Add("aurora", func() telegraf.Input {
return &Aurora{}
})
}
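
The `json.Number` handling in `gatherScheduler` is the interesting part: `decoder.UseNumber()` preserves each numeric literal as text, so the plugin can emit an integer field when Aurora wrote an integer and a float field otherwise. A self-contained sketch of the same discrimination, with an invented payload:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	body := `{"events": 2958, "rate": 1.5, "micros": 1485.0}`

	var vars map[string]interface{}
	dec := json.NewDecoder(strings.NewReader(body))
	dec.UseNumber() // keep numbers as json.Number instead of float64
	if err := dec.Decode(&vars); err != nil {
		panic(err)
	}

	for k, v := range vars {
		n, ok := v.(json.Number)
		if !ok {
			continue // non-numeric values are skipped, as in the plugin
		}
		// A literal containing '.', 'e', or 'E' was written as a float.
		if strings.ContainsAny(n.String(), ".eE") {
			f, _ := n.Float64()
			fmt.Printf("%s -> float64 %v\n", k, f)
		} else {
			i, _ := n.Int64()
			fmt.Printf("%s -> int64 %v\n", k, i)
		}
	}
}
```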

View File

@@ -0,0 +1,259 @@
package aurora
import (
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
type (
TestHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request)
CheckFunc func(t *testing.T, err error, acc *testutil.Accumulator)
)
func TestAurora(t *testing.T) {
ts := httptest.NewServer(http.NotFoundHandler())
defer ts.Close()
u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
require.NoError(t, err)
tests := []struct {
name string
plugin *Aurora
schedulers []string
roles []string
leaderhealth TestHandlerFunc
varsjson TestHandlerFunc
check CheckFunc
}{
{
name: "minimal",
leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
},
varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
body := `{
"variable_scrape_events": 2958,
"variable_scrape_events_per_sec": 1.0,
"variable_scrape_micros_per_event": 1484.0,
"variable_scrape_micros_total": 4401084,
"variable_scrape_micros_total_per_sec": 1485.0
}`
w.WriteHeader(http.StatusOK)
w.Write([]byte(body))
},
check: func(t *testing.T, err error, acc *testutil.Accumulator) {
require.NoError(t, err)
require.Equal(t, 1, len(acc.Metrics))
acc.AssertContainsTaggedFields(t,
"aurora",
map[string]interface{}{
"variable_scrape_events": int64(2958),
"variable_scrape_events_per_sec": 1.0,
"variable_scrape_micros_per_event": 1484.0,
"variable_scrape_micros_total": int64(4401084),
"variable_scrape_micros_total_per_sec": 1485.0,
},
map[string]string{
"scheduler": u.String(),
"role": "leader",
},
)
},
},
{
name: "disabled role",
roles: []string{"leader"},
leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusServiceUnavailable)
},
check: func(t *testing.T, err error, acc *testutil.Accumulator) {
require.NoError(t, err)
require.NoError(t, acc.FirstError())
require.Equal(t, 0, len(acc.Metrics))
},
},
{
name: "no metrics available",
leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
},
varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte("{}"))
},
check: func(t *testing.T, err error, acc *testutil.Accumulator) {
require.NoError(t, err)
require.NoError(t, acc.FirstError())
require.Equal(t, 0, len(acc.Metrics))
},
},
{
name: "string metrics skipped",
leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
},
varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
body := `{
"foo": "bar"
}`
w.WriteHeader(http.StatusOK)
w.Write([]byte(body))
},
check: func(t *testing.T, err error, acc *testutil.Accumulator) {
require.NoError(t, err)
require.NoError(t, acc.FirstError())
require.Equal(t, 0, len(acc.Metrics))
},
},
{
name: "float64 unparseable",
leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
},
varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
// too large
body := `{
"foo": 1e309
}`
w.WriteHeader(http.StatusOK)
w.Write([]byte(body))
},
check: func(t *testing.T, err error, acc *testutil.Accumulator) {
require.NoError(t, err)
require.Error(t, acc.FirstError())
require.Equal(t, 0, len(acc.Metrics))
},
},
{
name: "int64 unparseable",
leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
},
varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
// too large
body := `{
"foo": 9223372036854775808
}`
w.WriteHeader(http.StatusOK)
w.Write([]byte(body))
},
check: func(t *testing.T, err error, acc *testutil.Accumulator) {
require.NoError(t, err)
require.Error(t, acc.FirstError())
require.Equal(t, 0, len(acc.Metrics))
},
},
{
name: "bad json",
leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
},
varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
body := `{]`
w.WriteHeader(http.StatusOK)
w.Write([]byte(body))
},
check: func(t *testing.T, err error, acc *testutil.Accumulator) {
require.NoError(t, err)
require.Error(t, acc.FirstError())
require.Equal(t, 0, len(acc.Metrics))
},
},
{
name: "wrong status code",
leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
},
varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
body := `{
"value": 42
}`
w.WriteHeader(http.StatusServiceUnavailable)
w.Write([]byte(body))
},
check: func(t *testing.T, err error, acc *testutil.Accumulator) {
require.NoError(t, err)
require.Error(t, acc.FirstError())
require.Equal(t, 0, len(acc.Metrics))
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/leaderhealth":
tt.leaderhealth(t, w, r)
case "/vars.json":
tt.varsjson(t, w, r)
default:
w.WriteHeader(http.StatusNotFound)
}
})
var acc testutil.Accumulator
plugin := &Aurora{}
plugin.Schedulers = []string{u.String()}
plugin.Roles = tt.roles
err := plugin.Gather(&acc)
tt.check(t, err, &acc)
})
}
}
func TestBasicAuth(t *testing.T) {
ts := httptest.NewServer(http.NotFoundHandler())
defer ts.Close()
u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
require.NoError(t, err)
tests := []struct {
name string
username string
password string
}{
{
name: "no auth",
},
{
name: "basic auth",
username: "username",
password: "pa$$word",
},
{
name: "username only",
username: "username",
},
{
name: "password only",
password: "pa$$word",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
username, password, _ := r.BasicAuth()
require.Equal(t, tt.username, username)
require.Equal(t, tt.password, password)
w.WriteHeader(http.StatusOK)
w.Write([]byte("{}"))
})
var acc testutil.Accumulator
plugin := &Aurora{}
plugin.Schedulers = []string{u.String()}
plugin.Username = tt.username
plugin.Password = tt.password
err := plugin.Gather(&acc)
require.NoError(t, err)
})
}
}

View File

@@ -0,0 +1,98 @@
# Telegraf Plugin: Burrow
Collect Kafka topic, consumer and partition status
via [Burrow](https://github.com/linkedin/Burrow) HTTP [API](https://github.com/linkedin/Burrow/wiki/HTTP-Endpoint).
Supported Burrow version: `1.x`
### Configuration
```toml
## Burrow API endpoints in format "schema://host:port".
## Default is "http://localhost:8000".
servers = ["http://localhost:8000"]
## Override Burrow API prefix.
## Useful when Burrow is behind a reverse proxy.
# api_prefix = "/v3/kafka"
## Maximum time to receive response.
# response_timeout = "5s"
## Limit per-server concurrent connections.
## Useful in case of a large number of topics or consumer groups.
# concurrent_connections = 20
## Filter clusters, default is no filtering.
## Values can be specified as glob patterns.
# clusters_include = []
# clusters_exclude = []
## Filter consumer groups, default is no filtering.
## Values can be specified as glob patterns.
# groups_include = []
# groups_exclude = []
## Filter topics, default is no filtering.
## Values can be specified as glob patterns.
# topics_include = []
# topics_exclude = []
## Credentials for basic HTTP authentication.
# username = ""
# password = ""
## Optional SSL config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
# insecure_skip_verify = false
```
### Partition Status mappings
* `OK` = 1
* `NOT_FOUND` = 2
* `WARN` = 3
* `ERR` = 4
* `STOP` = 5
* `STALL` = 6
> Any unknown status value is mapped to 0
### Fields
* `burrow_group` (one event per consumer group)
- status (string, see Partition Status mappings)
- status_code (int, `1..6`, see Partition Status mappings)
- partition_count (int, `number of partitions`)
- total_lag (int64, `totallag`)
- lag (int64, `maxlag.current_lag || 0`)
- offset (int64, maximum of `end.offset` across partitions)
- timestamp (int64, maximum of `end.timestamp` across partitions)
* `burrow_partition` (one event per topic partition)
- status (string, see Partition Status mappings)
- status_code (int, `1..6`, see Partition Status mappings)
- lag (int64, `current_lag || 0`)
- offset (int64, `end.offset`)
- timestamp (int64, `end.timestamp`)
* `burrow_topic` (one event per topic offset)
- offset (int64)
### Tags
* `burrow_group`
- cluster (string)
- group (string)
* `burrow_partition`
- cluster (string)
- group (string)
- topic (string)
- partition (int)
* `burrow_topic`
- cluster (string)
- topic (string)
- partition (int)

View File

@@ -0,0 +1,485 @@
package burrow
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
const (
defaultBurrowPrefix = "/v3/kafka"
defaultConcurrentConnections = 20
defaultResponseTimeout = time.Second * 5
defaultServer = "http://localhost:8000"
)
const configSample = `
## Burrow API endpoints in format "schema://host:port".
## Default is "http://localhost:8000".
servers = ["http://localhost:8000"]
## Override Burrow API prefix.
## Useful when Burrow is behind a reverse proxy.
# api_prefix = "/v3/kafka"
## Maximum time to receive response.
# response_timeout = "5s"
## Limit per-server concurrent connections.
## Useful in case of a large number of topics or consumer groups.
# concurrent_connections = 20
## Filter clusters, default is no filtering.
## Values can be specified as glob patterns.
# clusters_include = []
# clusters_exclude = []
## Filter consumer groups, default is no filtering.
## Values can be specified as glob patterns.
# groups_include = []
# groups_exclude = []
## Filter topics, default is no filtering.
## Values can be specified as glob patterns.
# topics_include = []
# topics_exclude = []
## Credentials for basic HTTP authentication.
# username = ""
# password = ""
## Optional SSL config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
# insecure_skip_verify = false
`
type (
burrow struct {
tls.ClientConfig
Servers []string
Username string
Password string
ResponseTimeout internal.Duration
ConcurrentConnections int
APIPrefix string `toml:"api_prefix"`
ClustersExclude []string
ClustersInclude []string
GroupsExclude []string
GroupsInclude []string
TopicsExclude []string
TopicsInclude []string
client *http.Client
filterClusters filter.Filter
filterGroups filter.Filter
filterTopics filter.Filter
}
// response
apiResponse struct {
Clusters []string `json:"clusters"`
Groups []string `json:"consumers"`
Topics []string `json:"topics"`
Offsets []int64 `json:"offsets"`
Status apiStatusResponse `json:"status"`
}
// response: status field
apiStatusResponse struct {
Partitions []apiStatusResponseLag `json:"partitions"`
Status string `json:"status"`
PartitionCount int `json:"partition_count"`
Maxlag *apiStatusResponseLag `json:"maxlag"`
TotalLag int64 `json:"totallag"`
}
// response: lag field
apiStatusResponseLag struct {
Topic string `json:"topic"`
Partition int32 `json:"partition"`
Status string `json:"status"`
Start apiStatusResponseLagItem `json:"start"`
End apiStatusResponseLagItem `json:"end"`
CurrentLag int64 `json:"current_lag"`
}
// response: lag field item
apiStatusResponseLagItem struct {
Offset int64 `json:"offset"`
Timestamp int64 `json:"timestamp"`
Lag int64 `json:"lag"`
}
)
func init() {
inputs.Add("burrow", func() telegraf.Input {
return &burrow{}
})
}
func (b *burrow) SampleConfig() string {
return configSample
}
func (b *burrow) Description() string {
return "Collect Kafka topics and consumers status from Burrow HTTP API."
}
func (b *burrow) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
if len(b.Servers) == 0 {
b.Servers = []string{defaultServer}
}
if b.client == nil {
b.setDefaults()
if err := b.compileGlobs(); err != nil {
return err
}
c, err := b.createClient()
if err != nil {
return err
}
b.client = c
}
for _, addr := range b.Servers {
u, err := url.Parse(addr)
if err != nil {
acc.AddError(fmt.Errorf("unable to parse address '%s': %s", addr, err))
continue
}
if u.Path == "" {
u.Path = b.APIPrefix
}
wg.Add(1)
go func(u *url.URL) {
defer wg.Done()
acc.AddError(b.gatherServer(u, acc))
}(u)
}
wg.Wait()
return nil
}
func (b *burrow) setDefaults() {
if b.APIPrefix == "" {
b.APIPrefix = defaultBurrowPrefix
}
if b.ConcurrentConnections < 1 {
b.ConcurrentConnections = defaultConcurrentConnections
}
if b.ResponseTimeout.Duration < time.Second {
b.ResponseTimeout = internal.Duration{
Duration: defaultResponseTimeout,
}
}
}
func (b *burrow) compileGlobs() error {
var err error
// compile glob patterns
b.filterClusters, err = filter.NewIncludeExcludeFilter(b.ClustersInclude, b.ClustersExclude)
if err != nil {
return err
}
b.filterGroups, err = filter.NewIncludeExcludeFilter(b.GroupsInclude, b.GroupsExclude)
if err != nil {
return err
}
b.filterTopics, err = filter.NewIncludeExcludeFilter(b.TopicsInclude, b.TopicsExclude)
if err != nil {
return err
}
return nil
}
func (b *burrow) createClient() (*http.Client, error) {
tlsCfg, err := b.ClientConfig.TLSConfig()
if err != nil {
return nil, err
}
client := &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsCfg,
},
Timeout: b.ResponseTimeout.Duration,
}
return client, nil
}
func (b *burrow) getResponse(u *url.URL) (*apiResponse, error) {
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
if b.Username != "" {
req.SetBasicAuth(b.Username, b.Password)
}
res, err := b.client.Do(req)
if err != nil {
return nil, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, fmt.Errorf("wrong response: %d", res.StatusCode)
}
ares := &apiResponse{}
dec := json.NewDecoder(res.Body)
return ares, dec.Decode(ares)
}
func (b *burrow) gatherServer(src *url.URL, acc telegraf.Accumulator) error {
var wg sync.WaitGroup
r, err := b.getResponse(src)
if err != nil {
return err
}
guard := make(chan struct{}, b.ConcurrentConnections)
for _, cluster := range r.Clusters {
if !b.filterClusters.Match(cluster) {
continue
}
wg.Add(1)
go func(cluster string) {
defer wg.Done()
// fetch topic list
// endpoint: <api_prefix>/(cluster)/topic
ut := appendPathToURL(src, cluster, "topic")
b.gatherTopics(guard, ut, cluster, acc)
}(cluster)
wg.Add(1)
go func(cluster string) {
defer wg.Done()
// fetch consumer group list
// endpoint: <api_prefix>/(cluster)/consumer
uc := appendPathToURL(src, cluster, "consumer")
b.gatherGroups(guard, uc, cluster, acc)
}(cluster)
}
wg.Wait()
return nil
}
func (b *burrow) gatherTopics(guard chan struct{}, src *url.URL, cluster string, acc telegraf.Accumulator) {
var wg sync.WaitGroup
r, err := b.getResponse(src)
if err != nil {
acc.AddError(err)
return
}
for _, topic := range r.Topics {
if !b.filterTopics.Match(topic) {
continue
}
guard <- struct{}{}
wg.Add(1)
go func(topic string) {
defer func() {
<-guard
wg.Done()
}()
// fetch topic offsets
// endpoint: <api_prefix>/<cluster>/topic/<topic>
tu := appendPathToURL(src, topic)
tr, err := b.getResponse(tu)
if err != nil {
acc.AddError(err)
return
}
b.genTopicMetrics(tr, cluster, topic, acc)
}(topic)
}
wg.Wait()
}
func (b *burrow) genTopicMetrics(r *apiResponse, cluster, topic string, acc telegraf.Accumulator) {
for i, offset := range r.Offsets {
tags := map[string]string{
"cluster": cluster,
"topic": topic,
"partition": strconv.Itoa(i),
}
acc.AddFields(
"burrow_topic",
map[string]interface{}{
"offset": offset,
},
tags,
)
}
}
func (b *burrow) gatherGroups(guard chan struct{}, src *url.URL, cluster string, acc telegraf.Accumulator) {
var wg sync.WaitGroup
r, err := b.getResponse(src)
if err != nil {
acc.AddError(err)
return
}
for _, group := range r.Groups {
if !b.filterGroups.Match(group) {
continue
}
guard <- struct{}{}
wg.Add(1)
go func(group string) {
defer func() {
<-guard
wg.Done()
}()
// fetch consumer group status
// endpoint: <api_prefix>/<cluster>/consumer/<group>/lag
gl := appendPathToURL(src, group, "lag")
gr, err := b.getResponse(gl)
if err != nil {
acc.AddError(err)
return
}
b.genGroupStatusMetrics(gr, cluster, group, acc)
b.genGroupLagMetrics(gr, cluster, group, acc)
}(group)
}
wg.Wait()
}
func (b *burrow) genGroupStatusMetrics(r *apiResponse, cluster, group string, acc telegraf.Accumulator) {
partitionCount := r.Status.PartitionCount
if partitionCount == 0 {
partitionCount = len(r.Status.Partitions)
}
// get max timestamp and offset from partitions list
offset := int64(0)
timestamp := int64(0)
for _, partition := range r.Status.Partitions {
if partition.End.Offset > offset {
offset = partition.End.Offset
}
if partition.End.Timestamp > timestamp {
timestamp = partition.End.Timestamp
}
}
lag := int64(0)
if r.Status.Maxlag != nil {
lag = r.Status.Maxlag.CurrentLag
}
acc.AddFields(
"burrow_group",
map[string]interface{}{
"status": r.Status.Status,
"status_code": mapStatusToCode(r.Status.Status),
"partition_count": partitionCount,
"total_lag": r.Status.TotalLag,
"lag": lag,
"offset": offset,
"timestamp": timestamp,
},
map[string]string{
"cluster": cluster,
"group": group,
},
)
}
func (b *burrow) genGroupLagMetrics(r *apiResponse, cluster, group string, acc telegraf.Accumulator) {
for _, partition := range r.Status.Partitions {
acc.AddFields(
"burrow_partition",
map[string]interface{}{
"status": partition.Status,
"status_code": mapStatusToCode(partition.Status),
"lag": partition.CurrentLag,
"offset": partition.End.Offset,
"timestamp": partition.End.Timestamp,
},
map[string]string{
"cluster": cluster,
"group": group,
"topic": partition.Topic,
"partition": strconv.FormatInt(int64(partition.Partition), 10),
},
)
}
}
func appendPathToURL(src *url.URL, parts ...string) *url.URL {
dst := new(url.URL)
*dst = *src
for i, part := range parts {
parts[i] = url.PathEscape(part)
}
ext := strings.Join(parts, "/")
dst.Path = fmt.Sprintf("%s/%s", src.Path, ext)
return dst
}
func mapStatusToCode(src string) int {
switch src {
case "OK":
return 1
case "NOT_FOUND":
return 2
case "WARN":
return 3
case "ERR":
return 4
case "STOP":
return 5
case "STALL":
return 6
default:
return 0
}
}
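
`concurrent_connections` is enforced with the buffered-channel semaphore used in `gatherTopics` and `gatherGroups`: a worker must send into `guard` before making a request and receives from it when finished, so at most `cap(guard)` requests are in flight per server. A reduced sketch of the pattern; `fetch` is a hypothetical stand-in for a Burrow API call:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// fetch is a placeholder for one HTTP request to Burrow.
func fetch(topic string) {
	time.Sleep(100 * time.Millisecond)
	fmt.Println("fetched", topic)
}

func main() {
	const maxInFlight = 2 // plays the role of concurrent_connections
	topics := []string{"topicA", "topicB", "topicC", "topicD"}

	guard := make(chan struct{}, maxInFlight)
	var wg sync.WaitGroup
	for _, topic := range topics {
		guard <- struct{}{} // blocks once maxInFlight workers are active
		wg.Add(1)
		go func(topic string) {
			defer func() {
				<-guard // release the slot for the next worker
				wg.Done()
			}()
			fetch(topic)
		}(topic)
	}
	wg.Wait()
}
```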

View File

@@ -0,0 +1,285 @@
package burrow
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"strings"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
// remap uri to json file, eg: /v3/kafka -> ./testdata/v3_kafka.json
func getResponseJSON(requestURI string) ([]byte, int) {
uri := strings.TrimLeft(requestURI, "/")
mappedFile := strings.Replace(uri, "/", "_", -1)
jsonFile := fmt.Sprintf("./testdata/%s.json", mappedFile)
code := 200
_, err := os.Stat(jsonFile)
if err != nil {
code = 404
jsonFile = "./testdata/error.json"
}
// respond with file
b, _ := ioutil.ReadFile(jsonFile)
return b, code
}
// return mocked HTTP server
func getHTTPServer() *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
body, code := getResponseJSON(r.RequestURI)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
w.Write(body)
}))
}
// return mocked HTTP server with basic auth
func getHTTPServerBasicAuth() *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`)
username, password, authOK := r.BasicAuth()
if !authOK {
http.Error(w, "Not authorized", 401)
return
}
if username != "test" && password != "test" {
http.Error(w, "Not authorized", 401)
return
}
// ok, continue
body, code := getResponseJSON(r.RequestURI)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
w.Write(body)
}))
}
// test burrow_topic measurement
func TestBurrowTopic(t *testing.T) {
s := getHTTPServer()
defer s.Close()
plugin := &burrow{Servers: []string{s.URL}}
acc := &testutil.Accumulator{}
plugin.Gather(acc)
fields := []map[string]interface{}{
// topicA
{"offset": int64(459178195)},
{"offset": int64(459178022)},
{"offset": int64(456491598)},
}
tags := []map[string]string{
// topicA
{"cluster": "clustername1", "topic": "topicA", "partition": "0"},
{"cluster": "clustername1", "topic": "topicA", "partition": "1"},
{"cluster": "clustername1", "topic": "topicA", "partition": "2"},
}
require.Empty(t, acc.Errors)
require.Equal(t, true, acc.HasMeasurement("burrow_topic"))
for i := 0; i < len(fields); i++ {
acc.AssertContainsTaggedFields(t, "burrow_topic", fields[i], tags[i])
}
}
// test burrow_partition measurement
func TestBurrowPartition(t *testing.T) {
s := getHTTPServer()
defer s.Close()
plugin := &burrow{
Servers: []string{s.URL},
}
acc := &testutil.Accumulator{}
plugin.Gather(acc)
fields := []map[string]interface{}{
{
"status": "OK",
"status_code": 1,
"lag": int64(0),
"offset": int64(431323195),
"timestamp": int64(1515609490008),
},
{
"status": "OK",
"status_code": 1,
"lag": int64(0),
"offset": int64(431322962),
"timestamp": int64(1515609490008),
},
{
"status": "OK",
"status_code": 1,
"lag": int64(0),
"offset": int64(428636563),
"timestamp": int64(1515609490008),
},
}
tags := []map[string]string{
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "0"},
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "1"},
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "2"},
}
require.Empty(t, acc.Errors)
require.Equal(t, true, acc.HasMeasurement("burrow_partition"))
for i := 0; i < len(fields); i++ {
acc.AssertContainsTaggedFields(t, "burrow_partition", fields[i], tags[i])
}
}
// burrow_group
func TestBurrowGroup(t *testing.T) {
s := getHTTPServer()
defer s.Close()
plugin := &burrow{
Servers: []string{s.URL},
}
acc := &testutil.Accumulator{}
plugin.Gather(acc)
fields := []map[string]interface{}{
{
"status": "OK",
"status_code": 1,
"partition_count": 3,
"total_lag": int64(0),
"lag": int64(0),
"offset": int64(431323195),
"timestamp": int64(1515609490008),
},
}
tags := []map[string]string{
{"cluster": "clustername1", "group": "group1"},
}
require.Empty(t, acc.Errors)
require.Equal(t, true, acc.HasMeasurement("burrow_group"))
for i := 0; i < len(fields); i++ {
acc.AssertContainsTaggedFields(t, "burrow_group", fields[i], tags[i])
}
}
// collect from multiple servers
func TestMultipleServers(t *testing.T) {
s1 := getHTTPServer()
defer s1.Close()
s2 := getHTTPServer()
defer s2.Close()
plugin := &burrow{
Servers: []string{s1.URL, s2.URL},
}
acc := &testutil.Accumulator{}
plugin.Gather(acc)
require.Exactly(t, 14, len(acc.Metrics))
require.Empty(t, acc.Errors)
}
// collect multiple times
func TestMultipleRuns(t *testing.T) {
s := getHTTPServer()
defer s.Close()
plugin := &burrow{
Servers: []string{s.URL},
}
for i := 0; i < 4; i++ {
acc := &testutil.Accumulator{}
plugin.Gather(acc)
require.Exactly(t, 7, len(acc.Metrics))
require.Empty(t, acc.Errors)
}
}
// collect from http basic auth server
func TestBasicAuthConfig(t *testing.T) {
s := getHTTPServerBasicAuth()
defer s.Close()
plugin := &burrow{
Servers: []string{s.URL},
Username: "test",
Password: "test",
}
acc := &testutil.Accumulator{}
plugin.Gather(acc)
require.Exactly(t, 7, len(acc.Metrics))
require.Empty(t, acc.Errors)
}
// collect from whitelisted clusters
func TestFilterClusters(t *testing.T) {
s := getHTTPServer()
defer s.Close()
plugin := &burrow{
Servers: []string{s.URL},
ClustersInclude: []string{"wrongname*"}, // clustername1 -> no match
}
acc := &testutil.Accumulator{}
plugin.Gather(acc)
// no match by cluster
require.Exactly(t, 0, len(acc.Metrics))
require.Empty(t, acc.Errors)
}
// collect from whitelisted groups
func TestFilterGroups(t *testing.T) {
s := getHTTPServer()
defer s.Close()
plugin := &burrow{
Servers: []string{s.URL},
GroupsInclude: []string{"group?"}, // group1 -> match
TopicsExclude: []string{"*"}, // exclude all
}
acc := &testutil.Accumulator{}
plugin.Gather(acc)
require.Exactly(t, 4, len(acc.Metrics))
require.Empty(t, acc.Errors)
}
// collect from whitelisted topics
func TestFilterTopics(t *testing.T) {
s := getHTTPServer()
defer s.Close()
plugin := &burrow{
Servers: []string{s.URL},
TopicsInclude: []string{"topic?"}, // topicA -> match
GroupsExclude: []string{"*"}, // exclude all
}
acc := &testutil.Accumulator{}
plugin.Gather(acc)
require.Exactly(t, 3, len(acc.Metrics))
require.Empty(t, acc.Errors)
}

View File

@@ -0,0 +1,11 @@
{
"error": true,
"message": "Detailed error message",
"request": {
"uri": "/invalid/request",
"host": "responding.host.example.com",
"cluster": "",
"group": "",
"topic": ""
}
}

View File

@@ -0,0 +1,11 @@
{
"error": false,
"message": "cluster list returned",
"clusters": [
"clustername1"
],
"request": {
"url": "/v3/kafka",
"host": "example.com"
}
}

View File

@@ -0,0 +1,11 @@
{
"error": false,
"message": "consumer list returned",
"consumers": [
"group1"
],
"request": {
"url": "/v3/kafka/clustername1/consumer",
"host": "example.com"
}
}

View File

@@ -0,0 +1,90 @@
{
"error": false,
"message": "consumer status returned",
"status": {
"cluster": "clustername1",
"group": "group1",
"status": "OK",
"complete": 1,
"partitions": [
{
"topic": "topicA",
"partition": 0,
"owner": "kafka",
"status": "OK",
"start": {
"offset": 431323195,
"timestamp": 1515609445004,
"lag": 0
},
"end": {
"offset": 431323195,
"timestamp": 1515609490008,
"lag": 0
},
"current_lag": 0,
"complete": 1
},
{
"topic": "topicA",
"partition": 1,
"owner": "kafka",
"status": "OK",
"start": {
"offset": 431322962,
"timestamp": 1515609445004,
"lag": 0
},
"end": {
"offset": 431322962,
"timestamp": 1515609490008,
"lag": 0
},
"current_lag": 0,
"complete": 1
},
{
"topic": "topicA",
"partition": 2,
"owner": "kafka",
"status": "OK",
"start": {
"offset": 428636563,
"timestamp": 1515609445004,
"lag": 0
},
"end": {
"offset": 428636563,
"timestamp": 1515609490008,
"lag": 0
},
"current_lag": 0,
"complete": 1
}
],
"partition_count": 3,
"maxlag": {
"topic": "topicA",
"partition": 0,
"owner": "kafka",
"status": "OK",
"start": {
"offset": 431323195,
"timestamp": 1515609445004,
"lag": 0
},
"end": {
"offset": 431323195,
"timestamp": 1515609490008,
"lag": 0
},
"current_lag": 0,
"complete": 1
},
"totallag": 0
},
"request": {
"url": "/v3/kafka/clustername1/consumer/group1/lag",
"host": "example.com"
}
}

View File

@@ -0,0 +1,11 @@
{
"error": false,
"message": "topic list returned",
"topics": [
"topicA"
],
"request": {
"url": "/v3/kafka/clustername1/topic",
"host": "example.com"
}
}

View File

@@ -0,0 +1,13 @@
{
"error": false,
"message": "topic offsets returned",
"offsets": [
459178195,
459178022,
456491598
],
"request": {
"url": "/v3/kafka/clustername1/topic/topicA",
"host": "example.com"
}
}

View File

@@ -1,5 +1,8 @@
# Telegraf plugin: Cassandra
### **Deprecated in version 1.7**: Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin with the [cassandra.conf](/plugins/inputs/jolokia2/examples/cassandra.conf) example configuration.
#### Plugin arguments:
- **context** string: Context root used for jolokia url
- **servers** []string: List of servers with the format "<user:passwd@><host>:port"

View File

@@ -4,12 +4,14 @@ import (
"encoding/json"
"errors"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"io/ioutil"
"log"
"net/http"
"net/url"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
type JolokiaClient interface {
@@ -60,7 +62,8 @@ func newCassandraMetric(host string, metric string,
func addValuesAsFields(values map[string]interface{}, fields map[string]interface{},
mname string) {
for k, v := range values {
if v != nil {
switch v.(type) {
case int64, float64, string, bool:
fields[mname+"_"+k] = v
}
}
@@ -117,7 +120,7 @@ func (j javaMetric) addTagsFields(out map[string]interface{}) {
switch t := values.(type) {
case map[string]interface{}:
addValuesAsFields(values.(map[string]interface{}), fields, attribute)
case interface{}:
case int64, float64, string, bool:
fields[attribute] = t
}
j.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
@@ -172,7 +175,11 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
func (j *Cassandra) SampleConfig() string {
return `
# This is the context root used to compose the jolokia url
## DEPRECATED: The cassandra plugin has been deprecated. Please use the
## jolokia2 plugin instead.
##
## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
context = "/jolokia/read"
## List of cassandra servers exposing jolokia read service
servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
@@ -256,6 +263,16 @@ func parseServerTokens(server string) map[string]string {
return serverTokens
}
func (c *Cassandra) Start(acc telegraf.Accumulator) error {
log.Println("W! DEPRECATED: The cassandra plugin has been deprecated. " +
"Please use the jolokia2 plugin instead. " +
"https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2")
return nil
}
func (c *Cassandra) Stop() {
}
func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
context := c.Context
servers := c.Servers
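
The `addValuesAsFields` and `addTagsFields` hunks above tighten the value handling: instead of accepting anything non-nil, only the scalar types Telegraf can store as field values (int64, float64, string, bool) pass through, so nested maps from composite Jolokia attributes are dropped rather than emitted as unusable fields. A minimal sketch of that filtering, with invented sample data:

```go
package main

import "fmt"

func main() {
	// Values as decoded by encoding/json: numbers arrive as float64,
	// composite Jolokia attributes arrive as nested maps.
	values := map[string]interface{}{
		"Count":     float64(42),
		"OneMinute": 0.5,
		"Composite": map[string]interface{}{"used": 1.0},
	}

	fields := make(map[string]interface{})
	for k, v := range values {
		switch v.(type) {
		case int64, float64, string, bool:
			fields["Latency_"+k] = v // only field-safe scalars pass
		}
	}
	fmt.Println(fields) // the nested "Composite" map is filtered out
}
```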

View File

@@ -27,12 +27,17 @@ report those stats already using StatsD protocol if needed.
## Data centre to query the health checks from
# datacentre = ""
## SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## If false, skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = true
## Consul checks' tag splitting
# When tags are formatted like "key:value" with ":" as a delimiter then
# they will be split and reported as proper key:value pairs in Telegraf
# tag_delimiter = ":"
```
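
With `tag_delimiter = ":"`, a check tag such as `env:sandbox` becomes the Telegraf tag `env=sandbox`, and a tag containing the delimiter more than once splits only on the first occurrence. A sketch of that behavior, mirroring the `SplitN` call in the plugin:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	tags := map[string]string{}
	for _, checkTag := range []string{"bar", "env:sandbox", "tagkey:value:stillvalue"} {
		// n=2 splits on the first delimiter only.
		parts := strings.SplitN(checkTag, ":", 2)
		if len(parts) == 2 {
			tags[parts[0]] = parts[1]
		} else {
			tags[checkTag] = checkTag // no delimiter: key equals value
		}
	}
	fmt.Println(tags)
	// map[bar:bar env:sandbox tagkey:value:stillvalue]
}
```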
### Metrics:

View File

@@ -2,10 +2,11 @@ package consul
import (
"net/http"
"strings"
"github.com/hashicorp/consul/api"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -16,15 +17,8 @@ type Consul struct {
Username string
Password string
Datacentre string
// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
tls.ClientConfig
TagDelimiter string
// client used to connect to Consul agent
client *api.Client
@@ -47,12 +41,17 @@ var sampleConfig = `
## Data centre to query the health checks from
# datacentre = ""
## SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## If false, skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = true
## Consul checks' tag splitting
# When tags are formatted like "key:value" with ":" as a delimiter then
# they will be split and reported as proper key:value pairs in Telegraf
# tag_delimiter = ":"
`
func (c *Consul) Description() string {
@@ -89,14 +88,12 @@ func (c *Consul) createAPIClient() (*api.Client, error) {
}
}
tlsCfg, err := internal.GetTLSConfig(
c.SSLCert, c.SSLKey, c.SSLCA, c.InsecureSkipVerify)
tlsCfg, err := c.ClientConfig.TLSConfig()
if err != nil {
return nil, err
}
config.HttpClient.Transport = &http.Transport{
config.Transport = &http.Transport{
TLSClientConfig: tlsCfg,
}
@@ -121,6 +118,19 @@ func (c *Consul) GatherHealthCheck(acc telegraf.Accumulator, checks []*api.Healt
tags["service_name"] = check.ServiceName
tags["check_id"] = check.CheckID
for _, checkTag := range check.ServiceTags {
if c.TagDelimiter != "" {
splittedTag := strings.SplitN(checkTag, c.TagDelimiter, 2)
if len(splittedTag) == 1 {
tags[checkTag] = checkTag
} else if len(splittedTag) == 2 {
tags[splittedTag[0]] = splittedTag[1]
}
} else {
tags[checkTag] = checkTag
}
}
acc.AddFields("consul_health_checks", record, tags)
}
}

View File

@@ -17,6 +17,7 @@ var sampleChecks = []*api.HealthCheck{
Output: "OK",
ServiceID: "foo.123",
ServiceName: "foo",
ServiceTags: []string{"bar", "env:sandbox", "tagkey:value:stillvalue"},
},
}
@@ -31,9 +32,12 @@ func TestGatherHealthCheck(t *testing.T) {
}
expectedTags := map[string]string{
"node": "localhost",
"service_name": "foo",
"check_id": "foo.health123",
"node": "localhost",
"service_name": "foo",
"check_id": "foo.health123",
"bar": "bar",
"env:sandbox": "env:sandbox",
"tagkey:value:stillvalue": "tagkey:value:stillvalue",
}
var acc testutil.Accumulator
@@ -43,3 +47,32 @@ func TestGatherHealthCheck(t *testing.T) {
acc.AssertContainsTaggedFields(t, "consul_health_checks", expectedFields, expectedTags)
}
func TestGatherHealthCheckWithDelimitedTags(t *testing.T) {
expectedFields := map[string]interface{}{
"check_name": "foo.health",
"status": "passing",
"passing": 1,
"critical": 0,
"warning": 0,
"service_id": "foo.123",
}
expectedTags := map[string]string{
"node": "localhost",
"service_name": "foo",
"check_id": "foo.health123",
"bar": "bar",
"env": "sandbox",
"tagkey": "value:stillvalue",
}
var acc testutil.Accumulator
consul := &Consul{
TagDelimiter: ":",
}
consul.GatherHealthCheck(&acc, sampleChecks)
acc.AssertContainsTaggedFields(t, "consul_health_checks", expectedFields, expectedTags)
}

View File

@@ -54,10 +54,10 @@ your database.
## Maximum time to receive a response from cluster.
# response_timeout = "20s"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## If false, skip chain & host verification
# insecure_skip_verify = true

View File

@@ -9,26 +9,11 @@ import (
"testing"
jwt "github.com/dgrijalva/jwt-go"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
const (
privateKey = `-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQCwlGyzVp9cqtwiNCgCnaR0kilPZhr4xFBcnXxvQ8/uzOHaWKxj
XWR38cKR3gPh5+4iSmzMdo3HDJM5ks6imXGnp+LPOA5iNewnpLNs7UxA2arwKH/6
4qIaAXAtf5jE46wZIMgc2EW9wGL3dxC0JY8EXPpBFB/3J8gADkorFR8lwwIDAQAB
AoGBAJaFHxfMmjHK77U0UnrQWFSKFy64cftmlL4t/Nl3q7L68PdIKULWZIMeEWZ4
I0UZiFOwr4em83oejQ1ByGSwekEuiWaKUI85IaHfcbt+ogp9hY/XbOEo56OPQUAd
bEZv1JqJOqta9Ug1/E1P9LjEEyZ5F5ubx7813rxAE31qKtKJAkEA1zaMlCWIr+Rj
hGvzv5rlHH3wbOB4kQFXO4nqj3J/ttzR5QiJW24STMDcbNngFlVcDVju56LrNTiD
dPh9qvl7nwJBANILguR4u33OMksEZTYB7nQZSurqXsq6382zH7pTl29ANQTROHaM
PKC8dnDWq8RGTqKuvWblIzzGIKqIMovZo10CQC96T0UXirITFolOL3XjvAuvFO1Q
EAkdXJs77805m0dCK+P1IChVfiAEpBw3bKJArpAbQIlFfdI953JUp5SieU0CQEub
BSSEKMjh/cxu6peEHnb/262vayuCFKkQPu1sxWewLuVrAe36EKCy9dcsDmv5+rgo
Odjdxc9Madm4aKlaT6kCQQCpAgeblDrrxTrNQ+Typzo37PlnQrvI+0EceAUuJ72G
P0a+YZUeHNRqT2pPN9lMTAZGGi3CtcF2XScbLNEBeXge
-----END RSA PRIVATE KEY-----`
)
var privateKey = testutil.NewPKI("../../../testutil/pki").ReadServerKey()
func TestLogin(t *testing.T) {
ts := httptest.NewServer(http.NotFoundHandler())

View File

@@ -13,6 +13,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -56,11 +57,7 @@ type DCOS struct {
MaxConnections int
ResponseTimeout internal.Duration
SSLCA string `toml:"ssl_ca"`
SSLCert string `toml:"ssl_cert"`
SSLKey string `toml:"ssl_key"`
InsecureSkipVerify bool `toml:"insecure_skip_verify"`
tls.ClientConfig
client Client
creds Credentials
@@ -107,10 +104,10 @@ var sampleConfig = `
## Maximum time to receive a response from cluster.
# response_timeout = "20s"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## If false, skip chain & host verification
# insecure_skip_verify = true
@@ -351,8 +348,7 @@ func (d *DCOS) init() error {
}
func (d *DCOS) createClient() (Client, error) {
tlsCfg, err := internal.GetTLSConfig(
d.SSLCert, d.SSLKey, d.SSLCA, d.InsecureSkipVerify)
tlsCfg, err := d.ClientConfig.TLSConfig()
if err != nil {
return nil, err
}

View File

@@ -3,9 +3,8 @@
The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wikipedia.org/wiki/Dig_\(command\))
### Configuration:
```
# Sample Config:
```toml
# Query given DNS server and gives statistics
[[inputs.dns_query]]
## servers to query
servers = ["8.8.8.8"]
@@ -27,29 +26,20 @@ The DNS plugin gathers DNS query times in milliseconds - like [Dig](https://en.wi
# timeout = 2
```
To query more than one record type, configure multiple plugin instances:
### Metrics:
- dns_query
- tags:
- server
- domain
- record_type
- result
- fields:
- query_time_ms (float)
- result_code (int, success = 0, timeout = 1, error = 2)
### Example Output:
```
[[inputs.dns_query]]
domains = ["mjasion.pl"]
servers = ["8.8.8.8", "8.8.4.4"]
record_type = "A"
[[inputs.dns_query]]
domains = ["mjasion.pl"]
servers = ["8.8.8.8", "8.8.4.4"]
record_type = "MX"
```
### Tags:
- server
- domain
- record_type
### Example output:
```
telegraf --input-filter dns_query --test
> dns_query,domain=mjasion.pl,record_type=A,server=8.8.8.8 query_time_ms=67.189842 1456082743585760680
dns_query,domain=mjasion.pl,record_type=A,server=8.8.8.8 query_time_ms=67.189842 1456082743585760680
```

View File

@@ -13,6 +13,14 @@ import (
"github.com/influxdata/telegraf/plugins/inputs"
)
type ResultType uint64
const (
Success ResultType = iota
Timeout
Error
)
type DnsQuery struct {
// Domains or subdomains to query
Domains []string
@@ -66,15 +74,24 @@ func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
for _, domain := range d.Domains {
for _, server := range d.Servers {
dnsQueryTime, err := d.getDnsQueryTime(domain, server)
acc.AddError(err)
fields := make(map[string]interface{}, 2)
tags := map[string]string{
"server": server,
"domain": domain,
"record_type": d.RecordType,
}
fields := map[string]interface{}{"query_time_ms": dnsQueryTime}
dnsQueryTime, err := d.getDnsQueryTime(domain, server)
if err == nil {
setResult(Success, fields, tags)
fields["query_time_ms"] = dnsQueryTime
} else if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {
setResult(Timeout, fields, tags)
} else if err != nil {
setResult(Error, fields, tags)
acc.AddError(err)
}
acc.AddFields("dns_query", fields, tags)
}
}
@@ -165,6 +182,21 @@ func (d *DnsQuery) parseRecordType() (uint16, error) {
return recordType, error
}
func setResult(result ResultType, fields map[string]interface{}, tags map[string]string) {
var tag string
switch result {
case Success:
tag = "success"
case Timeout:
tag = "timeout"
case Error:
tag = "error"
}
tags["result"] = tag
fields["result_code"] = uint64(result)
}
func init() {
inputs.Add("dns_query", func() telegraf.Input {
return &DnsQuery{}
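
The rewritten `Gather` decides the result code by inspecting the error: a `*net.OpError` whose `Timeout()` method reports true maps to `timeout` (code 1), any other error to `error` (code 2). A self-contained sketch of that classification; the synthetic timeout error is contrived for illustration:

```go
package main

import (
	"errors"
	"fmt"
	"net"
	"os"
	"syscall"
)

func classify(err error) string {
	if err == nil {
		return "success" // result_code 0
	}
	if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {
		return "timeout" // result_code 1
	}
	return "error" // result_code 2
}

func main() {
	timeoutErr := &net.OpError{
		Op:  "read",
		Err: os.NewSyscallError("read", syscall.ETIMEDOUT),
	}
	fmt.Println(classify(nil))                    // success
	fmt.Println(classify(timeoutErr))             // timeout
	fmt.Println(classify(errors.New("NXDOMAIN"))) // error
}
```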

View File

@@ -4,12 +4,11 @@ The docker plugin uses the Docker Engine API to gather metrics on running
docker containers.
The docker plugin uses the [Official Docker Client](https://github.com/moby/moby/tree/master/client)
to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/).
[Library Documentation](https://godoc.org/github.com/moby/moby/client)
to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/).
### Configuration:
```
```toml
# Read metrics about docker containers
[[inputs.docker]]
## Docker Endpoint
@@ -54,11 +53,11 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/)
## Which environment variables should we use as a tag
tag_env = ["JAVA_HOME", "HEAP_SIZE"]
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
```
@@ -76,15 +75,57 @@ may prefer to exclude them:
```
### Measurements & Fields:
### Metrics:
Every effort was made to preserve the names based on the JSON response from the
docker API.
Note that the docker_container_cpu metric may appear multiple times per collection,
based on the availability of per-cpu stats on your system.
- docker
- tags:
- unit
- engine_host
- server_version
- fields:
- n_used_file_descriptors
- n_cpus
- n_containers
- n_containers_running
- n_containers_stopped
- n_containers_paused
- n_images
- n_goroutines
- n_listener_events
- memory_total
- pool_blocksize
- docker_data
- tags:
- unit
- engine_host
- server_version
- fields:
- available
- total
- used
- docker_metadata
- tags:
- unit
- engine_host
- server_version
- fields:
- available
- total
- used
- docker_container_mem
- tags:
- engine_host
- server_version
- container_image
- container_name
- container_version
- fields:
- total_pgmafault
- cache
- mapped_file
@@ -119,7 +160,16 @@ based on the availability of per-cpu stats on your system.
- failcnt
- limit
- container_id
- docker_container_cpu
- tags:
- engine_host
- server_version
- container_image
- container_name
- container_version
- cpu
- fields:
- throttling_periods
- throttling_throttled_periods
- throttling_throttled_time
@@ -129,7 +179,16 @@ based on the availability of per-cpu stats on your system.
- usage_total
- usage_percent
- container_id
- docker_container_net
- tags:
- engine_host
- server_version
- container_image
- container_name
- container_version
- network
- fields:
- rx_dropped
- rx_bytes
- rx_errors
@@ -139,7 +198,16 @@ based on the availability of per-cpu stats on your system.
- tx_errors
- tx_bytes
- container_id
- docker_container_blkio
- tags:
- engine_host
- server_version
- container_image
- container_name
- container_version
- device
- fields:
- io_service_bytes_recursive_async
- io_service_bytes_recursive_read
- io_service_bytes_recursive_sync
@@ -151,118 +219,38 @@ based on the availability of per-cpu stats on your system.
- io_serviced_recursive_total
- io_serviced_recursive_write
- container_id
- docker_
- n_used_file_descriptors
- n_cpus
- n_containers
- n_containers_running
- n_containers_stopped
- n_containers_paused
- n_images
- n_goroutines
- n_listener_events
- memory_total
- pool_blocksize
- docker_data
- available
- total
- used
- docker_metadata
- available
- total
- used
- docker_swarm
- tasks_desired
- tasks_running
### Tags:
#### Docker Engine tags
- docker (memory_total)
- unit=bytes
- engine_host
- docker (pool_blocksize)
- unit=bytes
- engine_host
- docker_data
- unit=bytes
- engine_host
- docker_metadata
- unit=bytes
- engine_host
#### Docker Container tags
- Tags on all containers:
- docker_container_health
- tags:
- engine_host
- server_version
- container_image
- container_name
- container_version
- docker_container_mem specific:
- docker_container_cpu specific:
- cpu
- docker_container_net specific:
- network
- docker_container_blkio specific:
- device
- docker_container_health specific:
- health_status
- failing_streak
- docker_swarm specific:
- fields:
- health_status (string)
- failing_streak (integer)
- docker_swarm
- tags:
- service_id
- service_name
- service_mode
- fields:
- tasks_desired
- tasks_running
### Example Output:
```
% ./telegraf --config ~/ws/telegraf.conf --input-filter docker --test
* Plugin: docker, Collection 1
> docker n_cpus=8i 1456926671065383978
> docker n_used_file_descriptors=15i 1456926671065383978
> docker n_containers=7i 1456926671065383978
> docker n_containers_running=7i 1456926671065383978
> docker n_containers_stopped=3i 1456926671065383978
> docker n_containers_paused=0i 1456926671065383978
> docker n_images=152i 1456926671065383978
> docker n_goroutines=36i 1456926671065383978
> docker n_listener_events=0i 1456926671065383978
> docker,unit=bytes memory_total=18935443456i 1456926671065383978
> docker,unit=bytes pool_blocksize=65540i 1456926671065383978
> docker_data,unit=bytes available=24340000000i,total=107400000000i,used=14820000000i 1456926671065383978
> docker_metadata,unit=bytes available=2126999999i,total=2146999999i,used=20420000i 145692667106538
> docker_container_mem,
container_image=spotify/kafka,container_name=kafka \
active_anon=52568064i,active_file=6926336i,cache=12038144i,fail_count=0i,\
hierarchical_memory_limit=9223372036854771712i,inactive_anon=52707328i,\
inactive_file=5111808i,limit=1044578304i,mapped_file=10301440i,\
max_usage=140656640i,pgfault=63762i,pgmajfault=2837i,pgpgin=73355i,\
pgpgout=45736i,rss=105275392i,rss_huge=4194304i,total_active_anon=52568064i,\
total_active_file=6926336i,total_cache=12038144i,total_inactive_anon=52707328i,\
total_inactive_file=5111808i,total_mapped_file=10301440i,total_pgfault=63762i,\
total_pgmafault=0i,total_pgpgin=73355i,total_pgpgout=45736i,\
total_rss=105275392i,total_rss_huge=4194304i,total_unevictable=0i,\
total_writeback=0i,unevictable=0i,usage=117440512i,writeback=0i 1453409536840126713
> docker_container_cpu,
container_image=spotify/kafka,container_name=kafka,cpu=cpu-total \
throttling_periods=0i,throttling_throttled_periods=0i,\
throttling_throttled_time=0i,usage_in_kernelmode=440000000i,\
usage_in_usermode=2290000000i,usage_system=84795360000000i,\
usage_total=6628208865i 1453409536840126713
> docker_container_cpu,
container_image=spotify/kafka,container_name=kafka,cpu=cpu0 \
usage_total=6628208865i 1453409536840126713
> docker_container_net,\
container_image=spotify/kafka,container_name=kafka,network=eth0 \
rx_bytes=7468i,rx_dropped=0i,rx_errors=0i,rx_packets=94i,tx_bytes=946i,\
tx_dropped=0i,tx_errors=0i,tx_packets=13i 1453409536840126713
> docker_container_blkio,
container_image=spotify/kafka,container_name=kafka,device=8:0 \
io_service_bytes_recursive_async=80216064i,io_service_bytes_recursive_read=79925248i,\
io_service_bytes_recursive_sync=77824i,io_service_bytes_recursive_total=80293888i,\
io_service_bytes_recursive_write=368640i,io_serviced_recursive_async=6562i,\
io_serviced_recursive_read=6492i,io_serviced_recursive_sync=37i,\
io_serviced_recursive_total=6599i,io_serviced_recursive_write=107i 1453409536840126713
>docker_swarm,
service_id=xaup2o9krw36j2dy1mjx1arjw,service_mode=replicated,service_name=test,\
tasks_desired=3,tasks_running=3 1508968160000000000
docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce n_containers=6i,n_containers_paused=0i,n_containers_running=1i,n_containers_stopped=5i,n_cpus=2i,n_goroutines=41i,n_images=2i,n_listener_events=0i,n_used_file_descriptors=27i 1524002041000000000
docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce,unit=bytes memory_total=2101661696i 1524002041000000000
docker_container_mem,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce active_anon=8327168i,active_file=2314240i,cache=27402240i,container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",hierarchical_memory_limit=9223372036854771712i,inactive_anon=0i,inactive_file=25088000i,limit=2101661696i,mapped_file=20582400i,max_usage=36646912i,pgfault=4193i,pgmajfault=214i,pgpgin=9243i,pgpgout=520i,rss=8327168i,rss_huge=0i,total_active_anon=8327168i,total_active_file=2314240i,total_cache=27402240i,total_inactive_anon=0i,total_inactive_file=25088000i,total_mapped_file=20582400i,total_pgfault=4193i,total_pgmajfault=214i,total_pgpgin=9243i,total_pgpgout=520i,total_rss=8327168i,total_rss_huge=0i,total_unevictable=0i,total_writeback=0i,unevictable=0i,usage=36528128i,usage_percent=0.4342225020025297,writeback=0i 1524002042000000000
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,cpu=cpu-total,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",throttling_periods=0i,throttling_throttled_periods=0i,throttling_throttled_time=0i,usage_in_kernelmode=40000000i,usage_in_usermode=100000000i,usage_percent=0,usage_system=6394210000000i,usage_total=117319068i 1524002042000000000
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,cpu=cpu0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=20825265i 1524002042000000000
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,cpu=cpu1,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=96493803i 1524002042000000000
docker_container_net,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,engine_host=debian-stretch-docker,network=eth0,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",rx_bytes=1576i,rx_dropped=0i,rx_errors=0i,rx_packets=20i,tx_bytes=0i,tx_dropped=0i,tx_errors=0i,tx_packets=0i 1524002042000000000
docker_container_blkio,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,device=254:0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",io_service_bytes_recursive_async=27398144i,io_service_bytes_recursive_read=27398144i,io_service_bytes_recursive_sync=0i,io_service_bytes_recursive_total=27398144i,io_service_bytes_recursive_write=0i,io_serviced_recursive_async=529i,io_serviced_recursive_read=529i,io_serviced_recursive_sync=0i,io_serviced_recursive_total=529i,io_serviced_recursive_write=0i 1524002042000000000
docker_container_health,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce failing_streak=0i,health_status="healthy" 1524007529000000000
docker_swarm,service_id=xaup2o9krw36j2dy1mjx1arjw,service_mode=replicated,service_name=test tasks_desired=3,tasks_running=3 1508968160000000000
```

View File

@@ -12,7 +12,7 @@ import (
)
var (
version string
version = "1.24"
defaultHeaders = map[string]string{"User-Agent": "engine-api-cli-1.0"}
)
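For context, a minimal sketch of where this pinned version ends up: the `newDockerClient` wrapper is illustrative, while `docker.NewClient` follows the engine-api client's `(host, version, httpClient, headers)` signature.

```go
package example

import (
	"net/http"

	docker "github.com/docker/docker/client"
)

// Pinning the API version keeps the plugin's request surface stable
// even when the daemon advertises a newer API.
const version = "1.24"

var defaultHeaders = map[string]string{"User-Agent": "engine-api-cli-1.0"}

// newDockerClient is a hypothetical wrapper showing where the pinned
// version is passed; an empty version would fall back to the library default.
func newDockerClient(host string, httpClient *http.Client) (*docker.Client, error) {
	return docker.NewClient(host, version, httpClient, defaultHeaders)
}
```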

View File

@@ -20,6 +20,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
tlsint "github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -43,10 +44,7 @@ type Docker struct {
ContainerStateInclude []string `toml:"container_state_include"`
ContainerStateExclude []string `toml:"container_state_exclude"`
SSLCA string `toml:"ssl_ca"`
SSLCert string `toml:"ssl_cert"`
SSLKey string `toml:"ssl_key"`
InsecureSkipVerify bool
tlsint.ClientConfig
newEnvClient func() (Client, error)
newClient func(string, *tls.Config) (Client, error)
@@ -54,6 +52,7 @@ type Docker struct {
client Client
httpClient *http.Client
engine_host string
serverVersion string
filtersCreated bool
labelFilter filter.Filter
containerFilter filter.Filter
@@ -114,11 +113,11 @@ var sampleConfig = `
docker_label_include = []
docker_label_exclude = []
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
`
@@ -135,8 +134,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
if d.Endpoint == "ENV" {
c, err = d.newEnvClient()
} else {
tlsConfig, err := internal.GetTLSConfig(
d.SSLCert, d.SSLKey, d.SSLCA, d.InsecureSkipVerify)
tlsConfig, err := d.ClientConfig.TLSConfig()
if err != nil {
return err
}
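The same migration to the shared `internal/tls` helper repeats in the plugins below; here is a standalone sketch of the pattern (the `Example` type and `initClient` method are illustrative, not part of this changeset):

```go
package example

import (
	"net/http"
	"time"

	"github.com/influxdata/telegraf/internal/tls"
)

// Embedding tls.ClientConfig gives a plugin the shared tls_ca, tls_cert,
// tls_key, and insecure_skip_verify options without per-plugin SSL fields.
type Example struct {
	tls.ClientConfig
	client *http.Client
}

func (e *Example) initClient() error {
	// TLSConfig returns a nil *tls.Config when no options are set,
	// which http.Transport treats as "use plain defaults".
	tlsCfg, err := e.ClientConfig.TLSConfig()
	if err != nil {
		return err
	}
	e.client = &http.Client{
		Transport: &http.Transport{TLSClientConfig: tlsCfg},
		Timeout:   5 * time.Second,
	}
	return nil
}
```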
@@ -301,7 +299,14 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
if err != nil {
return err
}
d.engine_host = info.Name
d.serverVersion = info.ServerVersion
tags := map[string]string{
"engine_host": d.engine_host,
"server_version": d.serverVersion,
}
fields := map[string]interface{}{
"n_cpus": info.NCPU,
@@ -315,15 +320,13 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
"n_listener_events": info.NEventsListener,
}
// Add metrics
acc.AddFields("docker",
fields,
map[string]string{"engine_host": d.engine_host},
now)
acc.AddFields("docker", fields, tags, now)
acc.AddFields("docker",
map[string]interface{}{"memory_total": info.MemTotal},
map[string]string{"unit": "bytes", "engine_host": d.engine_host},
tags,
now)
// Get storage metrics
tags["unit"] = "bytes"
for _, rawData := range info.DriverStatus {
// Try to convert string to int (bytes)
value, err := parseSize(rawData[1])
@@ -335,7 +338,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
// pool blocksize
acc.AddFields("docker",
map[string]interface{}{"pool_blocksize": value},
map[string]string{"unit": "bytes", "engine_host": d.engine_host},
tags,
now)
} else if strings.HasPrefix(name, "data_space_") {
// data space
@@ -348,16 +351,10 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
}
}
if len(dataFields) > 0 {
acc.AddFields("docker_data",
dataFields,
map[string]string{"unit": "bytes", "engine_host": d.engine_host},
now)
acc.AddFields("docker_data", dataFields, tags, now)
}
if len(metadataFields) > 0 {
acc.AddFields("docker_metadata",
metadataFields,
map[string]string{"unit": "bytes", "engine_host": d.engine_host},
now)
acc.AddFields("docker_metadata", metadataFields, tags, now)
}
return nil
}
@@ -388,6 +385,7 @@ func (d *Docker) gatherContainer(
tags := map[string]string{
"engine_host": d.engine_host,
"server_version": d.serverVersion,
"container_name": cname,
"container_image": imageName,
"container_version": imageVersion,

View File

@@ -615,7 +615,10 @@ func TestDockerGatherInfo(t *testing.T) {
"n_images": int(199),
"n_goroutines": int(39),
},
map[string]string{"engine_host": "absol"},
map[string]string{
"engine_host": "absol",
"server_version": "17.09.0-ce",
},
)
acc.AssertContainsTaggedFields(t,
@@ -626,8 +629,9 @@ func TestDockerGatherInfo(t *testing.T) {
"available": int64(36530000000),
},
map[string]string{
"unit": "bytes",
"engine_host": "absol",
"unit": "bytes",
"engine_host": "absol",
"server_version": "17.09.0-ce",
},
)
acc.AssertContainsTaggedFields(t,
@@ -648,6 +652,7 @@ func TestDockerGatherInfo(t *testing.T) {
"ENVVAR7": "ENVVAR8=ENVVAR9",
"label1": "test_value_1",
"label2": "test_value_2",
"server_version": "17.09.0-ce",
},
)
acc.AssertContainsTaggedFields(t,
@@ -670,6 +675,7 @@ func TestDockerGatherInfo(t *testing.T) {
"ENVVAR7": "ENVVAR8=ENVVAR9",
"label1": "test_value_1",
"label2": "test_value_2",
"server_version": "17.09.0-ce",
},
)
}

View File

@@ -55,6 +55,7 @@ var info = types.Info{
DockerRootDir: "/var/lib/docker",
NoProxy: "",
BridgeNfIP6tables: true,
ServerVersion: "17.09.0-ce",
}
var containerList = []types.Container{

View File

@@ -38,11 +38,11 @@ or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/curre
## "breaker". Per default, all stats are gathered.
# node_stats = ["jvm", "http"]
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
```

View File

@@ -3,16 +3,18 @@ package elasticsearch
import (
"encoding/json"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
"io/ioutil"
"net/http"
"regexp"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs"
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
)
// mask for masking username/password from error messages
@@ -38,17 +40,20 @@ type nodeStat struct {
}
type clusterHealth struct {
ClusterName string `json:"cluster_name"`
Status string `json:"status"`
TimedOut bool `json:"timed_out"`
NumberOfNodes int `json:"number_of_nodes"`
NumberOfDataNodes int `json:"number_of_data_nodes"`
ActivePrimaryShards int `json:"active_primary_shards"`
ActiveShards int `json:"active_shards"`
RelocatingShards int `json:"relocating_shards"`
InitializingShards int `json:"initializing_shards"`
UnassignedShards int `json:"unassigned_shards"`
Indices map[string]indexHealth `json:"indices"`
ClusterName string `json:"cluster_name"`
Status string `json:"status"`
TimedOut bool `json:"timed_out"`
NumberOfNodes int `json:"number_of_nodes"`
NumberOfDataNodes int `json:"number_of_data_nodes"`
ActivePrimaryShards int `json:"active_primary_shards"`
ActiveShards int `json:"active_shards"`
RelocatingShards int `json:"relocating_shards"`
InitializingShards int `json:"initializing_shards"`
UnassignedShards int `json:"unassigned_shards"`
NumberOfPendingTasks int `json:"number_of_pending_tasks"`
TaskMaxWaitingInQueueMillis int `json:"task_max_waiting_in_queue_millis"`
ActiveShardsPercentAsNumber float64 `json:"active_shards_percent_as_number"`
Indices map[string]indexHealth `json:"indices"`
}
type indexHealth struct {
@@ -108,28 +113,26 @@ const sampleConfig = `
## "breaker". Per default, all stats are gathered.
# node_stats = ["jvm", "http"]
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
`
// Elasticsearch is a plugin to read stats from one or many Elasticsearch
// servers.
type Elasticsearch struct {
Local bool
Servers []string
HttpTimeout internal.Duration
ClusterHealth bool
ClusterHealthLevel string
ClusterStats bool
NodeStats []string
SSLCA string `toml:"ssl_ca"` // Path to CA file
SSLCert string `toml:"ssl_cert"` // Path to host cert file
SSLKey string `toml:"ssl_key"` // Path to cert key file
InsecureSkipVerify bool // Use SSL but skip chain & host verification
Local bool
Servers []string
HttpTimeout internal.Duration
ClusterHealth bool
ClusterHealthLevel string
ClusterStats bool
NodeStats []string
tls.ClientConfig
client *http.Client
catMasterResponseTokens []string
isMaster bool
@@ -227,7 +230,7 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
}
func (e *Elasticsearch) createHttpClient() (*http.Client, error) {
tlsCfg, err := internal.GetTLSConfig(e.SSLCert, e.SSLKey, e.SSLCA, e.InsecureSkipVerify)
tlsCfg, err := e.ClientConfig.TLSConfig()
if err != nil {
return nil, err
}
@@ -323,16 +326,19 @@ func (e *Elasticsearch) gatherClusterHealth(url string, acc telegraf.Accumulator
}
measurementTime := time.Now()
clusterFields := map[string]interface{}{
"status": healthStats.Status,
"status_code": mapHealthStatusToCode(healthStats.Status),
"timed_out": healthStats.TimedOut,
"number_of_nodes": healthStats.NumberOfNodes,
"number_of_data_nodes": healthStats.NumberOfDataNodes,
"active_primary_shards": healthStats.ActivePrimaryShards,
"active_shards": healthStats.ActiveShards,
"relocating_shards": healthStats.RelocatingShards,
"initializing_shards": healthStats.InitializingShards,
"unassigned_shards": healthStats.UnassignedShards,
"status": healthStats.Status,
"status_code": mapHealthStatusToCode(healthStats.Status),
"timed_out": healthStats.TimedOut,
"number_of_nodes": healthStats.NumberOfNodes,
"number_of_data_nodes": healthStats.NumberOfDataNodes,
"active_primary_shards": healthStats.ActivePrimaryShards,
"active_shards": healthStats.ActiveShards,
"relocating_shards": healthStats.RelocatingShards,
"initializing_shards": healthStats.InitializingShards,
"unassigned_shards": healthStats.UnassignedShards,
"number_of_pending_tasks": healthStats.NumberOfPendingTasks,
"task_max_waiting_in_queue_millis": healthStats.TaskMaxWaitingInQueueMillis,
"active_shards_percent_as_number": healthStats.ActiveShardsPercentAsNumber,
}
acc.AddFields(
"elasticsearch_cluster_health",

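For reference, a sketch of the `mapHealthStatusToCode` helper behind the `status_code` field above, assuming the conventional green/yellow/red encoding (1/2/3, with 0 for anything unrecognized):

```go
package example

import "strings"

// mapHealthStatusToCode converts the textual cluster status into a
// numeric field so it can be graphed and alerted on.
func mapHealthStatusToCode(status string) int {
	switch strings.ToLower(status) {
	case "green":
		return 1
	case "yellow":
		return 2
	case "red":
		return 3
	}
	return 0
}
```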
View File

@@ -11,7 +11,10 @@ const clusterHealthResponse = `
"active_shards": 15,
"relocating_shards": 0,
"initializing_shards": 0,
"unassigned_shards": 0
"unassigned_shards": 0,
"number_of_pending_tasks": 0,
"task_max_waiting_in_queue_millis": 0,
"active_shards_percent_as_number": 100.0
}
`
@@ -27,6 +30,9 @@ const clusterHealthResponseWithIndices = `
"relocating_shards": 0,
"initializing_shards": 0,
"unassigned_shards": 0,
"number_of_pending_tasks": 0,
"task_max_waiting_in_queue_millis": 0,
"active_shards_percent_as_number": 100.0,
"indices": {
"v1": {
"status": "green",
@@ -53,16 +59,19 @@ const clusterHealthResponseWithIndices = `
`
var clusterHealthExpected = map[string]interface{}{
"status": "green",
"status_code": 1,
"timed_out": false,
"number_of_nodes": 3,
"number_of_data_nodes": 3,
"active_primary_shards": 5,
"active_shards": 15,
"relocating_shards": 0,
"initializing_shards": 0,
"unassigned_shards": 0,
"status": "green",
"status_code": 1,
"timed_out": false,
"number_of_nodes": 3,
"number_of_data_nodes": 3,
"active_primary_shards": 5,
"active_shards": 15,
"relocating_shards": 0,
"initializing_shards": 0,
"unassigned_shards": 0,
"number_of_pending_tasks": 0,
"task_max_waiting_in_queue_millis": 0,
"active_shards_percent_as_number": 100.0,
}
var v1IndexExpected = map[string]interface{}{

View File

@@ -0,0 +1,51 @@
# Fibaro Input Plugin
The Fibaro plugin makes HTTP calls to the Fibaro controller API to gather the values of attached devices.
These values may be true (1) or false (0) for switches, a percentage for dimmers, a temperature reading, and so on.
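A minimal sketch of how such values can be normalized to floats, assuming property values arrive as strings (the `normalizeValue` helper is illustrative, not part of the plugin's API):

```go
package example

import "strconv"

// normalizeValue maps Fibaro property strings ("true", "false",
// "22.80", "67", ...) onto float64 metric values.
func normalizeValue(raw string) (float64, bool) {
	switch raw {
	case "true":
		raw = "1"
	case "false":
		raw = "0"
	}
	f, err := strconv.ParseFloat(raw, 64)
	return f, err == nil
}
```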
### Configuration:
```toml
# Read device values from a Fibaro controller
[[inputs.fibaro]]
## Required Fibaro controller address/hostname.
## Note: at the time this plugin was written, the Fibaro controller API was HTTP only (no HTTPS available)
url = "http://<controller>:80"
## Required credentials to access the API (http://<controller>/api/<component>)
username = "<username>"
password = "<password>"
## Amount of time allowed to complete the HTTP request
# timeout = "5s"
```
### Metrics:
- fibaro
- tags:
- section (section name)
- room (room name)
- name (device name)
- type (device type)
- fields:
- value (float)
- value2 (float, when available from device)
### Example Output:
```
fibaro,host=vm1,name=Escaliers,room=Dégagement,section=Pièces\ communes,type=com.fibaro.binarySwitch value=0 1523351010000000000
fibaro,host=vm1,name=Porte\ fenêtre,room=Salon,section=Pièces\ communes,type=com.fibaro.FGRM222 value=99,value2=99 1523351010000000000
fibaro,host=vm1,name=LED\ îlot\ central,room=Cuisine,section=Cuisine,type=com.fibaro.binarySwitch value=0 1523351010000000000
fibaro,host=vm1,name=Détérioration,room=Entrée,section=Pièces\ communes,type=com.fibaro.heatDetector value=0 1523351010000000000
fibaro,host=vm1,name=Température,room=Cave,section=Cave,type=com.fibaro.temperatureSensor value=17.87 1523351010000000000
fibaro,host=vm1,name=Présence,room=Garde-manger,section=Cuisine,type=com.fibaro.FGMS001 value=1 1523351010000000000
fibaro,host=vm1,name=Luminosité,room=Garde-manger,section=Cuisine,type=com.fibaro.lightSensor value=92 1523351010000000000
fibaro,host=vm1,name=Etat,room=Garage,section=Extérieur,type=com.fibaro.doorSensor value=0 1523351010000000000
fibaro,host=vm1,name=CO2\ (ppm),room=Salon,section=Pièces\ communes,type=com.fibaro.multilevelSensor value=880 1523351010000000000
fibaro,host=vm1,name=Humidité\ (%),room=Salon,section=Pièces\ communes,type=com.fibaro.humiditySensor value=53 1523351010000000000
fibaro,host=vm1,name=Pression\ (mb),room=Salon,section=Pièces\ communes,type=com.fibaro.multilevelSensor value=1006.9 1523351010000000000
fibaro,host=vm1,name=Bruit\ (db),room=Salon,section=Pièces\ communes,type=com.fibaro.multilevelSensor value=58 1523351010000000000
```

View File

@@ -0,0 +1,202 @@
package fibaro
import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
const sampleConfig = `
## Required Fibaro controller address/hostname.
## Note: at the time this plugin was written, the Fibaro controller API was HTTP only (no HTTPS available)
url = "http://<controller>:80"
## Required credentials to access the API (http://<controller>/api/<component>)
username = "<username>"
password = "<password>"
## Amount of time allowed to complete the HTTP request
# timeout = "5s"
`
const description = "Read device values from a Fibaro controller"
// Fibaro contains connection information
type Fibaro struct {
URL string
// HTTP Basic Auth Credentials
Username string
Password string
Timeout internal.Duration
client *http.Client
}
// LinkRoomsSections links rooms to sections
type LinkRoomsSections struct {
Name string
SectionID uint16
}
// Sections contains section information
type Sections struct {
ID uint16 `json:"id"`
Name string `json:"name"`
}
// Rooms contains room information
type Rooms struct {
ID uint16 `json:"id"`
Name string `json:"name"`
SectionID uint16 `json:"sectionID"`
}
// Devices contains device information
type Devices struct {
ID uint16 `json:"id"`
Name string `json:"name"`
RoomID uint16 `json:"roomID"`
Type string `json:"type"`
Enabled bool `json:"enabled"`
Properties struct {
Dead interface{} `json:"dead"`
Value interface{} `json:"value"`
Value2 interface{} `json:"value2"`
} `json:"properties"`
}
// Description returns a string explaining the purpose of this plugin
func (f *Fibaro) Description() string { return description }
// SampleConfig returns text explaining how plugin should be configured
func (f *Fibaro) SampleConfig() string { return sampleConfig }
// getJSON connects, authenticates, and reads the JSON payload returned by the Fibaro controller
func (f *Fibaro) getJSON(path string, dataStruct interface{}) error {
var requestURL = f.URL + path
req, err := http.NewRequest("GET", requestURL, nil)
if err != nil {
return err
}
req.SetBasicAuth(f.Username, f.Password)
resp, err := f.client.Do(req)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
requestURL,
resp.StatusCode,
http.StatusText(resp.StatusCode),
http.StatusOK,
http.StatusText(http.StatusOK))
return err
}
defer resp.Body.Close()
dec := json.NewDecoder(resp.Body)
err = dec.Decode(&dataStruct)
if err != nil {
return err
}
return nil
}
// Gather fetches all required information to output metrics
func (f *Fibaro) Gather(acc telegraf.Accumulator) error {
if f.client == nil {
f.client = &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
},
Timeout: f.Timeout.Duration,
}
}
var tmpSections []Sections
err := f.getJSON("/api/sections", &tmpSections)
if err != nil {
return err
}
sections := map[uint16]string{}
for _, v := range tmpSections {
sections[v.ID] = v.Name
}
var tmpRooms []Rooms
err = f.getJSON("/api/rooms", &tmpRooms)
if err != nil {
return err
}
rooms := map[uint16]LinkRoomsSections{}
for _, v := range tmpRooms {
rooms[v.ID] = LinkRoomsSections{Name: v.Name, SectionID: v.SectionID}
}
var devices []Devices
err = f.getJSON("/api/devices", &devices)
if err != nil {
return err
}
for _, device := range devices {
// skip devices that cannot report a usable value: no room, disabled, dead, or a raw Z-Wave node
if device.RoomID == 0 ||
!device.Enabled ||
device.Properties.Dead == "true" ||
device.Type == "com.fibaro.zwaveDevice" {
continue
}
tags := map[string]string{
"section": sections[rooms[device.RoomID].SectionID],
"room": rooms[device.RoomID].Name,
"name": device.Name,
"type": device.Type,
}
fields := make(map[string]interface{})
if device.Properties.Value != nil {
value := device.Properties.Value
switch value {
case "true":
value = "1"
case "false":
value = "0"
}
if fValue, err := strconv.ParseFloat(value.(string), 64); err == nil {
fields["value"] = fValue
}
}
if device.Properties.Value2 != nil {
if fValue, err := strconv.ParseFloat(device.Properties.Value2.(string), 64); err == nil {
fields["value2"] = fValue
}
}
acc.AddFields("fibaro", fields, tags)
}
return nil
}
func init() {
inputs.Add("fibaro", func() telegraf.Input {
return &Fibaro{}
})
}

View File

@@ -0,0 +1,204 @@
package fibaro
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const sectionsJSON = `
[
{
"id": 1,
"name": "Section 1",
"sortOrder": 1
},
{
"id": 2,
"name": "Section 2",
"sortOrder": 2
},
{
"id": 3,
"name": "Section 3",
"sortOrder": 3
}
]`
const roomsJSON = `
[
{
"id": 1,
"name": "Room 1",
"sectionID": 1,
"icon": "room_1",
"sortOrder": 1
},
{
"id": 2,
"name": "Room 2",
"sectionID": 2,
"icon": "room_2",
"sortOrder": 2
},
{
"id": 3,
"name": "Room 3",
"sectionID": 3,
"icon": "room_3",
"sortOrder": 3
},
{
"id": 4,
"name": "Room 4",
"sectionID": 3,
"icon": "room_4",
"sortOrder": 4
}
]`
const devicesJSON = `
[
{
"id": 1,
"name": "Device 1",
"roomID": 1,
"type": "com.fibaro.binarySwitch",
"enabled": true,
"properties": {
"dead": "false",
"value": "false"
},
"sortOrder": 1
},
{
"id": 2,
"name": "Device 2",
"roomID": 2,
"type": "com.fibaro.binarySwitch",
"enabled": true,
"properties": {
"dead": "false",
"value": "true"
},
"sortOrder": 2
},
{
"id": 3,
"name": "Device 3",
"roomID": 3,
"type": "com.fibaro.multilevelSwitch",
"enabled": true,
"properties": {
"dead": "false",
"value": "67"
},
"sortOrder": 3
},
{
"id": 4,
"name": "Device 4",
"roomID": 4,
"type": "com.fibaro.temperatureSensor",
"enabled": true,
"properties": {
"dead": "false",
"value": "22.80"
},
"sortOrder": 4
},
{
"id": 5,
"name": "Device 5",
"roomID": 4,
"type": "com.fibaro.FGRM222",
"enabled": true,
"properties": {
"dead": "false",
"value": "50",
"value2": "75"
},
"sortOrder": 5
}
]`
// TestUnauthorized validates that a 401 (wrong credentials) response is handled properly
func TestUnauthorized(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusUnauthorized)
}))
defer ts.Close()
a := Fibaro{
URL: ts.URL,
Username: "user",
Password: "pass",
client: &http.Client{},
}
var acc testutil.Accumulator
err := acc.GatherError(a.Gather)
require.Error(t, err)
}
// TestJSONSuccess validates that the module works correctly with valid JSON payloads
func TestJSONSuccess(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
payload := ""
switch r.URL.Path {
case "/api/sections":
payload = sectionsJSON
case "/api/rooms":
payload = roomsJSON
case "/api/devices":
payload = devicesJSON
}
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, payload)
}))
defer ts.Close()
a := Fibaro{
URL: ts.URL,
Username: "user",
Password: "pass",
client: &http.Client{},
}
var acc testutil.Accumulator
err := acc.GatherError(a.Gather)
require.NoError(t, err)
// Gather should add 5 metrics
assert.Equal(t, uint64(5), acc.NMetrics())
// Ensure fields / values are correct - Device 1
tags := map[string]string{"section": "Section 1", "room": "Room 1", "name": "Device 1", "type": "com.fibaro.binarySwitch"}
fields := map[string]interface{}{"value": float64(0)}
acc.AssertContainsTaggedFields(t, "fibaro", fields, tags)
// Ensure fields / values are correct - Device 2
tags = map[string]string{"section": "Section 2", "room": "Room 2", "name": "Device 2", "type": "com.fibaro.binarySwitch"}
fields = map[string]interface{}{"value": float64(1)}
acc.AssertContainsTaggedFields(t, "fibaro", fields, tags)
// Ensure fields / values are correct - Device 3
tags = map[string]string{"section": "Section 3", "room": "Room 3", "name": "Device 3", "type": "com.fibaro.multilevelSwitch"}
fields = map[string]interface{}{"value": float64(67)}
acc.AssertContainsTaggedFields(t, "fibaro", fields, tags)
// Ensure fields / values are correct - Device 4
tags = map[string]string{"section": "Section 3", "room": "Room 4", "name": "Device 4", "type": "com.fibaro.temperatureSensor"}
fields = map[string]interface{}{"value": float64(22.8)}
acc.AssertContainsTaggedFields(t, "fibaro", fields, tags)
// Ensure fields / values are correct - Device 5
tags = map[string]string{"section": "Section 3", "room": "Room 4", "name": "Device 5", "type": "com.fibaro.FGRM222"}
fields = map[string]interface{}{"value": float64(50), "value2": float64(75)}
acc.AssertContainsTaggedFields(t, "fibaro", fields, tags)
}

View File

@@ -44,11 +44,11 @@ Note: if namespace end point specified metrics array will be ignored for that ca
username = ""
password = ""
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
```

View File

@@ -14,7 +14,7 @@ import (
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -35,15 +35,7 @@ type GrayLog struct {
Metrics []string
Username string
Password string
// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
tls.ClientConfig
client HTTPClient
}
@@ -111,11 +103,11 @@ var sampleConfig = `
username = ""
password = ""
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
`
@@ -132,8 +124,7 @@ func (h *GrayLog) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
if h.client.HTTPClient() == nil {
tlsCfg, err := internal.GetTLSConfig(
h.SSLCert, h.SSLKey, h.SSLCA, h.InsecureSkipVerify)
tlsCfg, err := h.ClientConfig.TLSConfig()
if err != nil {
return err
}

View File

@@ -28,11 +28,11 @@ or [HTTP statistics page](https://cbonte.github.io/haproxy-dconv/1.9/management.
## field names.
# keep_field_names = false
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
```

View File

@@ -14,27 +14,18 @@ import (
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
//CSV format: https://cbonte.github.io/haproxy-dconv/1.5/configuration.html#9.1
type haproxy struct {
Servers []string
Servers []string
KeepFieldNames bool
tls.ClientConfig
client *http.Client
KeepFieldNames bool
// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
}
var sampleConfig = `
@@ -56,11 +47,11 @@ var sampleConfig = `
## field names.
# keep_field_names = false
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
`
@@ -144,8 +135,7 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
}
if g.client == nil {
tlsCfg, err := internal.GetTLSConfig(
g.SSLCert, g.SSLKey, g.SSLCA, g.InsecureSkipVerify)
tlsCfg, err := g.ClientConfig.TLSConfig()
if err != nil {
return err
}

View File

@@ -23,11 +23,11 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The
# username = "username"
# password = "pa$$word"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Amount of time allowed to complete the HTTP request

View File

@@ -11,6 +11,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
)
@@ -24,15 +25,7 @@ type HTTP struct {
// HTTP Basic Auth Credentials
Username string
Password string
// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
tls.ClientConfig
Timeout internal.Duration
@@ -62,11 +55,11 @@ var sampleConfig = `
## Tag all metrics with the url
# tag_url = true
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Amount of time allowed to complete the HTTP request
@@ -97,8 +90,7 @@ func (h *HTTP) Gather(acc telegraf.Accumulator) error {
}
if h.client == nil {
tlsCfg, err := internal.GetTLSConfig(
h.SSLCert, h.SSLKey, h.SSLCA, h.InsecureSkipVerify)
tlsCfg, err := h.ClientConfig.TLSConfig()
if err != nil {
return err
}

View File

@@ -5,9 +5,7 @@ import (
"compress/gzip"
"crypto/subtle"
"crypto/tls"
"crypto/x509"
"io"
"io/ioutil"
"log"
"net"
"net/http"
@@ -16,6 +14,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
tlsint "github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers/influx"
"github.com/influxdata/telegraf/selfstat"
@@ -43,9 +42,7 @@ type HTTPListener struct {
MaxLineSize int
Port int
TlsAllowedCacerts []string
TlsCert string
TlsKey string
tlsint.ServerConfig
BasicUsername string
BasicPassword string
@@ -158,7 +155,10 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
h.acc = acc
h.pool = NewPool(200, h.MaxLineSize)
tlsConf := h.getTLSConfig()
tlsConf, err := h.ServerConfig.TLSConfig()
if err != nil {
return err
}
server := &http.Server{
Addr: h.ServiceAddress,
@@ -168,7 +168,6 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
TLSConfig: tlsConf,
}
var err error
var listener net.Listener
if tlsConf != nil {
listener, err = tls.Listen("tcp", h.ServiceAddress, tlsConf)
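The server-side counterpart of the client refactor, sketched with the field names this changeset uses (the certificate paths are placeholders):

```go
package example

import (
	"crypto/tls"

	tlsint "github.com/influxdata/telegraf/internal/tls"
)

// serverTLS builds a *tls.Config from the shared ServerConfig; a nil
// result means no cert/key was configured, so plain TCP is used.
func serverTLS() (*tls.Config, error) {
	cfg := tlsint.ServerConfig{
		TLSCert:           "/etc/telegraf/cert.pem",
		TLSKey:            "/etc/telegraf/key.pem",
		TLSAllowedCACerts: []string{"/etc/telegraf/clientca.pem"},
	}
	return cfg.TLSConfig()
}
```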
@@ -372,38 +371,6 @@ func badRequest(res http.ResponseWriter) {
res.Write([]byte(`{"error":"http: bad request"}`))
}
func (h *HTTPListener) getTLSConfig() *tls.Config {
tlsConf := &tls.Config{
InsecureSkipVerify: false,
Renegotiation: tls.RenegotiateNever,
}
if len(h.TlsCert) == 0 || len(h.TlsKey) == 0 {
return nil
}
cert, err := tls.LoadX509KeyPair(h.TlsCert, h.TlsKey)
if err != nil {
return nil
}
tlsConf.Certificates = []tls.Certificate{cert}
if h.TlsAllowedCacerts != nil {
tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
clientPool := x509.NewCertPool()
for _, ca := range h.TlsAllowedCacerts {
c, err := ioutil.ReadFile(ca)
if err != nil {
continue
}
clientPool.AppendCertsFromPEM(c)
}
tlsConf.ClientCAs = clientPool
}
return tlsConf
}
func (h *HTTPListener) AuthenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) {
if h.BasicUsername != "" && h.BasicPassword != "" {
reqUsername, reqPassword, ok := req.BasicAuth()

View File

@@ -4,7 +4,6 @@ import (
"bytes"
"crypto/tls"
"crypto/x509"
"io"
"io/ioutil"
"net/http"
"net/url"
@@ -34,86 +33,12 @@ cpu_load_short,host=server06 value=12.0 1422568543702900257
emptyMsg = ""
serviceRootPEM = `-----BEGIN CERTIFICATE-----
MIIBxzCCATCgAwIBAgIJAJb7HqN2BzWWMA0GCSqGSIb3DQEBCwUAMBYxFDASBgNV
BAMMC1RlbGVncmFmIENBMB4XDTE3MTEwNDA0MzEwN1oXDTI3MTEwMjA0MzEwN1ow
FjEUMBIGA1UEAwwLVGVsZWdyYWYgQ0EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJ
AoGBANbkUkK6JQC3rbLcXhLJTS9SX6uXyFwl7bUfpAN5Hm5EqfvG3PnLrogfTGLr
Tq5CRAu/gbbdcMoL9TLv/aaDVnrpV0FslKhqYmkOgT28bdmA7Qtr539aQpMKCfcW
WCnoMcBD5u5h9MsRqpdq+0Mjlsf1H2hSf07jHk5R1T4l8RMXAgMBAAGjHTAbMAwG
A1UdEwQFMAMBAf8wCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBCwUAA4GBANSrwvpU
t8ihIhpHqgJZ34DM92CZZ3ZHmH/KyqlnuGzjjpnVZiXVrLDTOzrA0ziVhmefY29w
roHjENbFm54HW97ogxeURuO8HRHIVh2U0rkyVxOfGZiUdINHqsZdSnDY07bzCtSr
Z/KsfWXM5llD1Ig1FyBHpKjyUvfzr73sjm/4
-----END CERTIFICATE-----`
serviceCertPEM = `-----BEGIN CERTIFICATE-----
MIIBzzCCATigAwIBAgIBATANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDDAtUZWxl
Z3JhZiBDQTAeFw0xNzExMDQwNDMxMDdaFw0yNzExMDIwNDMxMDdaMBQxEjAQBgNV
BAMMCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAsJRss1af
XKrcIjQoAp2kdJIpT2Ya+MRQXJ18b0PP7szh2lisY11kd/HCkd4D4efuIkpszHaN
xwyTOZLOoplxp6fizzgOYjXsJ6SzbO1MQNmq8Ch/+uKiGgFwLX+YxOOsGSDIHNhF
vcBi93cQtCWPBFz6QRQf9yfIAA5KKxUfJcMCAwEAAaMvMC0wCQYDVR0TBAIwADAL
BgNVHQ8EBAMCBSAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDQYJKoZIhvcNAQELBQAD
gYEAiC3WI4y9vfYz53gw7FKnNK7BBdwRc43x7Pd+5J/cclWyUZPdmcj1UNmv/3rj
2qcMmX06UdgPoHppzNAJePvMVk0vjMBUe9MmYlafMz0h4ma/it5iuldXwmejFcdL
6wWQp7gVTileCEmq9sNvfQN1FmT3EWf4IMdO2MNat/1If0g=
-----END CERTIFICATE-----`
serviceKeyPEM = `-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQCwlGyzVp9cqtwiNCgCnaR0kilPZhr4xFBcnXxvQ8/uzOHaWKxj
XWR38cKR3gPh5+4iSmzMdo3HDJM5ks6imXGnp+LPOA5iNewnpLNs7UxA2arwKH/6
4qIaAXAtf5jE46wZIMgc2EW9wGL3dxC0JY8EXPpBFB/3J8gADkorFR8lwwIDAQAB
AoGBAJaFHxfMmjHK77U0UnrQWFSKFy64cftmlL4t/Nl3q7L68PdIKULWZIMeEWZ4
I0UZiFOwr4em83oejQ1ByGSwekEuiWaKUI85IaHfcbt+ogp9hY/XbOEo56OPQUAd
bEZv1JqJOqta9Ug1/E1P9LjEEyZ5F5ubx7813rxAE31qKtKJAkEA1zaMlCWIr+Rj
hGvzv5rlHH3wbOB4kQFXO4nqj3J/ttzR5QiJW24STMDcbNngFlVcDVju56LrNTiD
dPh9qvl7nwJBANILguR4u33OMksEZTYB7nQZSurqXsq6382zH7pTl29ANQTROHaM
PKC8dnDWq8RGTqKuvWblIzzGIKqIMovZo10CQC96T0UXirITFolOL3XjvAuvFO1Q
EAkdXJs77805m0dCK+P1IChVfiAEpBw3bKJArpAbQIlFfdI953JUp5SieU0CQEub
BSSEKMjh/cxu6peEHnb/262vayuCFKkQPu1sxWewLuVrAe36EKCy9dcsDmv5+rgo
Odjdxc9Madm4aKlaT6kCQQCpAgeblDrrxTrNQ+Typzo37PlnQrvI+0EceAUuJ72G
P0a+YZUeHNRqT2pPN9lMTAZGGi3CtcF2XScbLNEBeXge
-----END RSA PRIVATE KEY-----`
clientRootPEM = serviceRootPEM
clientCertPEM = `-----BEGIN CERTIFICATE-----
MIIBzjCCATegAwIBAgIBAjANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDDAtUZWxl
Z3JhZiBDQTAeFw0xNzExMDQwNDMxMDdaFw0yNzExMDIwNDMxMDdaMBMxETAPBgNV
BAMMCHRlbGVncmFmMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDP2IMqyOqI
sJjwBprrz8WPzmlrpyYikQ4XSCSJB3DSTIO+igqMpBUTj3vLlOzsHfVVot1WRqc6
3esM4JE92rc6S73xi4g8L/r8cPIHW4hvFJdMti4UkJBWim8ArSbFqnZjcR19G3tG
LUOiXAUG3nWzMzoEsPruvV1dkKRbJVE4MwIDAQABoy8wLTAJBgNVHRMEAjAAMAsG
A1UdDwQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAjANBgkqhkiG9w0BAQsFAAOB
gQCHxMk38XNxL9nPFBYo3JqITJCFswu6/NLHwDBXCuZKl53rUuFWduiO+1OuScKQ
sQ79W0jHsWRKGOUFrF5/Gdnh8AlkVaITVlcmhdAOFCEbeGpeEvLuuK6grckPitxy
bRF5oM4TCLKKAha60Ir41rk2bomZM9+NZu+Bm+csDqCoxQ==
-----END CERTIFICATE-----`
clientKeyPEM = `-----BEGIN RSA PRIVATE KEY-----
MIICXAIBAAKBgQDP2IMqyOqIsJjwBprrz8WPzmlrpyYikQ4XSCSJB3DSTIO+igqM
pBUTj3vLlOzsHfVVot1WRqc63esM4JE92rc6S73xi4g8L/r8cPIHW4hvFJdMti4U
kJBWim8ArSbFqnZjcR19G3tGLUOiXAUG3nWzMzoEsPruvV1dkKRbJVE4MwIDAQAB
AoGAFzb/r4+xYoMXEfgq5ZvXXTCY5cVNpR6+jCsqqYODPnn9XRLeCsdo8z5bfWms
7NKLzHzca/6IPzL6Rf3vOxFq1YyIZfYVHH+d63/9blAm3Iajjp1W2yW5aj9BJjTb
nm6F0RfuW/SjrZ9IXxTZhSpCklPmUzVZpzvwV3KGeVTVCEECQQDoavCeOwLuqDpt
0aM9GMFUpOU7kLPDuicSwCDaTae4kN2rS17Zki41YXe8A8+509IEN7mK09Vq9HxY
SX6EmV1FAkEA5O9QcCHEa8P12EmUC8oqD2bjq6o7JjUIRlKinwZTlooMJYZw98gA
FVSngTUvLVCVIvSdjldXPOGgfYiccTZrFwJAfHS3gKOtAEuJbkEyHodhD4h1UB4+
hPLr9Xh4ny2yQH0ilpV3px5GLEOTMFUCKUoqTiPg8VxaDjn5U/WXED5n2QJAR4J1
NsFlcGACj+/TvacFYlA6N2nyFeokzoqLX28Ddxdh2erXqJ4hYIhT1ik9tkLggs2z
1T1084BquCuO6lIcOwJBALX4xChoMUF9k0IxSQzlz//seQYDkQNsE7y9IgAOXkzp
RaR4pzgPbnKj7atG+2dBnffWfE+1Mcy0INDAO6WxPg0=
-----END RSA PRIVATE KEY-----`
basicUsername = "test-username-please-ignore"
basicPassword = "super-secure-password!"
)
var (
initClient sync.Once
client *http.Client
initServiceCertFiles sync.Once
allowedCAFiles []string
serviceCAFiles []string
serviceCertFile string
serviceKeyFile string
pki = testutil.NewPKI("../../../testutil/pki")
)
func newTestHTTPListener() *HTTPListener {
@@ -132,74 +57,25 @@ func newTestHTTPAuthListener() *HTTPListener {
}
func newTestHTTPSListener() *HTTPListener {
initServiceCertFiles.Do(func() {
acaf, err := ioutil.TempFile("", "allowedCAFile.crt")
if err != nil {
panic(err)
}
defer acaf.Close()
_, err = io.Copy(acaf, bytes.NewReader([]byte(clientRootPEM)))
allowedCAFiles = []string{acaf.Name()}
scaf, err := ioutil.TempFile("", "serviceCAFile.crt")
if err != nil {
panic(err)
}
defer scaf.Close()
_, err = io.Copy(scaf, bytes.NewReader([]byte(serviceRootPEM)))
serviceCAFiles = []string{scaf.Name()}
scf, err := ioutil.TempFile("", "serviceCertFile.crt")
if err != nil {
panic(err)
}
defer scf.Close()
_, err = io.Copy(scf, bytes.NewReader([]byte(serviceCertPEM)))
serviceCertFile = scf.Name()
skf, err := ioutil.TempFile("", "serviceKeyFile.crt")
if err != nil {
panic(err)
}
defer skf.Close()
_, err = io.Copy(skf, bytes.NewReader([]byte(serviceKeyPEM)))
serviceKeyFile = skf.Name()
})
listener := &HTTPListener{
ServiceAddress: "localhost:0",
TlsAllowedCacerts: allowedCAFiles,
TlsCert: serviceCertFile,
TlsKey: serviceKeyFile,
TimeFunc: time.Now,
ServiceAddress: "localhost:0",
ServerConfig: *pki.TLSServerConfig(),
TimeFunc: time.Now,
}
return listener
}
func getHTTPSClient() *http.Client {
initClient.Do(func() {
cas := x509.NewCertPool()
cas.AppendCertsFromPEM([]byte(serviceRootPEM))
clientCert, err := tls.X509KeyPair([]byte(clientCertPEM), []byte(clientKeyPEM))
if err != nil {
panic(err)
}
client = &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
RootCAs: cas,
Certificates: []tls.Certificate{clientCert},
MinVersion: tls.VersionTLS12,
MaxVersion: tls.VersionTLS12,
CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
Renegotiation: tls.RenegotiateNever,
InsecureSkipVerify: false,
},
},
}
})
return client
tlsConfig, err := pki.TLSClientConfig().TLSConfig()
if err != nil {
panic(err)
}
return &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsConfig,
},
}
}
func createURL(listener *HTTPListener, scheme string, path string, rawquery string) string {
@@ -214,14 +90,14 @@ func createURL(listener *HTTPListener, scheme string, path string, rawquery stri
func TestWriteHTTPSNoClientAuth(t *testing.T) {
listener := newTestHTTPSListener()
listener.TlsAllowedCacerts = nil
listener.TLSAllowedCACerts = nil
acc := &testutil.Accumulator{}
require.NoError(t, listener.Start(acc))
defer listener.Stop()
cas := x509.NewCertPool()
cas.AppendCertsFromPEM([]byte(serviceRootPEM))
cas.AppendCertsFromPEM([]byte(pki.ReadServerCert()))
noClientAuthClient := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{

View File

@@ -32,11 +32,11 @@ This input plugin checks HTTP/HTTPS connections.
# response_string_match = "ok"
# response_string_match = "\".*_status\".?:.?\"up\""
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## HTTP Request Headers (all values must be strings)

View File

@@ -16,6 +16,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -29,15 +30,7 @@ type HTTPResponse struct {
Headers map[string]string
FollowRedirects bool
ResponseStringMatch string
// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
tls.ClientConfig
compiledStringMatch *regexp.Regexp
client *http.Client
@@ -74,11 +67,11 @@ var sampleConfig = `
# response_string_match = "ok"
# response_string_match = "\".*_status\".?:.?\"up\""
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## HTTP Request Headers (all values must be strings)
@@ -113,8 +106,7 @@ func getProxyFunc(http_proxy string) func(*http.Request) (*url.URL, error) {
// CreateHttpClient creates an http client which will timeout at the specified
// timeout period and can follow redirects if specified
func (h *HTTPResponse) createHttpClient() (*http.Client, error) {
tlsCfg, err := internal.GetTLSConfig(
h.SSLCert, h.SSLKey, h.SSLCA, h.InsecureSkipVerify)
tlsCfg, err := h.ClientConfig.TLSConfig()
if err != nil {
return nil, err
}

View File

@@ -34,11 +34,11 @@ Deprecated (1.6): use the [http](../http) input.
# "my_tag_2"
# ]
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## HTTP Request Parameters (all values must be strings). For "GET" requests, data

View File

@@ -12,6 +12,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
)
@@ -29,15 +30,7 @@ type HttpJson struct {
ResponseTimeout internal.Duration
Parameters map[string]string
Headers map[string]string
// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
tls.ClientConfig
client HTTPClient
}
@@ -100,11 +93,11 @@ var sampleConfig = `
# "my_tag_2"
# ]
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## HTTP parameters (all values must be strings). For "GET" requests, data
@@ -133,8 +126,7 @@ func (h *HttpJson) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
if h.client.HTTPClient() == nil {
tlsCfg, err := internal.GetTLSConfig(
h.SSLCert, h.SSLKey, h.SSLCA, h.InsecureSkipVerify)
tlsCfg, err := h.ClientConfig.TLSConfig()
if err != nil {
return err
}

View File

@@ -20,11 +20,11 @@ InfluxDB-formatted endpoints. See below for more information.
"http://localhost:8086/debug/vars"
]
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## http request & header timeout
@@ -33,6 +33,9 @@ InfluxDB-formatted endpoints. See below for more information.
### Measurements & Fields
**Note:** The measurements and fields are dynamically built from the InfluxDB source,
and may vary between versions; the sketch after this list shows where they come from.
- influxdb
- n_shards
- influxdb_database
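A sketch of where those dynamic names come from: the `/debug/vars` endpoint serves expvar-style JSON, so decoding into a generic map is enough to enumerate the sections (the helper below is illustrative):

```go
package example

import (
	"encoding/json"
	"net/http"
)

// fetchVars pulls the expvar-style JSON the plugin parses; the
// top-level keys become the dynamically named measurements.
func fetchVars(url string) (map[string]interface{}, error) {
	resp, err := http.Get(url) // e.g. "http://localhost:8086/debug/vars"
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var vars map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&vars); err != nil {
		return nil, err
	}
	return vars, nil
}
```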

View File

@@ -10,21 +10,14 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
type InfluxDB struct {
URLs []string `toml:"urls"`
// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
URLs []string `toml:"urls"`
Timeout internal.Duration
tls.ClientConfig
client *http.Client
}
@@ -45,11 +38,11 @@ func (*InfluxDB) SampleConfig() string {
"http://localhost:8086/debug/vars"
]
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## http request & header timeout
@@ -63,8 +56,7 @@ func (i *InfluxDB) Gather(acc telegraf.Accumulator) error {
}
if i.client == nil {
tlsCfg, err := internal.GetTLSConfig(
i.SSLCert, i.SSLKey, i.SSLCA, i.InsecureSkipVerify)
tlsCfg, err := i.ClientConfig.TLSConfig()
if err != nil {
return err
}

View File

@@ -2,7 +2,9 @@
The [Jolokia](http://jolokia.org) _agent_ and _proxy_ input plugins collect JMX metrics from an HTTP endpoint using Jolokia's [JSON-over-HTTP protocol](https://jolokia.org/reference/html/protocol.html).
## Jolokia Agent Configuration
### Configuration:
#### Jolokia Agent Configuration
The `jolokia2_agent` input plugin reads JMX metrics from one or more [Jolokia agent](https://jolokia.org/agent/jvm.html) REST endpoints.
@@ -16,14 +18,14 @@ The `jolokia2_agent` input plugin reads JMX metrics from one or more [Jolokia ag
paths = ["Uptime"]
```
Optionally, specify SSL options for communicating with agents:
Optionally, specify TLS options for communicating with agents:
```toml
[[inputs.jolokia2_agent]]
urls = ["https://agent:8080/jolokia"]
ssl_ca = "/var/private/ca.pem"
ssl_cert = "/var/private/client.pem"
ssl_key = "/var/private/client-key.pem"
tls_ca = "/var/private/ca.pem"
tls_cert = "/var/private/client.pem"
tls_key = "/var/private/client-key.pem"
#insecure_skip_verify = false
[[inputs.jolokia2_agent.metric]]
@@ -32,7 +34,7 @@ Optionally, specify SSL options for communicating with agents:
paths = ["Uptime"]
```
## Jolokia Proxy Configuration
#### Jolokia Proxy Configuration
The `jolokia2_proxy` input plugin reads JMX metrics from one or more _targets_ by interacting with a [Jolokia proxy](https://jolokia.org/features/proxy.html) REST endpoint.
@@ -53,15 +55,15 @@ The `jolokia2_proxy` input plugin reads JMX metrics from one or more _targets_ b
paths = ["Uptime"]
```
Optionally, specify SSL options for communicating with proxies:
Optionally, specify TLS options for communicating with proxies:
```toml
[[inputs.jolokia2_proxy]]
url = "https://proxy:8080/jolokia"
ssl_ca = "/var/private/ca.pem"
ssl_cert = "/var/private/client.pem"
ssl_key = "/var/private/client-key.pem"
tls_ca = "/var/private/ca.pem"
tls_cert = "/var/private/client.pem"
tls_key = "/var/private/client-key.pem"
#insecure_skip_verify = false
#default_target_username = ""
@@ -77,7 +79,7 @@ Optionally, specify SSL options for communicating with proxies:
paths = ["Uptime"]
```
## Jolokia Metric Configuration
#### Jolokia Metric Configuration
Each `metric` declaration generates a Jolokia request to fetch telemetry from a JMX MBean.
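A sketch of the JSON-over-HTTP request such a declaration turns into, per Jolokia's protocol (the mbean and attribute values here are illustrative):

```go
package example

import (
	"bytes"
	"encoding/json"
	"net/http"
)

// readMBean issues a Jolokia "read" request for a single attribute.
func readMBean(endpoint string) (*http.Response, error) {
	body, err := json.Marshal(map[string]interface{}{
		"type":      "read",
		"mbean":     "java.lang:type=Runtime",
		"attribute": "Uptime",
	})
	if err != nil {
		return nil, err
	}
	return http.Post(endpoint, "application/json", bytes.NewReader(body))
}
```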
@@ -167,3 +169,11 @@ Both `jolokia2_agent` and `jolokia2_proxy` plugins support default configuration
| `default_field_separator` | `.` | A character to use to join Mbean attributes when creating fields. |
| `default_field_prefix` | _None_ | A string to prepend to the field names produced by all `metric` declarations. |
| `default_tag_prefix` | _None_ | A string to prepend to the tag names produced by all `metric` declarations. |
### Example Configurations:
- [Java JVM](/plugins/inputs/jolokia2/examples/java.conf)
- [Kafka](/plugins/inputs/jolokia2/examples/kafka.conf)
- [Cassandra](/plugins/inputs/jolokia2/examples/cassandra.conf)
Please help improve this list and contribute new configuration files by opening an issue or pull request.

View File

@@ -10,7 +10,7 @@ import (
"path"
"time"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls"
)
type Client struct {
@@ -20,15 +20,11 @@ type Client struct {
}
type ClientConfig struct {
ResponseTimeout time.Duration
Username string
Password string
SSLCA string
SSLCert string
SSLKey string
InsecureSkipVerify bool
ProxyConfig *ProxyConfig
ResponseTimeout time.Duration
Username string
Password string
ProxyConfig *ProxyConfig
tls.ClientConfig
}
type ProxyConfig struct {
@@ -100,8 +96,7 @@ type jolokiaResponse struct {
}
func NewClient(url string, config *ClientConfig) (*Client, error) {
tlsConfig, err := internal.GetTLSConfig(
config.SSLCert, config.SSLKey, config.SSLCA, config.InsecureSkipVerify)
tlsConfig, err := config.ClientConfig.TLSConfig()
if err != nil {
return nil, err
}

View File

@@ -0,0 +1,95 @@
[[inputs.jolokia2_agent]]
urls = ["http://localhost:8778/jolokia"]
name_prefix = "java_"
[[inputs.jolokia2_agent.metric]]
name = "Memory"
mbean = "java.lang:type=Memory"
[[inputs.jolokia2_agent.metric]]
name = "GarbageCollector"
mbean = "java.lang:name=*,type=GarbageCollector"
tag_keys = ["name"]
field_prefix = "$1_"
[[inputs.jolokia2_agent]]
urls = ["http://localhost:8778/jolokia"]
name_prefix = "cassandra_"
[[inputs.jolokia2_agent.metric]]
name = "Cache"
mbean = "org.apache.cassandra.metrics:name=*,scope=*,type=Cache"
tag_keys = ["name", "scope"]
field_prefix = "$1_"
[[inputs.jolokia2_agent.metric]]
name = "Client"
mbean = "org.apache.cassandra.metrics:name=*,type=Client"
tag_keys = ["name"]
field_prefix = "$1_"
[[inputs.jolokia2_agent.metric]]
name = "ClientRequestMetrics"
mbean = "org.apache.cassandra.metrics:name=*,type=ClientRequestMetrics"
tag_keys = ["name"]
field_prefix = "$1_"
[[inputs.jolokia2_agent.metric]]
name = "ClientRequest"
mbean = "org.apache.cassandra.metrics:name=*,scope=*,type=ClientRequest"
tag_keys = ["name", "scope"]
field_prefix = "$1_"
[[inputs.jolokia2_agent.metric]]
name = "ColumnFamily"
mbean = "org.apache.cassandra.metrics:keyspace=*,name=*,scope=*,type=ColumnFamily"
tag_keys = ["keyspace", "name", "scope"]
field_prefix = "$2_"
[[inputs.jolokia2_agent.metric]]
name = "CommitLog"
mbean = "org.apache.cassandra.metrics:name=*,type=CommitLog"
tag_keys = ["name"]
field_prefix = "$1_"
[[inputs.jolokia2_agent.metric]]
name = "Compaction"
mbean = "org.apache.cassandra.metrics:name=*,type=Compaction"
tag_keys = ["name"]
field_prefix = "$1_"
[[inputs.jolokia2_agent.metric]]
name = "CQL"
mbean = "org.apache.cassandra.metrics:name=*,type=CQL"
tag_keys = ["name"]
field_prefix = "$1_"
[[inputs.jolokia2_agent.metric]]
name = "DroppedMessage"
mbean = "org.apache.cassandra.metrics:name=*,scope=*,type=DroppedMessage"
tag_keys = ["name", "scope"]
field_prefix = "$1_"
[[inputs.jolokia2_agent.metric]]
name = "FileCache"
mbean = "org.apache.cassandra.metrics:name=*,type=FileCache"
tag_keys = ["name"]
field_prefix = "$1_"
[[inputs.jolokia2_agent.metric]]
name = "ReadRepair"
mbean = "org.apache.cassandra.metrics:name=*,type=ReadRepair"
tag_keys = ["name"]
field_prefix = "$1_"
[[inputs.jolokia2_agent.metric]]
name = "Storage"
mbean = "org.apache.cassandra.metrics:name=*,type=Storage"
tag_keys = ["name"]
field_prefix = "$1_"
[[inputs.jolokia2_agent.metric]]
name = "ThreadPools"
mbean = "org.apache.cassandra.metrics:name=*,path=*,scope=*,type=ThreadPools"
tag_keys = ["name", "path", "scope"]
field_prefix = "$1_"

View File

@@ -3,9 +3,10 @@ package jolokia2
import (
"fmt"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls"
)
type JolokiaAgent struct {
@@ -16,15 +17,13 @@ type JolokiaAgent struct {
URLs []string `toml:"urls"`
Username string
Password string
ResponseTimeout time.Duration `toml:"response_timeout"`
ResponseTimeout internal.Duration `toml:"response_timeout"`
SSLCA string `toml:"ssl_ca"`
SSLCert string `toml:"ssl_cert"`
SSLKey string `toml:"ssl_key"`
InsecureSkipVerify bool
tls.ClientConfig
Metrics []MetricConfig `toml:"metric"`
gatherer *Gatherer
clients []*Client
}
func (ja *JolokiaAgent) SampleConfig() string {
@@ -39,10 +38,10 @@ func (ja *JolokiaAgent) SampleConfig() string {
# password = ""
# response_timeout = "5s"
## Optional SSL config
# ssl_ca = "/var/private/ca.pem"
# ssl_cert = "/var/private/client.pem"
# ssl_key = "/var/private/client-key.pem"
## Optional TLS config
# tls_ca = "/var/private/ca.pem"
# tls_cert = "/var/private/client.pem"
# tls_key = "/var/private/client-key.pem"
# insecure_skip_verify = false
## Add metrics to read
@@ -62,20 +61,27 @@ func (ja *JolokiaAgent) Gather(acc telegraf.Accumulator) error {
ja.gatherer = NewGatherer(ja.createMetrics())
}
// Initialize clients once
if ja.clients == nil {
ja.clients = make([]*Client, 0, len(ja.URLs))
for _, url := range ja.URLs {
client, err := ja.createClient(url)
if err != nil {
acc.AddError(fmt.Errorf("Unable to create client for %s: %v", url, err))
continue
}
ja.clients = append(ja.clients, client)
}
}
var wg sync.WaitGroup
for _, url := range ja.URLs {
client, err := ja.createClient(url)
if err != nil {
acc.AddError(fmt.Errorf("Unable to create client for %s: %v", url, err))
continue
}
for _, client := range ja.clients {
wg.Add(1)
go func(client *Client) {
defer wg.Done()
err = ja.gatherer.Gather(client, acc)
err := ja.gatherer.Gather(client, acc)
if err != nil {
acc.AddError(fmt.Errorf("Unable to gather metrics for %s: %v", client.URL, err))
}
@@ -101,12 +107,9 @@ func (ja *JolokiaAgent) createMetrics() []Metric {
func (ja *JolokiaAgent) createClient(url string) (*Client, error) {
return NewClient(url, &ClientConfig{
Username: ja.Username,
Password: ja.Password,
ResponseTimeout: ja.ResponseTimeout,
SSLCA: ja.SSLCA,
SSLCert: ja.SSLCert,
SSLKey: ja.SSLKey,
InsecureSkipVerify: ja.InsecureSkipVerify,
Username: ja.Username,
Password: ja.Password,
ResponseTimeout: ja.ResponseTimeout.Duration,
ClientConfig: ja.ClientConfig,
})
}

View File

@@ -1,9 +1,9 @@
package jolokia2
import (
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls"
)
type JolokiaProxy struct {
@@ -16,13 +16,10 @@ type JolokiaProxy struct {
DefaultTargetUsername string
Targets []JolokiaProxyTargetConfig `toml:"target"`
Username string
Password string
SSLCA string `toml:"ssl_ca"`
SSLCert string `toml:"ssl_cert"`
SSLKey string `toml:"ssl_key"`
InsecureSkipVerify bool
ResponseTimeout time.Duration `toml:"response_timeout"`
Username string
Password string
ResponseTimeout internal.Duration `toml:"response_timeout"`
tls.ClientConfig
Metrics []MetricConfig `toml:"metric"`
client *Client
@@ -47,22 +44,22 @@ func (jp *JolokiaProxy) SampleConfig() string {
# password = ""
# response_timeout = "5s"
## Optional SSL config
# ssl_ca = "/var/private/ca.pem"
# ssl_cert = "/var/private/client.pem"
# ssl_key = "/var/private/client-key.pem"
## Optional TLS config
# tls_ca = "/var/private/ca.pem"
# tls_cert = "/var/private/client.pem"
# tls_key = "/var/private/client-key.pem"
# insecure_skip_verify = false
## Add proxy targets to query
# default_target_username = ""
# default_target_password = ""
[[inputs.jolokia_proxy.target]]
[[inputs.jolokia2_proxy.target]]
url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
# username = ""
# password = ""
# username = ""
# password = ""
## Add metrics to read
[[inputs.jolokia_proxy.metric]]
[[inputs.jolokia2_proxy.metric]]
name = "java_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]
@@ -117,13 +114,10 @@ func (jp *JolokiaProxy) createClient() (*Client, error) {
}
return NewClient(jp.URL, &ClientConfig{
Username: jp.Username,
Password: jp.Password,
ResponseTimeout: jp.ResponseTimeout,
SSLCA: jp.SSLCA,
SSLCert: jp.SSLCert,
SSLKey: jp.SSLKey,
InsecureSkipVerify: jp.InsecureSkipVerify,
ProxyConfig: proxyConfig,
Username: jp.Username,
Password: jp.Password,
ResponseTimeout: jp.ResponseTimeout.Duration,
ClientConfig: jp.ClientConfig,
ProxyConfig: proxyConfig,
})
}

View File

@@ -0,0 +1,59 @@
# JTI OpenConfig Telemetry Input Plugin
This plugin reads Juniper Networks' implementation of OpenConfig telemetry data from the listed sensors using the Junos Telemetry Interface. Refer to
[openconfig.net](http://openconfig.net/) for more details about OpenConfig and the [Junos Telemetry Interface (JTI)](https://www.juniper.net/documentation/en_US/junos/topics/concept/junos-telemetry-interface-oveview.html).
### Configuration:
```toml
# Subscribe and receive OpenConfig Telemetry data using JTI
[[inputs.jti_openconfig_telemetry]]
## List of device addresses to collect telemetry from
servers = ["localhost:1883"]
## Authentication details. Username and password are required if the device
## expects authentication. The client ID must be unique when connecting from
## multiple instances of telegraf to the same device.
username = "user"
password = "pass"
client_id = "telegraf"
## Frequency to get data
sample_frequency = "1000ms"
## Sensors to subscribe to
## An identifier for each sensor can be provided in the path by separating it with a space;
## otherwise the sensor path is used as the identifier.
## When an identifier is used, a list of space-separated sensors can be provided.
## A single subscription will be created for all of these sensors, and the data will
## be saved to a measurement named after the identifier.
sensors = [
"/interfaces/",
"collection /components/ /lldp",
]
## A reporting rate can be set per sensor group by specifying the
## reporting rate as a Duration at the beginning of the sensor path / collection
## name. Entries without a reporting rate use the configured sample frequency.
sensors = [
"1000ms customReporting /interfaces /lldp",
"2000ms collection /components",
"/interfaces",
]
## x509 certificate to use for the TLS connection. If it is not provided, an
## insecure channel is opened to the server.
ssl_cert = "/etc/telegraf/cert.pem"
## Delay between retry attempts for failed RPC calls or streams. Defaults to 1000ms.
## Failed streams/calls are not retried if 0 is provided.
retry_delay = "1000ms"
## To treat all string values as tags, set this to true
str_as_tags = false
```
### Tags:
- All measurements are tagged with the identifier information found in the
incoming data
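### Example Output:
A representative measurement, modeled on the plugin's tests; the device address, system id, and sensor values here are hypothetical.
```
/sensor,device=127.0.0.1,path=/sensor,system_id=router1,/sensor/@tag=tagValue /sensor/intKey=10i,_sequence=0i,_timestamp=0i,_component_id=0i,_subcomponent_id=0i
```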

View File

@@ -0,0 +1,182 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: authentication_service.proto
/*
Package authentication is a generated protocol buffer package.
It is generated from these files:
authentication_service.proto
It has these top-level messages:
LoginRequest
LoginReply
*/
package authentication
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// The request message containing the user's name, password and client id
type LoginRequest struct {
UserName string `protobuf:"bytes,1,opt,name=user_name,json=userName" json:"user_name,omitempty"`
Password string `protobuf:"bytes,2,opt,name=password" json:"password,omitempty"`
ClientId string `protobuf:"bytes,3,opt,name=client_id,json=clientId" json:"client_id,omitempty"`
}
func (m *LoginRequest) Reset() { *m = LoginRequest{} }
func (m *LoginRequest) String() string { return proto.CompactTextString(m) }
func (*LoginRequest) ProtoMessage() {}
func (*LoginRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *LoginRequest) GetUserName() string {
if m != nil {
return m.UserName
}
return ""
}
func (m *LoginRequest) GetPassword() string {
if m != nil {
return m.Password
}
return ""
}
func (m *LoginRequest) GetClientId() string {
if m != nil {
return m.ClientId
}
return ""
}
// The response message containing the result of login attempt.
// result value of true indicates success and false indicates
// failure
type LoginReply struct {
Result bool `protobuf:"varint,1,opt,name=result" json:"result,omitempty"`
}
func (m *LoginReply) Reset() { *m = LoginReply{} }
func (m *LoginReply) String() string { return proto.CompactTextString(m) }
func (*LoginReply) ProtoMessage() {}
func (*LoginReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *LoginReply) GetResult() bool {
if m != nil {
return m.Result
}
return false
}
func init() {
proto.RegisterType((*LoginRequest)(nil), "authentication.LoginRequest")
proto.RegisterType((*LoginReply)(nil), "authentication.LoginReply")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Login service
type LoginClient interface {
LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error)
}
type loginClient struct {
cc *grpc.ClientConn
}
func NewLoginClient(cc *grpc.ClientConn) LoginClient {
return &loginClient{cc}
}
func (c *loginClient) LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error) {
out := new(LoginReply)
err := grpc.Invoke(ctx, "/authentication.Login/LoginCheck", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Login service
type LoginServer interface {
LoginCheck(context.Context, *LoginRequest) (*LoginReply, error)
}
func RegisterLoginServer(s *grpc.Server, srv LoginServer) {
s.RegisterService(&_Login_serviceDesc, srv)
}
func _Login_LoginCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(LoginRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(LoginServer).LoginCheck(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/authentication.Login/LoginCheck",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(LoginServer).LoginCheck(ctx, req.(*LoginRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Login_serviceDesc = grpc.ServiceDesc{
ServiceName: "authentication.Login",
HandlerType: (*LoginServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "LoginCheck",
Handler: _Login_LoginCheck_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "authentication_service.proto",
}
func init() { proto.RegisterFile("authentication_service.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 200 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0x2c, 0x2d, 0xc9,
0x48, 0xcd, 0x2b, 0xc9, 0x4c, 0x4e, 0x2c, 0xc9, 0xcc, 0xcf, 0x8b, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb,
0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x43, 0x95, 0x55, 0x4a, 0xe1, 0xe2,
0xf1, 0xc9, 0x4f, 0xcf, 0xcc, 0x0b, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 0x92, 0xe6, 0xe2,
0x2c, 0x2d, 0x4e, 0x2d, 0x8a, 0xcf, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c,
0xe2, 0x00, 0x09, 0xf8, 0x25, 0xe6, 0xa6, 0x0a, 0x49, 0x71, 0x71, 0x14, 0x24, 0x16, 0x17, 0x97,
0xe7, 0x17, 0xa5, 0x48, 0x30, 0x41, 0xe4, 0x60, 0x7c, 0x90, 0xc6, 0xe4, 0x9c, 0xcc, 0xd4, 0xbc,
0x92, 0xf8, 0xcc, 0x14, 0x09, 0x66, 0x88, 0x24, 0x44, 0xc0, 0x33, 0x45, 0x49, 0x85, 0x8b, 0x0b,
0x6a, 0x4b, 0x41, 0x4e, 0xa5, 0x90, 0x18, 0x17, 0x5b, 0x51, 0x6a, 0x71, 0x69, 0x4e, 0x09, 0xd8,
0x02, 0x8e, 0x20, 0x28, 0xcf, 0x28, 0x90, 0x8b, 0x15, 0xac, 0x4a, 0xc8, 0x03, 0xaa, 0xdc, 0x39,
0x23, 0x35, 0x39, 0x5b, 0x48, 0x46, 0x0f, 0xd5, 0xcd, 0x7a, 0xc8, 0x0e, 0x96, 0x92, 0xc2, 0x21,
0x5b, 0x90, 0x53, 0xa9, 0xc4, 0x90, 0xc4, 0x06, 0xf6, 0xb5, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff,
0x11, 0x57, 0x52, 0xd2, 0x15, 0x01, 0x00, 0x00,
}

View File

@@ -0,0 +1,48 @@
//
// Copyrights (c) 2017, Juniper Networks, Inc.
// All rights reserved.
//
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
syntax = "proto3";
package authentication;
// The Login service definition.
service Login {
rpc LoginCheck (LoginRequest) returns (LoginReply) {}
}
// The request message containing the user's name, password and client id
message LoginRequest {
string user_name = 1;
string password = 2;
string client_id = 3;
}
/*
* The response message containing the result of login attempt.
* result value of true indicates success and false indicates
* failure
*/
message LoginReply {
bool result = 1;
}
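
For reference, a minimal client for the Login service defined above might look like the following, using the generated bindings from the previous file; the device address and credentials are hypothetical.

```go
package main

import (
	"context"
	"log"

	authentication "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/auth"
	"google.golang.org/grpc"
)

func main() {
	// Insecure channel for brevity; the plugin uses TLS when ssl_cert is set.
	conn, err := grpc.Dial("device.example:32767", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	lc := authentication.NewLoginClient(conn)
	reply, err := lc.LoginCheck(context.Background(), &authentication.LoginRequest{
		UserName: "user",
		Password: "pass",
		ClientId: "telegraf",
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("authenticated: %v", reply.Result) // Result is true on success
}
```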

View File

@@ -0,0 +1,63 @@
package jti_openconfig_telemetry
import "sort"
type DataGroup struct {
numKeys int
tags map[string]string
data map[string]interface{}
}
// Sort the data groups by number of keys
type CollectionByKeys []DataGroup
func (a CollectionByKeys) Len() int { return len(a) }
func (a CollectionByKeys) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a CollectionByKeys) Less(i, j int) bool { return a[i].numKeys < a[j].numKeys }
// Checks whether there is already a group with these tags and returns a pointer to it.
// Returns nil if no matching group exists.
func (a CollectionByKeys) IsAvailable(tags map[string]string) *DataGroup {
sort.Sort(CollectionByKeys(a))
// Iterate through all the groups and see if we have group with these tags
for _, group := range a {
// Since already sorted, match with only groups with N keys
if group.numKeys < len(tags) {
continue
} else if group.numKeys > len(tags) {
break
}
matchFound := true
for k, v := range tags {
if val, ok := group.tags[k]; ok {
if val != v {
matchFound = false
break
}
} else {
matchFound = false
break
}
}
if matchFound {
return &group
}
}
return nil
}
// Inserts into already existing group or creates a new group
func (a CollectionByKeys) Insert(tags map[string]string, data map[string]interface{}) CollectionByKeys {
// If there is already a group with this set of tags, insert into it. Otherwise create a new group and insert
if group := a.IsAvailable(tags); group != nil {
for k, v := range data {
group.data[k] = v
}
} else {
a = append(a, DataGroup{len(tags), tags, data})
}
return a
}
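
A short usage sketch of the grouping logic above, with illustrative tag and field names: inserts sharing a tag set merge into one group, while a new tag set starts a new group.

```go
groups := CollectionByKeys{}
tags := map[string]string{"device": "router1"}

// Both inserts carry the same tag set, so they merge into a single group.
groups = groups.Insert(tags, map[string]interface{}{"in_octets": 100})
groups = groups.Insert(tags, map[string]interface{}{"out_octets": 200})

// A different tag set starts a second group.
groups = groups.Insert(map[string]string{"device": "router2"},
	map[string]interface{}{"in_octets": 300})

// len(groups) == 2, and the router1 group now holds both in_octets and out_octets.
```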

File diff suppressed because it is too large

View File

@@ -0,0 +1,319 @@
//
// Copyrights (c) 2016, Juniper Networks, Inc.
// All rights reserved.
//
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
//
// Nitin Kumar 04/07/2016
// Abbas Sakarwala 04/07/2016
//
// This file defines the Openconfig Telemetry RPC APIs (for gRPC).
//
// https://github.com/openconfig/public/blob/master/release/models/rpc/openconfig-rpc-api.yang
//
// Version 1.0
//
syntax = "proto3";
package telemetry;
// Interface exported by Agent
service OpenConfigTelemetry {
// Request an inline subscription for data at the specified path.
// The device should send telemetry data back on the same
// connection as the subscription request.
rpc telemetrySubscribe(SubscriptionRequest) returns (stream OpenConfigData) {}
// Terminates and removes an existing telemetry subscription
rpc cancelTelemetrySubscription(CancelSubscriptionRequest) returns (CancelSubscriptionReply) {}
// Get the list of current telemetry subscriptions from the
// target. This command returns a list of existing subscriptions
// not including those that are established via configuration.
rpc getTelemetrySubscriptions(GetSubscriptionsRequest) returns (GetSubscriptionsReply) {}
// Get Telemetry Agent Operational States
rpc getTelemetryOperationalState(GetOperationalStateRequest) returns (GetOperationalStateReply) {}
// Return the set of data encodings supported by the device for
// telemetry data
rpc getDataEncodings(DataEncodingRequest) returns (DataEncodingReply) {}
}
// Message sent for a telemetry subscription request
message SubscriptionRequest {
// Data associated with a telemetry subscription
SubscriptionInput input = 1;
// List of data models paths and filters
// which are used in a telemetry operation.
repeated Path path_list = 2;
// The below configuration is not defined in Openconfig RPC.
// It is a proposed extension to configure additional
// subscription request features.
SubscriptionAdditionalConfig additional_config = 3;
}
// Data associated with a telemetry subscription
message SubscriptionInput {
// List of optional collector endpoints to send data for
// this subscription.
// If no collector destinations are specified, the collector
// destination is assumed to be the requester on the rpc channel.
repeated Collector collector_list = 1;
}
// Collector endpoints to send data specified as an ip+port combination.
message Collector {
// IP address of collector endpoint
string address = 1;
// Transport protocol port number for the collector destination.
uint32 port = 2;
}
// Data model path
message Path {
// Data model path of interest
// Path specification for elements of OpenConfig data models
string path = 1;
// Regular expression to be used in filtering state leaves
string filter = 2;
// If this is set to true, the target device will only send
// updates to the collector upon a change in data value
bool suppress_unchanged = 3;
// Maximum time in ms the target device may go without sending
// a message to the collector. If this time expires with
// suppress-unchanged set, the target device must send an update
// message regardless of whether the data values have changed.
uint32 max_silent_interval = 4;
// Time in ms between collection and transmission of the
// specified data to the collector platform. The target device
// will sample the corresponding data (e.g., a counter) and
// immediately send to the collector destination.
//
// If sample-frequency is set to 0, then the network device
// must emit an update upon every datum change.
uint32 sample_frequency = 5;
// EOM needed for each walk cycle of this path?
// For periodic sensor, applicable for each complete reap
// For event sensor, applicable when initial dump is over
// (same as EOS)
// This feature is not implemented currently.
bool need_eom = 6;
}
// Configure subscription request additional features.
message SubscriptionAdditionalConfig {
// limit the number of records sent in the stream
int32 limit_records = 1;
// limit the time the stream remains open
int32 limit_time_seconds = 2;
// EOS needed for this subscription?
bool need_eos = 3;
}
// Reply to an inline subscription for data at the specified path is done in
// two parts:
// 1. Reply data message sent out using an out-of-band channel.
// 2. Telemetry data sent back on the same connection as the
// subscription request.
// 1. Reply data message sent out using an out-of-band channel.
message SubscriptionReply {
// Response message to a telemetry subscription creation or
// get request.
SubscriptionResponse response = 1;
// List of data models paths and filters
// which are used in a telemetry operation.
repeated Path path_list = 2;
}
// Response message to a telemetry subscription creation or get request.
message SubscriptionResponse {
// Unique id for the subscription on the device. This is
// generated by the device and returned in a subscription
// request or when listing existing subscriptions
uint32 subscription_id = 1;
}
// 2. Telemetry data sent back on the same connection as the
// subscription request.
message OpenConfigData {
// router name:export IP address
string system_id = 1;
// line card / RE (slot number)
uint32 component_id = 2;
// PFE (if applicable)
uint32 sub_component_id = 3;
// Path specification for elements of OpenConfig data models
string path = 4;
// Sequence number, monotonically increasing for each
// system_id, component_id, sub_component_id + path.
uint64 sequence_number = 5;
// timestamp (milliseconds since epoch)
uint64 timestamp = 6;
// List of key-value pairs
repeated KeyValue kv = 7;
// For delete. If filled, it indicates delete
repeated Delete delete = 8;
// If filled, it indicates end of marker for the
// respective path in the list.
repeated Eom eom = 9;
// If filled, it indicates end of sync for complete subscription
bool sync_response = 10;
}
// Simple Key-value, where value could be one of scalar types
message KeyValue {
// Key
string key = 1;
// One of possible values
oneof value {
double double_value = 5;
int64 int_value = 6;
uint64 uint_value = 7;
sint64 sint_value = 8;
bool bool_value = 9;
string str_value = 10;
bytes bytes_value = 11;
}
}
// Message indicating delete for a particular path
message Delete {
string path = 1;
}
// Message indicating EOM for a particular path
message Eom {
string path = 1;
}
// Message sent for a telemetry subscription cancellation request
message CancelSubscriptionRequest {
// Subscription identifier as returned by the device when
// subscription was requested
uint32 subscription_id = 1;
}
// Reply to telemetry subscription cancellation request
message CancelSubscriptionReply {
// Return code
ReturnCode code = 1;
// Return code string
string code_str = 2;
};
// Result of the operation
enum ReturnCode {
SUCCESS = 0;
NO_SUBSCRIPTION_ENTRY = 1;
UNKNOWN_ERROR = 2;
}
// Message sent for a telemetry get request
message GetSubscriptionsRequest {
// Subscription identifier as returned by the device when
// subscription was requested
// --- or ---
// 0xFFFFFFFF for all subscription identifiers
uint32 subscription_id = 1;
}
// Reply to telemetry subscription get request
message GetSubscriptionsReply {
// List of current telemetry subscriptions
repeated SubscriptionReply subscription_list = 1;
}
// Message sent for telemetry agent operational states request
message GetOperationalStateRequest {
// Per-subscription_id level operational state can be requested.
//
// Subscription identifier as returned by the device when
// subscription was requested
// --- or ---
// 0xFFFFFFFF for all subscription identifiers including agent-level
// operational stats
// --- or ---
// If subscription_id is not present then sent only agent-level
// operational stats
uint32 subscription_id = 1;
// Control verbosity of the output
VerbosityLevel verbosity = 2;
}
// Verbosity Level
enum VerbosityLevel {
DETAIL = 0;
TERSE = 1;
BRIEF = 2;
}
// Reply to telemetry agent operational states request
message GetOperationalStateReply {
// List of key-value pairs where
// key = operational state definition
// value = operational state value
repeated KeyValue kv = 1;
}
// Message sent for a data encoding request
message DataEncodingRequest {
}
// Reply to data encodings supported request
message DataEncodingReply {
repeated EncodingType encoding_list = 1;
}
// Encoding Type Supported
enum EncodingType {
UNDEFINED = 0;
XML = 1;
JSON_IETF = 2;
PROTO3 = 3;
}

View File

@@ -0,0 +1,422 @@
package jti_openconfig_telemetry
import (
"fmt"
"log"
"net"
"regexp"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/auth"
"github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/oc"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/status"
)
type OpenConfigTelemetry struct {
Servers []string
Sensors []string
Username string
Password string
ClientID string `toml:"client_id"`
SampleFrequency internal.Duration `toml:"sample_frequency"`
SSLCert string `toml:"ssl_cert"`
StrAsTags bool `toml:"str_as_tags"`
RetryDelay internal.Duration `toml:"retry_delay"`
sensorsConfig []sensorConfig
grpcClientConns []*grpc.ClientConn
wg *sync.WaitGroup
}
var (
// Regex to match and extract data points from path value in received key
keyPathRegex = regexp.MustCompile("\\/([^\\/]*)\\[([A-Za-z0-9\\-\\/]*\\=[^\\[]*)\\]")
sampleConfig = `
## List of device addresses to collect telemetry from
servers = ["localhost:1883"]
## Authentication details. Username and password are required if the device expects
## authentication. The client ID must be unique when connecting from multiple
## instances of telegraf to the same device.
username = "user"
password = "pass"
client_id = "telegraf"
## Frequency to get data
sample_frequency = "1000ms"
## Sensors to subscribe to.
## An identifier for each sensor can be provided in the path, separated by a space;
## otherwise the sensor path is used as the identifier.
## When an identifier is used, a list of space-separated sensors can be provided.
## A single subscription is created with all these sensors, and the data is
## saved to a measurement named after the identifier.
sensors = [
"/interfaces/",
"collection /components/ /lldp",
]
## A reporting rate can be specified per sensor group. To do this, specify the
## reporting rate as a Duration at the beginning of the sensor paths / collection
## name. Entries without a reporting rate use the configured sample frequency.
sensors = [
"1000ms customReporting /interfaces /lldp",
"2000ms collection /components",
"/interfaces",
]
## x509 certificate to use for the TLS connection. If it is not provided, an
## insecure channel is opened to the server.
ssl_cert = "/etc/telegraf/cert.pem"
## Delay between retry attempts for failed RPC calls or streams. Defaults to 1000ms.
## Failed streams/calls are not retried if 0 is provided.
retry_delay = "1000ms"
## To treat all string values as tags, set this to true
str_as_tags = false
`
)
func (m *OpenConfigTelemetry) SampleConfig() string {
return sampleConfig
}
func (m *OpenConfigTelemetry) Description() string {
return "Read JTI OpenConfig Telemetry from listed sensors"
}
func (m *OpenConfigTelemetry) Gather(acc telegraf.Accumulator) error {
return nil
}
func (m *OpenConfigTelemetry) Stop() {
for _, grpcClientConn := range m.grpcClientConns {
grpcClientConn.Close()
}
m.wg.Wait()
}
// Takes in an XML path with predicates and returns a list of tags+values along with
// the final XML path without predicates. Given the input
// /events/event[id=2]/attributes[key='message']/value, this function emits
// /events/event/attributes/value as the xmlpath and
// { /events/event/@id=2, /events/event/attributes/@key='message' } as the tags
func spitTagsNPath(xmlpath string) (string, map[string]string) {
subs := keyPathRegex.FindAllStringSubmatch(xmlpath, -1)
tags := make(map[string]string)
// Given XML path, this will spit out final path without predicates
if len(subs) > 0 {
for _, sub := range subs {
tagKey := strings.Split(xmlpath, sub[0])[0] + "/" + strings.TrimSpace(sub[1]) + "/@"
// If there are multiple keys in a given path, like /events/event[id=2 and type=3]/,
// we must emit multiple tags
for _, kv := range strings.Split(sub[2], " and ") {
key := tagKey + strings.TrimSpace(strings.Split(kv, "=")[0])
tagValue := strings.Replace(strings.Split(kv, "=")[1], "'", "", -1)
tags[key] = tagValue
}
xmlpath = strings.Replace(xmlpath, sub[0], "/"+strings.TrimSpace(sub[1]), 1)
}
}
return xmlpath, tags
}
// Takes in an OC response, extracts tag information from keys, and returns a
// list of groups with unique sets of tags+values
func (m *OpenConfigTelemetry) extractData(r *telemetry.OpenConfigData, grpcServer string) []DataGroup {
// Use empty prefix. We will update this when we iterate over key-value pairs
prefix := ""
dgroups := []DataGroup{}
for _, v := range r.Kv {
kv := make(map[string]interface{})
if v.Key == "__prefix__" {
prefix = v.GetStrValue()
continue
}
// Also, let's use the prefix if there is one
xmlpath, finaltags := spitTagsNPath(prefix + v.Key)
finaltags["device"] = grpcServer
switch v.Value.(type) {
case *telemetry.KeyValue_StrValue:
// If StrAsTags is set, we treat all string values as tags
if m.StrAsTags {
finaltags[xmlpath] = v.GetStrValue()
} else {
kv[xmlpath] = v.GetStrValue()
}
case *telemetry.KeyValue_DoubleValue:
kv[xmlpath] = v.GetDoubleValue()
case *telemetry.KeyValue_IntValue:
kv[xmlpath] = v.GetIntValue()
case *telemetry.KeyValue_UintValue:
kv[xmlpath] = v.GetUintValue()
case *telemetry.KeyValue_SintValue:
kv[xmlpath] = v.GetSintValue()
case *telemetry.KeyValue_BoolValue:
kv[xmlpath] = v.GetBoolValue()
case *telemetry.KeyValue_BytesValue:
kv[xmlpath] = v.GetBytesValue()
}
// Insert other tags from message
finaltags["system_id"] = r.SystemId
finaltags["path"] = r.Path
// Insert derived key and value
dgroups = CollectionByKeys(dgroups).Insert(finaltags, kv)
// Insert data from message header
dgroups = CollectionByKeys(dgroups).Insert(finaltags,
map[string]interface{}{"_sequence": r.SequenceNumber})
dgroups = CollectionByKeys(dgroups).Insert(finaltags,
map[string]interface{}{"_timestamp": r.Timestamp})
dgroups = CollectionByKeys(dgroups).Insert(finaltags,
map[string]interface{}{"_component_id": r.ComponentId})
dgroups = CollectionByKeys(dgroups).Insert(finaltags,
map[string]interface{}{"_subcomponent_id": r.SubComponentId})
}
return dgroups
}
// Structure to hold a sensor's path list and measurement name
type sensorConfig struct {
measurementName string
pathList []*telemetry.Path
}
// Takes in the sensor configuration and converts it into a slice of sensorConfig objects
func (m *OpenConfigTelemetry) splitSensorConfig() int {
var pathlist []*telemetry.Path
var measurementName string
var reportingRate uint32
m.sensorsConfig = make([]sensorConfig, 0)
for _, sensor := range m.Sensors {
spathSplit := strings.Fields(sensor)
reportingRate = uint32(m.SampleFrequency.Duration / time.Millisecond)
// Extract the measurement name and custom reporting rate, if specified. A custom
// reporting rate is given at the beginning of the sensor list,
// followed by the measurement name, as in "1000ms interfaces /interfaces",
// where 1000ms is the custom reporting rate and interfaces is the
// measurement name. If no rate is given, we use the global reporting rate
// from sample_frequency. If no measurement name is given, we use the first
// sensor name as the measurement name. If the first word, or the word after
// the custom reporting rate, doesn't start with /, we treat it as the
// measurement name and exclude it from the list of sensors to subscribe to.
duration, err := time.ParseDuration(spathSplit[0])
if err == nil {
reportingRate = uint32(duration / time.Millisecond)
spathSplit = spathSplit[1:]
}
if len(spathSplit) == 0 {
log.Printf("E! No sensors are specified")
continue
}
// Word after custom reporting rate is treated as measurement name
measurementName = spathSplit[0]
// If our word after custom reporting rate doesn't start with /, we treat
// it as measurement name. Else we treat it as sensor
if !strings.HasPrefix(measurementName, "/") {
spathSplit = spathSplit[1:]
}
if len(spathSplit) == 0 {
log.Printf("E! No valid sensors are specified")
continue
}
// Iterate over our sensors and create pathlist to subscribe
pathlist = make([]*telemetry.Path, 0)
for _, path := range spathSplit {
pathlist = append(pathlist, &telemetry.Path{Path: path,
SampleFrequency: reportingRate})
}
m.sensorsConfig = append(m.sensorsConfig, sensorConfig{
measurementName: measurementName, pathList: pathlist,
})
}
return len(m.sensorsConfig)
}
// Subscribes and collects OpenConfig telemetry data from the given server
func (m *OpenConfigTelemetry) collectData(ctx context.Context,
grpcServer string, grpcClientConn *grpc.ClientConn,
acc telegraf.Accumulator) error {
c := telemetry.NewOpenConfigTelemetryClient(grpcClientConn)
for _, sensor := range m.sensorsConfig {
m.wg.Add(1)
go func(ctx context.Context, sensor sensorConfig) {
defer m.wg.Done()
for {
stream, err := c.TelemetrySubscribe(ctx,
&telemetry.SubscriptionRequest{PathList: sensor.pathList})
if err != nil {
rpcStatus, _ := status.FromError(err)
// If service is currently unavailable and may come back later, retry
if rpcStatus.Code() != codes.Unavailable {
acc.AddError(fmt.Errorf("E! Could not subscribe to %s: %v", grpcServer,
err))
return
} else {
// Retry with delay. If delay is not provided, use default
if m.RetryDelay.Duration > 0 {
log.Printf("D! Retrying %s with timeout %v", grpcServer,
m.RetryDelay.Duration)
time.Sleep(m.RetryDelay.Duration)
continue
} else {
return
}
}
}
for {
r, err := stream.Recv()
if err != nil {
// If we encounter error in the stream, break so we can retry
// the connection
acc.AddError(fmt.Errorf("E! Failed to read from %s: %v", grpcServer, err))
break
}
log.Printf("D! Received from %s: %v", grpcServer, r)
// Create a point and add to batch
tags := make(map[string]string)
// Insert additional tags
tags["device"] = grpcServer
dgroups := m.extractData(r, grpcServer)
// Print final data collection
log.Printf("D! Available collection for %s is: %v", grpcServer, dgroups)
tnow := time.Now()
// Iterate through data groups and add them
for _, group := range dgroups {
if len(group.tags) == 0 {
acc.AddFields(sensor.measurementName, group.data, tags, tnow)
} else {
acc.AddFields(sensor.measurementName, group.data, group.tags, tnow)
}
}
}
}
}(ctx, sensor)
}
return nil
}
func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error {
// Build sensors config
if m.splitSensorConfig() == 0 {
return fmt.Errorf("E! No valid sensor configuration available")
}
// If SSL certificate is provided, use transport credentials
var err error
var transportCredentials credentials.TransportCredentials
if m.SSLCert != "" {
transportCredentials, err = credentials.NewClientTLSFromFile(m.SSLCert, "")
if err != nil {
return fmt.Errorf("E! Failed to read certificate: %v", err)
}
} else {
transportCredentials = nil
}
// Connect to given list of servers and start collecting data
var grpcClientConn *grpc.ClientConn
var wg sync.WaitGroup
ctx := context.Background()
m.wg = &wg
for _, server := range m.Servers {
// Extract device address and port
grpcServer, grpcPort, err := net.SplitHostPort(server)
if err != nil {
log.Printf("E! Invalid server address: %v", err)
continue
}
// If a certificate is provided, open a secure channel. Otherwise, open an insecure one
if transportCredentials != nil {
grpcClientConn, err = grpc.Dial(server, grpc.WithTransportCredentials(transportCredentials))
} else {
grpcClientConn, err = grpc.Dial(server, grpc.WithInsecure())
}
if err != nil {
// Skip this server; a failed dial would otherwise leave a nil connection below
log.Printf("E! Failed to connect to %s: %v", server, err)
continue
}
log.Printf("D! Opened a new gRPC session to %s on port %s", grpcServer, grpcPort)
// Add to the list of client connections
m.grpcClientConns = append(m.grpcClientConns, grpcClientConn)
if m.Username != "" && m.Password != "" && m.ClientID != "" {
lc := authentication.NewLoginClient(grpcClientConn)
loginReply, loginErr := lc.LoginCheck(ctx,
&authentication.LoginRequest{UserName: m.Username,
Password: m.Password, ClientId: m.ClientID})
if loginErr != nil {
log.Printf("E! Could not initiate login check for %s: %v", server, err)
continue
}
// Check if the user is authenticated. Bail if auth error
if !loginReply.Result {
log.Printf("E! Failed to authenticate the user for %s", server)
continue
}
}
// Subscribe and gather telemetry data
m.collectData(ctx, grpcServer, grpcClientConn, acc)
}
return nil
}
func init() {
inputs.Add("jti_openconfig_telemetry", func() telegraf.Input {
return &OpenConfigTelemetry{
RetryDelay: internal.Duration{Duration: time.Second},
StrAsTags: false,
}
})
}

View File

@@ -0,0 +1,225 @@
package jti_openconfig_telemetry
import (
"log"
"net"
"os"
"testing"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/oc"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
var cfg = &OpenConfigTelemetry{
Servers: []string{"127.0.0.1:50051"},
SampleFrequency: internal.Duration{Duration: time.Second * 2},
}
var data = &telemetry.OpenConfigData{
Path: "/sensor",
Kv: []*telemetry.KeyValue{{Key: "/sensor[tag='tagValue']/intKey", Value: &telemetry.KeyValue_IntValue{IntValue: 10}}},
}
var data_with_prefix = &telemetry.OpenConfigData{
Path: "/sensor_with_prefix",
Kv: []*telemetry.KeyValue{{Key: "__prefix__", Value: &telemetry.KeyValue_StrValue{StrValue: "/sensor/prefix/"}},
{Key: "intKey", Value: &telemetry.KeyValue_IntValue{IntValue: 10}}},
}
var data_with_multiple_tags = &telemetry.OpenConfigData{
Path: "/sensor_with_multiple_tags",
Kv: []*telemetry.KeyValue{{Key: "__prefix__", Value: &telemetry.KeyValue_StrValue{StrValue: "/sensor/prefix/"}},
{Key: "tagKey[tag='tagValue']/boolKey", Value: &telemetry.KeyValue_BoolValue{BoolValue: false}},
{Key: "intKey", Value: &telemetry.KeyValue_IntValue{IntValue: 10}}},
}
var data_with_string_values = &telemetry.OpenConfigData{
Path: "/sensor_with_string_values",
Kv: []*telemetry.KeyValue{{Key: "__prefix__", Value: &telemetry.KeyValue_StrValue{StrValue: "/sensor/prefix/"}},
{Key: "strKey[tag='tagValue']/strValue", Value: &telemetry.KeyValue_StrValue{StrValue: "10"}}},
}
type openConfigTelemetryServer struct {
}
func (s *openConfigTelemetryServer) TelemetrySubscribe(req *telemetry.SubscriptionRequest, stream telemetry.OpenConfigTelemetry_TelemetrySubscribeServer) error {
path := req.PathList[0].Path
if path == "/sensor" {
stream.Send(data)
} else if path == "/sensor_with_prefix" {
stream.Send(data_with_prefix)
} else if path == "/sensor_with_multiple_tags" {
stream.Send(data_with_multiple_tags)
} else if path == "/sensor_with_string_values" {
stream.Send(data_with_string_values)
}
return nil
}
func (s *openConfigTelemetryServer) CancelTelemetrySubscription(ctx context.Context, req *telemetry.CancelSubscriptionRequest) (*telemetry.CancelSubscriptionReply, error) {
return nil, nil
}
func (s *openConfigTelemetryServer) GetTelemetrySubscriptions(ctx context.Context, req *telemetry.GetSubscriptionsRequest) (*telemetry.GetSubscriptionsReply, error) {
return nil, nil
}
func (s *openConfigTelemetryServer) GetTelemetryOperationalState(ctx context.Context, req *telemetry.GetOperationalStateRequest) (*telemetry.GetOperationalStateReply, error) {
return nil, nil
}
func (s *openConfigTelemetryServer) GetDataEncodings(ctx context.Context, req *telemetry.DataEncodingRequest) (*telemetry.DataEncodingReply, error) {
return nil, nil
}
func newServer() *openConfigTelemetryServer {
s := new(openConfigTelemetryServer)
return s
}
func TestOpenConfigTelemetryData(t *testing.T) {
var acc testutil.Accumulator
cfg.Sensors = []string{"/sensor"}
err := cfg.Start(&acc)
require.NoError(t, err)
tags := map[string]string{
"device": "127.0.0.1",
"/sensor/@tag": "tagValue",
"system_id": "",
"path": "/sensor",
}
fields := map[string]interface{}{
"/sensor/intKey": int64(10),
"_sequence": uint64(0),
"_timestamp": uint64(0),
"_component_id": uint32(0),
"_subcomponent_id": uint32(0),
}
// Give some time for the gRPC channel to be established
time.Sleep(2 * time.Second)
acc.AssertContainsTaggedFields(t, "/sensor", fields, tags)
}
func TestOpenConfigTelemetryDataWithPrefix(t *testing.T) {
var acc testutil.Accumulator
cfg.Sensors = []string{"/sensor_with_prefix"}
err := cfg.Start(&acc)
require.NoError(t, err)
tags := map[string]string{
"device": "127.0.0.1",
"system_id": "",
"path": "/sensor_with_prefix",
}
fields := map[string]interface{}{
"/sensor/prefix/intKey": int64(10),
"_sequence": uint64(0),
"_timestamp": uint64(0),
"_component_id": uint32(0),
"_subcomponent_id": uint32(0),
}
// Give some time for the gRPC channel to be established
time.Sleep(2 * time.Second)
acc.AssertContainsTaggedFields(t, "/sensor_with_prefix", fields, tags)
}
func TestOpenConfigTelemetryDataWithMultipleTags(t *testing.T) {
var acc testutil.Accumulator
cfg.Sensors = []string{"/sensor_with_multiple_tags"}
err := cfg.Start(&acc)
require.NoError(t, err)
tags1 := map[string]string{
"/sensor/prefix/tagKey/@tag": "tagValue",
"device": "127.0.0.1",
"system_id": "",
"path": "/sensor_with_multiple_tags",
}
fields1 := map[string]interface{}{
"/sensor/prefix/tagKey/boolKey": false,
"_sequence": uint64(0),
"_timestamp": uint64(0),
"_component_id": uint32(0),
"_subcomponent_id": uint32(0),
}
tags2 := map[string]string{
"device": "127.0.0.1",
"system_id": "",
"path": "/sensor_with_multiple_tags",
}
fields2 := map[string]interface{}{
"/sensor/prefix/intKey": int64(10),
"_sequence": uint64(0),
"_timestamp": uint64(0),
"_component_id": uint32(0),
"_subcomponent_id": uint32(0),
}
// Give some time for the gRPC channel to be established
time.Sleep(2 * time.Second)
acc.AssertContainsTaggedFields(t, "/sensor_with_multiple_tags", fields1, tags1)
acc.AssertContainsTaggedFields(t, "/sensor_with_multiple_tags", fields2, tags2)
}
func TestOpenConfigTelemetryDataWithStringValues(t *testing.T) {
var acc testutil.Accumulator
cfg.Sensors = []string{"/sensor_with_string_values"}
err := cfg.Start(&acc)
require.NoError(t, err)
tags := map[string]string{
"/sensor/prefix/strKey/@tag": "tagValue",
"device": "127.0.0.1",
"system_id": "",
"path": "/sensor_with_string_values",
}
fields := map[string]interface{}{
"/sensor/prefix/strKey/strValue": "10",
"_sequence": uint64(0),
"_timestamp": uint64(0),
"_component_id": uint32(0),
"_subcomponent_id": uint32(0),
}
// Give some time for the gRPC channel to be established
time.Sleep(2 * time.Second)
acc.AssertContainsTaggedFields(t, "/sensor_with_string_values", fields, tags)
}
func TestMain(m *testing.M) {
lis, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
log.Fatalf("Failed to listen: %v", err)
}
cfg.Servers = []string{lis.Addr().String()}
var opts []grpc.ServerOption
grpcServer := grpc.NewServer(opts...)
telemetry.RegisterOpenConfigTelemetryServer(grpcServer, newServer())
go func() {
grpcServer.Serve(lis)
}()
defer grpcServer.Stop()
os.Exit(m.Run())
}

View File

@@ -22,11 +22,11 @@ and use the old zookeeper connection method.
## Offset (must be either "oldest" or "newest")
offset = "oldest"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Optional SASL Config

Some files were not shown because too many files have changed in this diff.