Compare commits

1353 Commits

Author SHA1 Message Date
Max U 513ec70e25 nitpick 2018-06-26 15:30:49 -07:00
Max U cd2ca966e3 add unchanged makefile 2018-06-26 14:47:38 -07:00
Max U 46f9eb8dd9 hopefully fix weird commits 2018-06-26 14:39:49 -07:00
Max U 3deeea7a8a Merge branch 'plugin/reader' of github.com:influxdata/telegraf into plugin/reader 2018-06-26 14:19:38 -07:00
Max U 05b0153b0c Revert "more condensing"
This reverts commit 7fa27f400d.
2018-06-26 14:01:27 -07:00
Max U 5529d51df7 Merge branch 'master' of github.com:influxdata/telegraf into plugin/reader 2018-06-26 14:00:07 -07:00
Max U 5bcb5413a9 prevent merge conflict 2018-06-26 13:55:40 -07:00
Max U 7fa27f400d more condensing 2018-06-26 13:19:53 -07:00
Max U 885e78968f more condensing 2018-06-26 13:19:53 -07:00
Max U 001658af30 condense telegraf.conf 2018-06-26 13:18:43 -07:00
Max U 41c174dc29 condense telegraf.conf 2018-06-26 13:18:43 -07:00
Max U e450b266ec remove comments 2018-06-26 13:10:46 -07:00
Max U bacee88ac0 remove comments 2018-06-26 13:10:46 -07:00
Max U a931eb1c90 update DATA_FORMATS_INPUT.MD to include grok 2018-06-26 12:06:35 -07:00
Max U 64c8559e90 update DATA_FORMATS_INPUT.MD to include grok 2018-06-26 12:06:35 -07:00
Max U bf7220d2ce add test file to docker spin up 2018-06-26 11:53:40 -07:00
Max U 16119293e9 add test file to docker spin up 2018-06-26 11:53:40 -07:00
Max U bbd68b3820 docker will spin up 2018-06-26 11:26:43 -07:00
Max U 7845d4a466 docker will spin up 2018-06-26 11:26:43 -07:00
Max U 79d9ea4761 add docker-image spin up for reader 2018-06-26 10:26:48 -07:00
Max U 3d0647d6d9 add docker-image spin up for reader 2018-06-26 10:26:48 -07:00
Max U cc406299ba allow for import from plugins/all 2018-06-25 15:52:43 -07:00
Max U fb4a272a45 allow for import from plugins/all 2018-06-25 15:52:43 -07:00
Max U 9c845950a7 add grok as a top level parser, still need README 2018-06-25 15:32:27 -07:00
Max U e1e6a08f39 add grok as a top level parser, still need README 2018-06-25 15:32:27 -07:00
Max U f40371e361 add init function to reader 2018-06-25 10:15:32 -07:00
Max U 95edfcbf99 add init function to reader 2018-06-25 10:15:32 -07:00
Max U 36a23ea1ba Merge branch 'master' into plugin/reader 2018-06-25 09:54:49 -07:00
Max U 6101471ae1 Merge branch 'master' into plugin/reader 2018-06-25 09:54:49 -07:00
Max U 554b960339 add setparser to reader 2018-06-25 09:53:35 -07:00
Max U fdf1b9b351 add setparser to reader 2018-06-25 09:53:35 -07:00
Daniel Nelson 96e601f405 Document path tag in tail input 2018-06-21 18:02:34 -07:00
Daniel Nelson e7076e4032 Update changelog 2018-06-21 17:59:31 -07:00
JongHyok Lee b895147fdc Added path tag to tail input plugin (#4292) 2018-06-21 17:55:54 -07:00
Daniel Nelson dcaa94ab75 Run windows tests with -short 2018-06-21 17:46:58 -07:00
Max U 542c030dc8 knock more errors from test files 2018-06-21 16:23:06 -07:00
Max U 071b7b9731 knock more errors from test files 2018-06-21 16:23:06 -07:00
Max U 504d978446 clean up some test cases 2018-06-21 16:12:26 -07:00
Max U 563e8e8f54 clean up some test cases 2018-06-21 16:12:26 -07:00
Max U ec7f13111f add more test files 2018-06-21 16:06:36 -07:00
Max U 823ba0a7e0 add more test files 2018-06-21 16:06:36 -07:00
Patrick Hemmer cef6be8d7e Fix postfix input handling of multi-level queues (#4333) 2018-06-21 16:01:38 -07:00
Max U 4e24a1bbe3 add grok as a top level parser 2018-06-21 15:56:20 -07:00
Max U ab0bf5748b add grok as a top level parser 2018-06-21 15:56:20 -07:00
Daniel Nelson 797263369f Update changelog 2018-06-21 14:20:35 -07:00
Ayrdrie 40432353ef Add support for comma in logparser timestamp format (#4311) 2018-06-21 14:19:15 -07:00
Max U 9c4b52256d tweak metric output 2018-06-21 13:13:46 -07:00
Max U caad3b73cb tweak metric output 2018-06-21 13:13:46 -07:00
Max U 08a11d7bfd change config file 2018-06-21 11:44:02 -07:00
Max U 9901bd4ca8 change config file 2018-06-21 11:44:02 -07:00
Max U e12eced211 input plugin that reads files each interval 2018-06-21 11:26:14 -07:00
Max U 80fd876ed2 input plugin that reads files each interval 2018-06-21 11:26:14 -07:00
Greg f9de896427 Update vendoring to dep from gdm (#4314) 2018-06-19 11:55:38 -07:00
Daniel Nelson db78610ab8 Update changelog 2018-06-19 11:48:08 -07:00
maxunt 86e4cd0083 Add new measurement with results of pgrep lookup to procstat input (#4307) 2018-06-19 11:47:13 -07:00
Daniel Nelson 8b678b9041 Update changelog 2018-06-18 18:09:31 -07:00
Piotr Popieluch a059689378 Add valuecounter aggregator plugin (#3523) 2018-06-18 18:06:11 -07:00
Daniel Nelson 3626a522e5 Update changelog 2018-06-18 15:40:00 -07:00
Daniel Nelson e9d2955321 Update docker input documentation for container status 2018-06-18 15:38:21 -07:00
prashanthjbabu f74503087c Add container status tag to docker input (#4259) 2018-06-18 15:33:14 -07:00
Daniel Nelson cc63c25549 Drop CI support for Go 1.8 (#4309)
Go 1.8 is no longer a supported version and the circleci/golang images
have been removed.
2018-06-17 18:50:14 -07:00
Daniel Nelson 0b68ae09d7 Update changelog 2018-06-14 13:18:31 -07:00
maxunt a852184496 Fix selection of tags under nested objects in the JSON parser (#4284) 2018-06-14 13:17:32 -07:00
Daniel Nelson 97cf5894bf Update changelog 2018-06-13 13:50:43 -07:00
Arkady Emelyanov 2d9f9e3f2d Add owner tag on partitions in burrow input (#4281) 2018-06-13 13:05:27 -07:00
Daniel Nelson 7557ad465b Use linux/unix name only in `make install`
closes: #4278
2018-06-12 18:37:50 -07:00
Vlasta Hajek 92baea99a4 Fix grammar issues in win_perf_counters readme 2018-06-12 16:54:48 -07:00
Daniel Nelson 5ff70b47a0 Fix typo 2018-06-12 16:50:14 -07:00
Daniel Nelson 72c4227883 Update changelog 2018-06-12 16:46:27 -07:00
Daniel Nelson 6502ec8f14 Treat sigterm as a clean shutdown signal (#4277) 2018-06-12 16:44:04 -07:00
Daniel Nelson cfe32acfa3 Fix grammar in converter processor documentation 2018-06-12 16:12:08 -07:00
marcv81 da36658e82 Fixed typos in nvidia_smi plugin doc (#4261) 2018-06-12 14:28:56 -07:00
Daniel Nelson 0d3ce434ad Update changelog 2018-06-12 13:57:00 -07:00
Sambhav Kothari 35c200f78d Add support for solr 7 to the solr input (#4271) 2018-06-12 13:56:13 -07:00
Daniel Nelson 240c4a1baf Set 1.7.0 release date 2018-06-12 11:41:58 -07:00
Daniel Nelson 99acc68c25 Use nats-io/go-nats instead of nats-io/nats in tests 2018-06-11 16:13:59 -07:00
Daniel Nelson 93d0a9c544 Update changelog 2018-06-11 16:07:23 -07:00
marcv81 397a9b3fce Add power draw field to nvidia_smi plugin (#4262) 2018-06-11 16:06:26 -07:00
Daniel Nelson 33fdc2ef29 Use nats-io/go-nats instead of nats-io/nats 2018-06-11 15:24:45 -07:00
Daniel Nelson 7d10025d5d Update changelog 2018-06-11 14:55:12 -07:00
Pierre Tessier 5ea289dc9d Remove tags with empty values from Wavefront output (#4266) 2018-06-11 14:54:08 -07:00
Daniel Nelson b30832dbb1 Reword converter description 2018-06-11 14:43:28 -07:00
Daniel Nelson f4d16ae70d Update win_perf_counters README 2018-06-11 11:41:46 -07:00
Vlasta Hajek c03a144640 Add option to enable wildcard expansion (#4265)
This is needed because wildcard expansion causes counters to be localized.
2018-06-11 11:10:53 -07:00
Vlasta Hajek 3112ea1d8a Fix panic with unicode counter names in win_perf_counters (#4255) 2018-06-08 12:41:21 -07:00
Daniel Nelson 465305a143 Update go-syslog version
Fix go-syslog overquota errors since latest version no longer uses LFS.
2018-06-08 12:22:49 -07:00
Daniel Nelson ff6671f026 Update changelog 2018-06-07 12:38:17 -07:00
Daniel Nelson 0549d42bac Update tengine docs 2018-06-07 12:35:46 -07:00
Daniel Nelson 37a10db0bc Restore tengine input plugin (#4160)
This reverts commit 8826cdc423.
2018-06-07 12:35:02 -07:00
Daniel Nelson 661cdc4e81 Fix TLS and SSL config option parsing (#4247) 2018-06-06 18:29:59 -07:00
Daniel Nelson 312843cb0b Update changelog 2018-06-06 14:30:37 -07:00
Daniel Nelson 93269fa1c0 Use same flags for all bsd family ping variants (#4241) 2018-06-06 14:28:12 -07:00
Pierre Fersing a34d7b0754 Ignore more boring filesystems from disk plugin (#4244) 2018-06-06 13:44:26 -07:00
Daniel Nelson d4dbb9a2dd Update changelog 2018-06-05 17:14:29 -07:00
Leszek Charkiewicz 76547dbd4c Add SSL/TLS support to Redis input (#4236) 2018-06-05 17:12:30 -07:00
Piotr Popieluch 9a7b088839 Don't skip metrics during startup in aggregate phase (#4230) 2018-06-05 16:30:53 -07:00
Daniel Nelson b8b139678e Set 1.6.4 release date 2018-06-05 12:11:15 -07:00
Daniel Nelson 71ba89c39b Update master version to 1.8 2018-06-05 11:46:55 -07:00
Daniel Nelson e00d441056 Update sample config 2018-06-05 11:45:07 -07:00
Daniel Nelson 46edb6c96d Add go-syslog to dependencies licenses list 2018-06-05 11:40:03 -07:00
Daniel Nelson 943f51384e Update changelog 2018-06-04 18:35:47 -07:00
Daniel Nelson 230ee8a6f2 Revert "Update aerospike-client-go version to latest release (#4128)"
This reverts commit 1f29612918.
2018-06-04 18:23:51 -07:00
Daniel Nelson ac5baaf4f8 Update changelog 2018-06-04 18:13:53 -07:00
Daniel Nelson 910a464248 Fix misnamed option in varnish sample config 2018-06-04 18:06:59 -07:00
Daniel Nelson fe47a0ee63 Update changelog 2018-06-04 18:02:00 -07:00
Phil Preston ee800e9765 Add counter fields to pf input (#4216) 2018-06-04 18:01:14 -07:00
Daniel Nelson 1335b269d4 Remove test for empty metrics list from file output 2018-06-04 17:58:55 -07:00
Mathur 7cc34787c2 Update burrow README.md (#4231) 2018-06-04 10:51:57 -07:00
Daniel Nelson 835f7c8434 Use random name for test sockets to avoid intermittent failure 2018-06-03 20:19:39 -07:00
Daniel Nelson cf52e3a751 Fix incorrect option name in amqp sample configuration 2018-06-03 18:42:08 -07:00
Daniel Nelson ddebe1aff6 Add option to disconnect after a message limit is reached in amqp output 2018-06-03 18:35:59 -07:00
Daniel Nelson 5a38c152b6 Use list of brokers in amqp output and amqp_consumer 2018-06-03 18:35:59 -07:00
Daniel Nelson 5e267c941d Allow configuration of amqp exchange type, durability, and arguments 2018-06-03 18:35:59 -07:00
Dark 19ec77fb32 Change config to match toml parsing rule. (#4225) 2018-06-03 18:31:47 -07:00
Daniel Nelson a9eb31a80f Update changelog 2018-06-03 15:59:00 -07:00
Mike Gent 850eff5246 Add passive mode exchange declaration option to amqp consumer input (#3995) 2018-06-03 15:52:59 -07:00
Mike Gent 986186a440 Add static routing_key option to amqp output (#3994) 2018-06-03 15:52:00 -07:00
Daniel Nelson 414d7b9543 Update changelog 2018-06-01 10:51:23 -07:00
Thanabodee Charoenpiriyakij aaae6fe324 Handle uint64 on cloudwatch output (#4219) 2018-06-01 10:47:40 -07:00
Daniel Nelson 12fffdd4aa Update changelog 2018-05-31 11:58:16 -07:00
Piotr Popieluch 2f9f5adc78 Print the enabled aggregator and processor plugins on startup (#4212) 2018-05-31 11:56:49 -07:00
Patrick Hemmer 0fd9c1b0ce Fix snmp overriding of auto-configured table fields (#4208)
Whenever the snmp plugin was configured with a table with automatic field
discovery, if one of those fields was explicitly overridden in the config and
the value of is_tag was changed, the field would be duplicated, once as a tag
& once as a field.

This change fixes the behavior so that if a field is explicitly configured,
automatic table field discovery doesn't touch it.
2018-05-29 19:03:37 -07:00
Daniel Nelson bd95f42e14 Log if connection is closed on write error 2018-05-29 16:10:27 -07:00
Vlasta Hajek 121c72ee04 Fix struct alignment in win_perf_counters for 32-bit 386 arch (#4206) 2018-05-29 11:34:00 -07:00
Daniel Nelson 8b92ed8663 Update syslog docs and add to changelog and readme 2018-05-25 14:33:57 -07:00
Leonardo Di Donato 54c59f8688 Add syslog input plugin (#4181) 2018-05-25 11:40:12 -07:00
Daniel Nelson 70519e9a3a Update changelog 2018-05-24 18:34:08 -07:00
Vlasta Hajek a4f511a64a Fix wildcard and other issues with win_perf_counters (#4189) 2018-05-24 18:25:06 -07:00
Daniel Nelson d18aa450d8 Add jolokia2 example configs to list in readme 2018-05-24 12:02:20 -07:00
Daniel Nelson d9dc4f8fba Update changelog 2018-05-24 11:59:15 -07:00
Pierre Tessier c6d4bb1e8d Add additional examples for jolokia2 (#4191) 2018-05-24 11:58:43 -07:00
Daniel Nelson cdf6bcc72a Add special syslog timestamp parser that uses current year (#4190)
Previously it was impossible to parse syslog timestamps without the date
being reported as year 0, due to the year not being specified
2018-05-23 16:37:14 -07:00
Daniel Nelson 1a73ff3aa6 Update changelog 2018-05-23 14:30:55 -07:00
Daniel Nelson 94715c5ae9 Add converter processor (#4178) 2018-05-23 14:29:57 -07:00
Daniel Nelson 2d0028ce1c Update changelog 2018-05-23 14:28:59 -07:00
Daniel Nelson f7e0860771 Add support for TLS and username/password auth to aerospike input (#4183) 2018-05-23 14:28:17 -07:00
Daniel Nelson ebb97c3ec0 Update changelog 2018-05-23 12:26:17 -07:00
Daniel Nelson 893bc2790a Update unbound README 2018-05-23 12:22:25 -07:00
Rodrigo Pereira eb31ff3101 Add option to unbound module to use threads as tags (#3969) 2018-05-23 12:03:49 -07:00
Daniel Nelson adf9568556 Revert "Add tengine input plugin (#4160)"
This reverts commit 697d8ceae5.
2018-05-23 11:58:22 -07:00
arterforyou 51d2a3ea39 Add tengine input plugin (#4160) 2018-05-23 11:19:50 -07:00
Daniel Nelson cecf9393b4 Update changelog 2018-05-22 14:53:21 -07:00
Arkady Emelyanov 2f9b180bc2 Add burrow input plugin (#3489) 2018-05-22 14:10:41 -07:00
Daniel Nelson 9da088e23c Update changelog 2018-05-22 14:00:52 -07:00
Daniel Nelson 3924e6505a Add timeout option to sensors input (#4162) 2018-05-22 13:59:59 -07:00
Daniel Nelson e32e39cf92 Update changelog 2018-05-21 16:40:30 -07:00
Daniel Nelson 3c79b77afc Update graphite output dataf format docs 2018-05-21 16:39:33 -07:00
Daniel Nelson 301c77605b Expose graphite_tag_support option in graphite output data format 2018-05-21 16:39:15 -07:00
Pavel Boev a0c086300f Add support for Graphite 1.1.x tags (#4165) 2018-05-21 15:59:56 -07:00
Daniel Nelson 5ec3229149 Add regex processor to readme and changelog 2018-05-21 15:48:22 -07:00
Alexander Shepelin 9294e82009 Add regex processor plugin (#3839) 2018-05-21 15:46:10 -07:00
Daniel Nelson 1148fd3563 Set release date for 1.6.3 2018-05-21 12:43:52 -07:00
Daniel Nelson daede2bf08 Add aurora input to changelog and readme 2018-05-21 12:01:58 -07:00
Daniel Nelson 636a4aadf6 Add aurora input plugin (#4158) 2018-05-21 11:59:39 -07:00
Daniel Nelson dbc9d03de1 Update changelog 2018-05-21 10:43:57 -07:00
Arkady Emelyanov 95c5ba6e49 Fix waitgroup deadlock if url is incorrect in apache input (#4176) 2018-05-21 10:43:02 -07:00
Daniel Nelson 3f89f35f3f Update mqtt output docs and changelog 2018-05-18 19:03:00 -07:00
jvrahav cbb4dacbc3 Add batch mode to mqtt output (#4094) 2018-05-18 18:55:02 -07:00
Daniel Nelson 680173cc4e Update changelog 2018-05-18 18:52:32 -07:00
Feliksas The Lion ca6d5a6202 Added 3 important elasticsearch cluster health metrics (#4167) 2018-05-18 18:49:23 -07:00
Daniel Nelson 3cd21c8359 Use -parallel=false in gdm to avoid issues on appveyor 2018-05-17 15:19:17 -07:00
Daniel Nelson 791aa07cd8 Return to using latest image file on appveyor 2018-05-17 14:38:21 -07:00
Daniel Nelson 568c0b6b89 Update changelog 2018-05-17 14:25:35 -07:00
Leszek Charkiewicz cefcd2f81c Add consul service tags to metric (#4155) 2018-05-17 14:24:51 -07:00
Daniel Nelson d4e9892539 Update changelog and docs for application_insights plugin 2018-05-15 16:42:56 -07:00
Karol Zadora-Przylecki a018916bf0 Add Microsoft Application Insights output plugin (#4010) 2018-05-15 16:05:59 -07:00
Daniel Nelson 6c6ab3ba27 Update changelog 2018-05-15 15:55:38 -07:00
Daniel Nelson 93d17ec95e Fix librato output support for uint and bool (#4151) 2018-05-15 15:54:20 -07:00
Daniel Nelson bb6b7aa4af Add http output to changelog/readme 2018-05-14 17:19:49 -07:00
Daniel Nelson 797c62c790 Add method, basic auth, and tls support to http output 2018-05-14 17:18:07 -07:00
Dark ca11b6328e Add HTTP output plugin (#2491) 2018-05-14 17:15:40 -07:00
Daniel Nelson 89cc8d2304 Update changelog 2018-05-14 11:01:24 -07:00
Daniel Nelson 2c29f8f84a Fix dropwizard parsing error for metrics that need escaping (#4142)
If the dropwizard parser cannot convert the metric name into a valid
line protocol series then we will accept the name as is.
2018-05-14 11:00:03 -07:00
Daniel Nelson b13b8a04cf Update sample config 2018-05-11 18:18:53 -07:00
Daniel Nelson 890c0e708c Add jti_openconfig_telemetry to docs 2018-05-11 18:16:52 -07:00
Ajay Kumar Chintala 94568030e4 Add service input plugin for OpenConfig streaming telemetry (#2292) 2018-05-11 17:58:19 -07:00
Daniel Nelson c127ba7fa3 Update changelog 2018-05-11 17:50:46 -07:00
Daniel Nelson b703215bad Reuse transport on next interval in jolokia agent (#4137) 2018-05-11 17:48:27 -07:00
Daniel Nelson 59a2f2e7f4 Use internal.Duration for jolokia timeouts (#4136) 2018-05-11 17:47:38 -07:00
Daniel Nelson c49d07c1ec Update changelog 2018-05-09 11:56:59 -07:00
Oleksandr Vilchynskyy 49d48016c4 Update aerospike-client-go version to latest release (#4128) 2018-05-09 11:54:00 -07:00
Daniel Nelson 999710c225 Update changelog 2018-05-08 16:40:42 -07:00
Daniel Nelson 80df42d87b Merge branch 'update_net_response' 2018-05-08 16:17:56 -07:00
Daniel Nelson 078eb47265 Use result and result_code in net_response 2018-05-08 16:17:22 -07:00
Randy Coburn 54a0532604 Add tag/integer pair for result to net_response (#3455) 2018-05-08 16:07:15 -07:00
Daniel Nelson 1d9488bd42 Skip fields that report "not supported" in nvidia-smi (#4123) 2018-05-08 13:11:12 -07:00
Daniel Nelson 581ec4d192 Update changelog 2018-05-08 12:12:03 -07:00
Daniel Nelson 532fd16c68 Add uint/bool support to cratedb output (#4117) 2018-05-08 12:10:25 -07:00
Daniel Nelson b581805629 Add instructions on how to repair windows performance counters 2018-05-07 18:41:05 -07:00
Daniel Nelson 4c362f9b6c Update changelog 2018-05-07 18:19:55 -07:00
Daniel Nelson 734221ccea Don't report 0ms on timeout in dns_query (#4118) 2018-05-07 18:18:01 -07:00
Daniel Nelson 3f5892d91b Run apt-get update in release.sh 2018-05-07 15:12:01 -07:00
Daniel Nelson 4b53fee5ac Update changelog 2018-05-07 15:01:40 -07:00
Jake Champlin d56db64dc6 Add cursor metrics to mongodb input (#4114) 2018-05-07 15:00:24 -07:00
Daniel Nelson 4bfc955f79 Remove combined issue template 2018-05-07 11:43:23 -07:00
Daniel Nelson 2ac89e728b Update issue templates (#4116) 2018-05-07 11:38:09 -07:00
Daniel Nelson d04bac3a79 Update changelog 2018-05-04 18:42:36 -07:00
Germán Jaber 68743825b8 Add topk processor plugin (#4096) 2018-05-04 18:40:05 -07:00
Daniel Nelson dfa23e7a76 Update changelog 2018-05-04 18:31:45 -07:00
Daniel Nelson 0ede70a2bd Add SerializeBatch method to the Serializer interface (#4107) 2018-05-04 18:27:31 -07:00
Daniel Nelson 55b4fcb40d Simplify testing with TLS (#4095) 2018-05-04 16:33:23 -07:00
Daniel Nelson 6e10a4ea88 Update kafka readme 2018-05-04 14:39:31 -07:00
Daniel Nelson 5494b9a65a Only lowercase mysql slave metrics with metric_version = 2 2018-05-04 14:31:16 -07:00
Nicolas Steinmetz 7e749f869e Fix name_override example in mysql readme (#4100) 2018-05-04 14:20:34 -07:00
Mauro Murari cb8d5cc265 Fix platform not supported error in build.py (#4102) 2018-05-04 14:18:59 -07:00
Daniel Nelson 0527474bf6 Move usage string to internal to fix `go run` 2018-05-04 14:16:21 -07:00
Daniel Nelson 8ecf81378a Remove -i flag from `make telegraf` 2018-05-04 14:08:23 -07:00
Daniel Nelson 030eb95b71 Fix grammar 2018-05-03 17:26:01 -07:00
Daniel Nelson e17a7378c2 Clarify max_retry option in kafka output 2018-05-03 17:22:49 -07:00
Daniel Nelson 7302ab2f14 Update gopsutil version 2018-05-03 12:32:53 -07:00
Daniel Nelson 1d910529c0 Update changelog 2018-05-03 11:41:18 -07:00
Daniel Meiners 7ba8ac7645 Ignore UTF8 BOM in JSON parser (#4099) 2018-05-03 11:40:28 -07:00
Daniel Nelson a5586e48e2 Update telegraf.conf 2018-05-02 11:50:11 -07:00
Daniel Nelson 11c11da831 Remove dead link from logparser sample config 2018-05-02 11:49:51 -07:00
Daniel Nelson 0976aee411 Update changelog 2018-05-01 18:57:26 -07:00
Daniel Nelson 55818e791d Fix handling of uint64 in datadog output (#4091) 2018-05-01 18:56:39 -07:00
Daniel Nelson 916e9ab815 Update changelog, add mcrouter to README 2018-05-01 12:01:08 -07:00
Craig Thayer 9803d6291b Add input plugin for McRouter (#4077) 2018-05-01 11:58:15 -07:00
Daniel Nelson cb0472c4d3 Update changelog 2018-04-30 19:21:12 -07:00
Mariusz Brzeski a19eaf0b06 Support busybox ping in the ping input (#3877) 2018-04-30 19:20:13 -07:00
Daniel Nelson fc0bba511d Update changelog 2018-04-30 17:51:04 -07:00
Daniel Nelson 95d2857ab8 Fix win_perf_counters to collect counters per instance (#4036) 2018-04-30 17:48:45 -07:00
Daniel Nelson 54f9e9e133 Document one field per line requirement in logparser 2018-04-30 16:15:51 -07:00
Grégoire Bellon-Gervais d1982dc72f Metrics values have same names as old cassandra plugin (#4080) 2018-04-27 15:12:59 -07:00
Daniel Nelson cfeefa44a2 Update changelog 2018-04-27 14:56:31 -07:00
Vincent Caron 3d979493ad Use same timestamp for fields in system input (#4078) 2018-04-27 14:55:10 -07:00
Daniel Nelson 95a8291722 Update changelog 2018-04-25 19:02:00 -07:00
Adrián López aceba08d0f Add parameter to force the interval of gather for sysstat (#4068) 2018-04-25 18:59:42 -07:00
Daniel Nelson 65f1a3a0e3 Note options that only work with influxdb HTTP 2018-04-25 13:47:16 -07:00
Daniel Nelson 4ff3792a8c Update changelog 2018-04-25 13:47:16 -07:00
Jack Zampolin 2c4d6a867f Fix timeout parsing error in nvidia_smi (#4070) 2018-04-24 14:40:19 -07:00
Yosuke Hara 7f1e4c847f Add support for LeoFS v1.4 to leofs input (#4044) 2018-04-24 14:14:31 -07:00
Daniel Nelson 518f8dcef3 Fix nightly build 2018-04-24 13:42:42 -07:00
Daniel Nelson 116d479975 Fix links to jolokia example configs 2018-04-24 12:46:40 -07:00
Daniel Nelson 07590b69cd Update changelog 2018-04-23 15:15:08 -07:00
Daniel Nelson 114800a768 Add docker input server version (#4035) 2018-04-23 15:09:04 -07:00
Daniel Nelson c277161858 Ignore writer error in file output (#4055) 2018-04-23 15:08:04 -07:00
Daniel Nelson 96120a6fc5 Deprecate the cassandra input plugin (#4050) 2018-04-23 15:06:26 -07:00
Daniel Nelson f8063113ce Update changelog 2018-04-23 14:01:38 -07:00
Daniel Nelson b9c5e88c22 Fix handling of floats with multiple leading zeroes (#4065) 2018-04-23 13:29:49 -07:00
Daniel Nelson 0bedc11d2d Return errors in mongodb SSL/TLS configuration (#4066) 2018-04-23 13:29:12 -07:00
Fred Cox e610f2b02d Add server argument as first argument in unbound input (#4062) 2018-04-23 13:27:29 -07:00
Daniel Nelson 0b795d662b Update changelog 2018-04-20 18:49:55 -07:00
Daniel Nelson d3d1bc72ca Fix duplicate tags when overriding tag (#4056) 2018-04-20 18:39:31 -07:00
Daniel Nelson 3440d70f5c Run 32-bit tests in CircleCI 2018-04-20 15:10:22 -07:00
Daniel Nelson 64072a9c8a Update changelog 2018-04-20 15:05:39 -07:00
Daniel Nelson 493ec3773b Fix ints being capped at 32-bits on 32-bit archs (#4054) 2018-04-20 14:56:28 -07:00
Leandro Piccilli 4df09fbd39 Update gopsutils to include fixes for #4037 and #3750 (#4045) 2018-04-20 14:32:19 -07:00
Daniel Nelson 4a08223b74 Update changelog 2018-04-19 16:58:59 -07:00
Daniel Nelson 18cfb3f295 Add only valid field types in cassandra input (#4048) 2018-04-19 16:56:46 -07:00
Daniel Nelson 07760b2758 Allow metrics to be unserializable in influx.Reader (#4047)
Metrics that are unserializable will be logged at debug level, but the
rest of the batch will be sent. Unserializable metrics can occur during
normal operation, for example if you remove all fields from a metric or
if the metric cannot fit within the line size limit.
2018-04-19 16:24:31 -07:00
Daniel Nelson 6e0e6db1ee Update changelog 2018-04-18 16:57:15 -07:00
Daniel Nelson 04c72df264 Report available fields if utmp is unreadable (#4043) 2018-04-18 16:55:18 -07:00
Daniel Nelson 32eb442e5b Update github.com/gorilla/mux version (#4042) 2018-04-18 16:55:02 -07:00
Daniel Nelson 04d7e53700 Test using Go 1.8-1.10; official builds with 1.10 (#4041) 2018-04-18 16:14:06 -07:00
Daniel Nelson 11f5d478f6 Update changelog 2018-04-18 12:14:58 -07:00
Daniel Nelson 8d87f3933c Fix graphite serialization of unsigned ints (#4033) 2018-04-18 12:13:25 -07:00
Daniel Nelson fc74c1afa5 Tidy up last change to socket listener/writer 2018-04-17 17:48:30 -07:00
Daniel Nelson 37474d28a5 Update changelog 2018-04-17 17:36:35 -07:00
Matt 18a51d54dc Add snmp input option to strip non fixed length index suffixes (#4025) 2018-04-17 17:34:39 -07:00
Daniel Nelson 3c9498aae2 Update changelog 2018-04-17 17:03:18 -07:00
Bob Shannon 86933ebb7f Add TLS support to socket_writer and socket_listener plugins (#4021) 2018-04-17 17:02:04 -07:00
Daniel Nelson 97c7ea5dca Update changelog 2018-04-17 15:45:49 -07:00
James Maidment 300568db72 Update mem values to gauge (#4034) 2018-04-17 15:43:10 -07:00
Daniel Nelson 4af88fe363 Update changelog adding nvidia_smi 2018-04-17 13:43:36 -07:00
Jack Zampolin 160c96ccfe Add nvidia_smi input to monitor nvidia GPUs (#4026) 2018-04-17 13:40:55 -07:00
Daniel Nelson cd124bb2ee Fix docs about outputs and fieldpass/fielddrop
This has been allowed since 1.1.0
2018-04-17 13:35:27 -07:00
Daniel Nelson 13c2f68453 Remove RateLimiter tests due to race conditions
These tests are fundamentally racy; removing them improves the reliability
of the test cases.
2018-04-16 18:52:52 -07:00
Daniel Nelson 3175bddd6d Set 1.6 release date in changelog 2018-04-16 12:04:31 -07:00
Daniel Nelson 8c35451400 Fix HashID conflicts in pathological cases
Use "\n" as delimiter as it cannot occur in the series name.
2018-04-12 18:09:31 -07:00
Daniel Nelson 4d9e234175 Fix MQTT sample config 2018-04-12 14:34:55 -07:00
Daniel Nelson ad7955bdb5 Update changelog 2018-04-11 16:52:40 -07:00
jvassev af7382e224 Prevent loading config twice in K8S (#3999)
When the config dir is mounted from a configmap, filepath.Walk() finds the same
.conf file twice, as 20-acme.conf is a link to ..data/20-acme.conf for example.

This patch skips all folder names starting with '..', which is uncommon
and mainly used by Kubernetes mounts.
2018-04-11 16:51:19 -07:00
Daniel Nelson 5b8401fa37 Add --console and --service to usage message in Windows (#3993) 2018-04-11 16:44:55 -07:00
Daniel Nelson dc9789d589 Update changelog 2018-04-10 18:18:27 -07:00
Daniel Nelson e8a3178b4a Allow grok pattern to contain newlines (#4005) 2018-04-10 18:16:21 -07:00
Daniel Nelson d4733f28bb Update changelog 2018-04-10 18:15:02 -07:00
Daniel Nelson a805c424bf Typesetting changes to fibaro README 2018-04-10 18:14:27 -07:00
Pierrick Brossin 2e0da23c7b Add Fibaro input plugin (#2741) 2018-04-10 18:04:58 -07:00
Daniel Nelson da6a299a22 Fix host ordering in mongodb unit tests 2018-04-10 17:24:40 -07:00
Daniel Nelson 6ef237437d Updated changelog 2018-04-10 17:11:25 -07:00
Jake Champlin 0fc8724f88 Add per-host shard metrics in mongodb input (#3819) 2018-04-10 17:10:29 -07:00
Boris Schrijver d44b3f6839 Fix make test-ci run (#4002) 2018-04-10 15:35:58 -07:00
Daniel Nelson 6ff0bcd677 Document that InfluxDB input metrics vary with version 2018-04-09 19:30:18 -07:00
Daniel Nelson 45af6239e8 Update changelog 2018-04-09 17:06:34 -07:00
Daniel Nelson 906616d639 Rename repl_oplog_window_s to repl_oplog_window_sec
To match existing metric style.
2018-04-09 17:05:45 -07:00
Daniel Nelson cad0cf4c78 Fix newline escaping in line protocol (#3992) 2018-04-09 15:29:52 -07:00
Daniel Nelson d9230ac92d Update changelog 2018-04-06 16:45:07 -07:00
Daniel Nelson 997406ea2e Add details about MongoDB permissions 2018-04-06 16:43:03 -07:00
Daniel Nelson ee94fd6a4e Modernize mongodb docs 2018-04-06 16:36:03 -07:00
Matvey Kruglov 9c2bd062a8 Add repl_oplog_window_s metric to mongodb input (#3964) 2018-04-06 16:34:47 -07:00
alekseyp 5d1585db8c Fix typo in phpfpm README (#3985) 2018-04-06 16:20:36 -07:00
Mark Wilkinson - m82labs cbaf8481c9 Use explicit casts to avoid datatype issues (#3980) 2018-04-06 14:58:33 -07:00
Daniel Nelson 72e9f1d7d0 Update changelog 2018-04-06 13:19:02 -07:00
Daniel Nelson 8b269c4e87 Export all vars defined in /etc/default/telegraf (#3981)
This keeps the format of this file the same between systemd and
sysvinit.
2018-04-06 13:17:24 -07:00
Daniel Nelson 7bfcd87e83 Fix conversion of unsigned ints in prometheus output (#3978) 2018-04-05 16:38:41 -07:00
Daniel Nelson cf49ed3b93 Update changelog 2018-04-05 11:19:01 -07:00
Daniel Nelson a02af5f03a Update gosnmp revision (#3973) 2018-04-05 11:15:20 -07:00
Daniel Nelson a2ba62d756 Log error if scheme is unsupported 2018-04-05 11:08:31 -07:00
Jeff Ashton f70c3b3382 Fix https in InfluxDB output (#3976) 2018-04-05 10:50:32 -07:00
Daniel Nelson 559ad1fcdc Fix build.py next_version 2018-04-04 21:53:20 -07:00
Daniel Nelson 990343604a Use automatic extension naming when running go build 2018-04-04 19:00:28 -07:00
Daniel Nelson 23dd19a85f Enable ntpq tests on Windows (#3972) 2018-04-04 18:35:05 -07:00
Daniel Nelson c367fb404d Add config-directory documentation for Windows service 2018-04-04 16:30:22 -07:00
Daniel Nelson bf60b55bcb Don't print name of plugin or interval size during --test 2018-04-04 16:30:22 -07:00
Daniel Nelson 15dbcd4d97 Sort field names when running --test 2018-04-04 16:30:22 -07:00
Scott Anderson f692f656fd Add details about why not all logstash patterns are supported (#3971) 2018-04-04 14:42:58 -07:00
Daniel Nelson f38d7f1a5b Fix bug preventing database from being recreated (#3962) 2018-04-02 16:18:33 -07:00
Daniel Nelson f70694ae4f Set next version to 1.7 on master 2018-04-02 14:44:09 -07:00
Daniel Nelson 3e184ff8ba Update sample telegraf.conf 2018-04-02 14:40:51 -07:00
Daniel Nelson 4a11957498 Update changelog 2018-04-02 14:34:25 -07:00
Daniel Nelson 52d9a98b09 Fix precision truncation when no timestamp included (#3961) 2018-04-02 14:32:33 -07:00
Daniel Nelson 45e77b3301 Update changelog 2018-04-02 14:31:36 -07:00
Daniel Nelson e775c886b7 Fix go vet and use go test -race 2018-04-02 14:30:46 -07:00
Daniel Nelson 7079c6ee60 Fix parsing of dos line endings in smart input (#3960) 2018-04-02 13:55:10 -07:00
Daniel Nelson fe173c18bd Update gopsutil version to v2.18.03 2018-04-02 13:54:16 -07:00
Daniel Nelson d30faaf029 Add metric_version option to mysql input (#3954) 2018-04-02 13:10:43 -07:00
Daniel Nelson d247425e03 Add mutex to influx parser 2018-04-02 12:52:23 -07:00
Daniel Nelson 717347fce2 Allow empty string field values 2018-03-30 16:57:35 -07:00
Mark Wilkinson - m82labs 952b6763ff Remove host tag from Database IO v2 Query (#3953) 2018-03-30 13:22:24 -07:00
Daniel Nelson 973ae07d2f Update changelog 2018-03-30 13:20:07 -07:00
Daniel Nelson 005d7823a5 Add filters for container state to docker input (#3950) 2018-03-30 13:17:48 -07:00
Daniel Nelson 0d5759daed Fix http_proxy variable name in http_response plugin 2018-03-30 11:11:12 -07:00
Daniel Nelson 666f757ae3 Move Handler interface into machine where it is used 2018-03-30 11:11:12 -07:00
Daniel Nelson 4218eb7367 Add MmapStats when using rocksdb storage engine (#3930) 2018-03-29 13:32:05 -07:00
Daniel Nelson 2fe2622327 Add influx uint support as a runtime option (#3948) 2018-03-29 13:31:43 -07:00
Daniel Nelson 8440887288 Update changelog 2018-03-29 12:44:33 -07:00
Daniel Nelson da4fcccd8f Add TLS support to zookeeper input (#3949) 2018-03-29 12:42:25 -07:00
Matt 006ccbf05b Add line protocol uint64 support (#3946) 2018-03-28 16:43:25 -07:00
Daniel Nelson 741abbf590 Revert to 'f' formatting for floats in line protocol
Using 'g' with -1 precision switches over to scientific notation for
numbers that are too small, and setting a larger precision results in
larger-than-desired representations.
2018-03-28 14:38:39 -07:00
rabhis df0bded83e Reconnect AMQP consumer to broker (#3947) 2018-03-28 14:00:56 -07:00
Daniel Nelson 352e99c890 Update changelog 2018-03-27 18:38:39 -07:00
Daniel Nelson 937c7365af Add support for connecting to InfluxDB over a unix domain socket (#3942) 2018-03-27 18:36:08 -07:00
Daniel Nelson bc9123848b Update changelog 2018-03-27 18:07:37 -07:00
Daniel Nelson c695fdf77e Add support for skipping database creation (#3941) 2018-03-27 17:59:57 -07:00
Daniel Nelson e0c3b7ff2b Update changelog 2018-03-27 17:44:13 -07:00
Daniel Nelson 222a68d72e Add new line protocol parser and serializer, influxdb output (#3924) 2018-03-27 17:30:51 -07:00
Daniel Nelson 720c27559c Update to latest fsnotify release 2018-03-27 17:21:00 -07:00
Daniel Nelson efc2809dad Update to latest gopsutil release 2018-03-27 15:12:57 -07:00
Daniel Nelson b187ffc55f Add documentation for existing TLS settings in consul input (#3931) 2018-03-26 19:22:17 -07:00
Daniel Nelson 9146bbd78a Update DC/OS guidance for cardinality 2018-03-26 19:20:27 -07:00
Daniel Nelson b82381aefe Update passenger input documentation (#3938) 2018-03-26 19:11:08 -07:00
Daniel Nelson 47ecd6229d Update changelog 2018-03-23 19:17:03 -07:00
Daniel Nelson 338def3524 Fix DC/OS URL creation race (#3932) 2018-03-23 19:14:07 -07:00
Daniel Nelson 519e0274a0 Update changelog 2018-03-23 11:57:42 -07:00
Evan Kinney 798fea3109 Add HTTP basic auth support to the http_listener input (#3496) 2018-03-23 11:56:49 -07:00
Daniel Nelson 5389f40057 Update changelog 2018-03-23 11:54:06 -07:00
Daniel Nelson c12c6b9d35 Add TLS support to kapacitor input (#3927) 2018-03-23 11:53:18 -07:00
Daniel Nelson 00cac268d2 Update changelog 2018-03-23 11:52:00 -07:00
Daniel Nelson 338cd0b757 Add tag for target url to phpfpm input (#3928) 2018-03-23 11:50:52 -07:00
Daniel Nelson 2750228aa9 Skip hanging test on darwin 2018-03-22 14:41:37 -07:00
Mark Wilkinson - m82labs cce1601a54 Fix SQL Server 2008 compatibility (#3916)
* Fixed a bug in the performance counter query when run against SQL Server 2016 SP1-CU2. The performance counter DMV contains duplicate entries which are not handled by the query.

* Adding more stats related to workload groups.

* Adding new RG stats, removing "host" tag

* Removed workload group query

* Fixing some 2008 compat issues, removed the host field from the result set.

* Adding fixes for SQL Server 2008 compat around RG columns. Also converted perf counter query to support named instances.
2018-03-22 10:38:40 -07:00
Daniel Nelson ed6e5b40c3 Update fsnotify path in license of dependencies 2018-03-21 23:06:14 -07:00
Daniel Nelson c2f6bae271 Use copy of fsnotify from tail vendor 2018-03-21 10:23:28 -07:00
Pierre Tessier 8907db858e Remove noisy debug message from Wavefront output (#3899) 2018-03-19 11:04:35 -07:00
Daniel Nelson 239dddbe1a Fix breaker stat name in elasticsearch comment 2018-03-16 12:58:39 -07:00
Daniel Nelson ec90824686 Set 1.5.3 release date 2018-03-14 16:32:18 -07:00
Daniel Nelson f50935d930 Update changelog 2018-03-14 12:10:16 -07:00
Jonas Hahnfeld cdcfff2fb6 Add output of stderr in case of error to exec log message (#3862)
If the command failed with a non-zero exit status, there might be an error
message on stderr. Append the first line to the error message to ease the
search for its cause.
2018-03-14 12:09:01 -07:00
Daniel Nelson db3373e51e Remove gdm -parallel false 2018-03-14 11:36:03 -07:00
Daniel Nelson e0afe7e5cd Use previous image on appveyor 2018-03-14 11:02:33 -07:00
Daniel Nelson 6485291033 Update changelog 2018-03-13 21:09:54 -07:00
Chris Ottinger 25cc56d1e3 Added config flag to skip collection of network protocol metrics (#3880) 2018-03-13 21:08:21 -07:00
Daniel Nelson 7f50fa26e9 Disable parallel restore in gdm
May be aggravating the timeout issue on appveyor builds.
2018-03-13 20:44:51 -07:00
Daniel Nelson ed56360db9 Use Go 1.9.4 for builds 2018-03-09 14:37:26 -08:00
Patrick Hemmer 48cbdbcdde Fix socket_listener setting ReadBufferSize on TCP sockets (#3874) 2018-03-09 09:44:35 -08:00
Daniel Nelson 6437e23dd4 Update changelog 2018-03-08 15:16:46 -08:00
Daniel Nelson 7bc438b6b0 Update http_response documentation 2018-03-08 15:13:26 -08:00
Germán Jaber e9da4e529e Add result related tags and fields to http_response (#3814) 2018-03-08 14:55:59 -08:00
Margarita Bliznikova fe78df3ba0 Fix intermittent TestTailBadLine failures (#3869) 2018-03-08 13:03:48 -08:00
Daniel Nelson b8c4a6bb39 Update changelog 2018-03-08 10:54:03 -08:00
Dennis Schön 6a1f5767bb Fix uptime metric in passenger input plugin (#3871) 2018-03-08 10:52:58 -08:00
Daniel Nelson 314630271d Update changelog 2018-03-07 14:17:11 -08:00
dilshatm cca7ef3dde Fix collation difference in sqlserver input (#3786) 2018-03-07 14:16:17 -08:00
Daniel Nelson 30d595fe0c Update example config 2018-03-07 13:49:04 -08:00
Daniel Nelson d191ccdd3f Update docs for addition of override processor 2018-03-07 13:47:54 -08:00
Karsten Schnitter 82d224d9e2 Add override processor (#3773)
This plugin can perform the standard metric modifications using override semantics.
2018-03-07 13:27:43 -08:00
Daniel Nelson 64c1a7a860 Update changelog 2018-03-06 13:12:16 -08:00
Margarita Bliznikova 86f7767439 Add host to ping timeout log message (#3853) 2018-03-06 13:10:44 -08:00
Daniel Nelson 19e79b0bf8 Update changelog 2018-03-06 12:12:26 -08:00
Marcel 1a03db7119 Add ability to override proxy from environment in http response (#3626) 2018-03-06 12:11:38 -08:00
Demian Dekoninck 8fac10e3e5 Add link to ServiceInput interface in contributing guide (#3828) 2018-03-06 10:14:14 -08:00
Patrick O'Keeffe a409c6c60a Improve fail2ban use_sudo docs (#3852) 2018-03-05 16:32:45 -08:00
Jiri Tyr 3532667bdc Fixing error in snmp example config (#3855) 2018-03-05 15:29:26 -08:00
Daniel Nelson 9114580102 Update changelog 2018-03-05 15:28:09 -08:00
Carl Pacey 54f4a5a502 Add sum stat to basicstats aggregator (#3797) 2018-03-05 15:26:31 -08:00
Daniel Nelson 491b27adbb Update changelog 2018-03-05 11:42:01 -08:00
Michael Boudreau f7ffcfbba3 Add Solr 3 compatibility (#3799) 2018-03-05 11:41:10 -08:00
Daniel Nelson 5821f6f487 Update changelog 2018-02-25 01:06:44 -08:00
Daniel Nelson 3c179494ce Disable keepalive in mqtt output. (#3779)
This functionality currently has race conditions that can result in the
output deadlocking.
2018-02-25 01:04:04 -08:00
Daniel Nelson 7648a2a5c5 Update changelog 2018-02-20 17:33:25 -08:00
Pranay Kanwar 6dd39616de Convert boolean metric values to float in datadog output (#3804) 2018-02-20 17:32:18 -08:00
Daniel Nelson 3a4507866d Update changelog 2018-02-20 16:07:10 -08:00
Fred Cox 4a4557e371 Add server option to unbound plugin (#3713) 2018-02-20 16:06:13 -08:00
Daniel Nelson 20a5887a4a Update changelog 2018-02-20 13:56:44 -08:00
Jake Champlin 3e31ae4eca Add shard server stats to the mongodb input plugin (#3808) 2018-02-20 13:55:56 -08:00
Daniel Nelson 1fc8b2c71e Fix metric buffer limit in internal plugin after reload 2018-02-19 20:55:28 -08:00
Daniel Nelson 495d9700f6 Update changelog 2018-02-16 14:08:11 -08:00
S 17e1bff028 Add option to disable labels in prometheus output for string fields (#3765) 2018-02-16 14:07:26 -08:00
Dwight Spencer c48b15e620 Add README.md for mqtt output (#3764) 2018-02-16 13:51:20 -08:00
Daniel Nelson 2ade95e6bb Mention new dropwizard parser more prominently. 2018-02-15 20:33:57 -08:00
Daniel Nelson 0c161b70ca Update changelog 2018-02-15 20:11:52 -08:00
everpcpc 37ae4b00bf Support deadline in ping plugin (#3783) 2018-02-15 20:11:07 -08:00
Daniel Nelson eceba38ec2 Update changelog 2018-02-15 20:06:01 -08:00
Jorge Canha b8a4eae583 Add keep alive support to the TCP mode of statsd (#3781) 2018-02-15 20:04:49 -08:00
Daniel Nelson f567be3d0f Update changelog 2018-02-15 19:09:50 -08:00
Daniel Nelson dcb8417277 Use proxy from environment in http input 2018-02-15 19:06:22 -08:00
Daniel Nelson c76ef34edc Deprecate httpjson input 2018-02-15 19:06:22 -08:00
Daniel Nelson bc96f65881 Add configurable method to http input 2018-02-15 19:06:22 -08:00
Daniel Nelson 0d683f65a3 Check parser initialization earlier 2018-02-15 19:06:22 -08:00
Daniel Nelson 0851de714d Add url tag only if not already set 2018-02-15 19:06:20 -08:00
Daniel Nelson 027ff4e2dd Allow setting basic auth with empty username 2018-02-15 19:05:36 -08:00
Daniel Nelson f664fef1f6 Update http input documentation 2018-02-15 19:05:36 -08:00
Nicolas Grange f1861a08b9 Add http input plugin which supports any input data format (#3546) 2018-02-15 16:00:10 -08:00
Mark Wilkinson - m82labs e14cf49f63 Added additional SQL Server performance counters (#3770)
* Included system databases in server properties, added backup/restore throughput counter, error counters, and user settable counters.

* Added more resource governor counters.

* Added Target and Total Server Memory counters.

* Removed the c_type tag from the performance counters query, added more documentation instead.
2018-02-09 12:45:22 -08:00
Daniel Nelson 2af3d8c15b Update changelog 2018-02-09 12:13:07 -08:00
efficks d67e46bb4e Fix ping plugin not reporting zero durations (#3778) 2018-02-09 12:11:19 -08:00
Daniel Nelson 23e9306ce0 Adjust time of nightly build 2018-02-07 18:37:33 -08:00
Daniel Nelson e5919c9c5f Update changelog 2018-02-07 18:37:32 -08:00
Daniel Nelson 2950a3bdeb Add TLS support to the mesos input plugin (#3769) 2018-02-07 18:36:38 -08:00
Daniel Nelson d467a20b2c Install new requirements for fpm gem install 2018-02-06 11:33:18 -08:00
Daniel Nelson 02892404d0 Update changelog 2018-02-05 12:55:22 -08:00
Andy Cobaugh 1ab0153cb5 Add additional metrics and reverse metric names option to openldap (#3722) 2018-02-05 12:48:41 -08:00
Daniel Nelson 3e37f9f591 Update paho mqtt to latest release 2018-02-05 12:20:14 -08:00
Daniel Nelson ebaf355ad9 Update changelog 2018-02-05 11:17:36 -08:00
Philipp Weber 0cb1b147e6 Remove userinfo from url tag in prometheus input (#3743) 2018-02-05 11:16:00 -08:00
Daniel Nelson 248ac6d92b Update sample config in contributing docs 2018-02-02 12:31:31 -08:00
Daniel Nelson 4c4733fdee Run nightly build sequentially 2018-02-02 12:31:31 -08:00
Daniel Nelson 0e5eb8a584 Fix Makefile on Windows and use in AppVeyor build (#3748) 2018-02-02 12:25:59 -08:00
Pierre Tessier 94e603b46a Fix example source_override values in wavefront output (#3744) 2018-02-02 10:51:00 -08:00
Daniel Nelson ea83d46daf Update gitignore 2018-02-01 16:18:01 -08:00
Daniel Nelson 8c00010b86 Update changelog 2018-02-01 16:12:57 -08:00
Daniel Nelson 192af981c0 Improve procstat readme 2018-02-01 16:12:08 -08:00
Ben Aldrich 551c771bba Add native Go method for finding pids to procstat (#3559) 2018-02-01 15:14:27 -08:00
Paul Myjavec 9b4177d46c Use CircleCI 2.0 for builds (#3731) 2018-02-01 15:05:39 -08:00
Daniel Nelson 1164289619 Update changelog 2018-02-01 12:15:24 -08:00
Daniel Nelson 7162db68f4 Update sqlserver readme; enable query_version = 2 in default config
If unset, query_version is still treated as version 1 for compatibility.
2018-02-01 12:13:49 -08:00
Mark Wilkinson - m82labs 43c092d600 Add new sql server output data model (#3618) 2018-02-01 11:50:26 -08:00
John Eismeier 32a94f1c89 Fix spelling mistakes in zipkin and apache inputs (#3741) 2018-02-01 11:15:12 -08:00
Daniel Nelson 71c8bcd540 Update changelog 2018-02-01 11:13:14 -08:00
Philipp Weber b39dd3a363 Add TLS and http basic_auth to prometheus_client output (#3719) 2018-02-01 11:12:16 -08:00
Daniel Nelson cdbc77ed53 Update documentation style for smart input 2018-02-01 10:51:03 -08:00
Daniel Nelson 34fbdc9b7f Update changelog 2018-01-31 12:30:03 -08:00
Laurent Sesquès e0bba3cdec Add Ipset input plugin (#3346) 2018-01-31 12:25:27 -08:00
Daniel Nelson a20d167619 Update changelog 2018-01-30 18:08:31 -08:00
Daniel Nelson b6912e84f4 Set path to / if HOST_MOUNT_PREFIX matches full path (#3736) 2018-01-30 18:06:53 -08:00
Daniel Nelson 5b1203ca34 Set release date for 1.5.2 2018-01-30 14:00:34 -08:00
Daniel Nelson ebc59cf81d Update changelog 2018-01-30 14:00:33 -08:00
Daniel Nelson 5a4ceab81b Exclude master_replid fields from redis input (#3725) 2018-01-30 13:58:44 -08:00
Daniel Nelson 7ac39869ff Do not build nats input on freebsd
This plugin can work on freebsd, but will only build with cgo
enabled.  For now, disable this plugin to avoid this requirement.
2018-01-30 12:04:21 -08:00
Daniel Nelson 44ea2adfa9 Update changelog 2018-01-29 14:02:44 -08:00
Philipp Weber 90efb9c844 Add support for setting bsd source address to the ping input (#3726) 2018-01-29 14:01:00 -08:00
Daniel Nelson a6e100fd54 Update changelog 2018-01-29 12:16:15 -08:00
Ildar Svetlov 7a44c309b7 Add ability to select which queues will be gathered to rabbitmq input (#3702) 2018-01-29 12:14:49 -08:00
Daniel Nelson 95c9b81397 Update nats readme 2018-01-26 17:15:02 -08:00
Daniel Nelson 22b487769f Add nats input to readme and changelog 2018-01-26 15:18:15 -08:00
Menno Finlay-Smits 1a16126a09 Add NATS Monitoring Input Plugin (#3674) 2018-01-26 15:14:54 -08:00
Daniel Nelson 49e2308e71 Update changelog 2018-01-26 15:03:19 -08:00
Ivan Lopez 782b1336e3 Add RabbitMQ cluster and running nodes count and running node status (#3703) 2018-01-26 15:00:58 -08:00
Daniel Nelson ff63421d0e Return Accumulator interface from NewAccumulator 2018-01-26 11:40:34 -08:00
Peter eca80961b8 Expand on tagvalue option in postgresql_extensible (#3720) 2018-01-25 15:38:11 -08:00
Daniel Nelson 7a197a3b2e Update changelog 2018-01-25 15:05:46 -08:00
Logan 1e5ee780e5 Allow running as console application on Windows (#2754) 2018-01-25 15:04:09 -08:00
Daniel Nelson ed6c59bf97 Update changelog 2018-01-25 13:47:21 -08:00
Pierre Tessier 506cbf0d48 Add timeout to wavefront output write (#3711) 2018-01-25 13:44:25 -08:00
Daniel Nelson 54b8fbb38c Update changelog 2018-01-22 12:06:10 -08:00
Daniel Nelson f3a57ea69e Remove graphite serializer replacement of dot with underscore in field key (#3705) 2018-01-22 12:04:16 -08:00
Daniel Nelson 35ec1ad178 Update changelog 2018-01-22 12:01:09 -08:00
Daniel Nelson 0f63b18c3b Avoid loop creation in second processor pass (#3656) 2018-01-22 11:16:07 -08:00
Daniel Nelson 04f5493ccc Limit wait time for writes in mqtt output (#3699) 2018-01-22 11:15:13 -08:00
Daniel Nelson d1cbf4598c Make error loading tls config fatal in mysql input 2018-01-19 12:16:28 -08:00
Daniel Nelson 704c6c01eb Update changelog 2018-01-18 17:39:03 -08:00
Piotr Popieluch 3ea7c2d222 Align aggregator period with internal ticker to avoid skipping metrics (#3693)
By the time aggregator.run() was called, about 600ms had already passed since setting now, which was skewing the aggregation intervals and skipping metrics.
2018-01-18 17:37:53 -08:00
Daniel Nelson 96a175a54c Update changelog 2018-01-17 15:28:35 -08:00
Piotr Popieluch 5f8d908f74 Reconnect before sending graphite metrics if disconnected (#3680) 2018-01-17 15:27:24 -08:00
Daniel Nelson 6e253a67a7 Update changelog 2018-01-17 15:14:07 -08:00
Daniel Nelson 7e597c8992 Add support for using globs in devices list of diskio input plugin (#3687) 2018-01-17 15:12:05 -08:00
Daniel Nelson c3c954441e Use go-redis for the redis input (#3661) 2018-01-17 14:57:46 -08:00
Daniel Nelson e333dcdfc1 Update changelog 2018-01-17 14:38:47 -08:00
Michael Boudreau 20f5741d44 Fix index out of bounds error in solr input plugin (#3683) 2018-01-17 14:37:34 -08:00
Daniel Nelson 4da881d646 Update changelog 2018-01-16 13:46:41 -08:00
Noah Crowley b9ec94f181 Ignore empty lines in Graphite plaintext (#3684) 2018-01-16 13:44:56 -08:00
atzoum 72a235d9cf Add string and boolean support to dropwizard; fix escaping of tags/fields (#3664) 2018-01-16 12:12:14 -08:00
Daniel Nelson 26b34ef6ab Update changelog 2018-01-12 17:46:20 -08:00
Jacob McCann 286dcab4f9 Add container health metrics to docker input (#3666) 2018-01-12 17:43:51 -08:00
Daniel Nelson 4c9e9ac6f3 Skip intermittent test on CircleCI 2018-01-12 16:49:05 -08:00
Jacob McCann e226aa49c9 Listen on localhost interface in tests (#3667) 2018-01-12 12:08:19 -08:00
Daniel Nelson b7b55d1806 Set 1.5.1 release date 2018-01-10 13:27:58 -08:00
Daniel Nelson 651b576175 Skip CircleCI test of tail plugin due to intermittent deadlock 2018-01-09 15:01:20 -08:00
Daniel Nelson 80ac46a468 Pin crate docker image for testing 2018-01-09 13:10:40 -08:00
Daniel Nelson 696dcc06b0 Update release notes 2018-01-08 16:29:59 -08:00
Daniel Nelson 6097a2fb96 Update changelog 2018-01-08 15:21:20 -08:00
Daniel Nelson a177486463 Update changelog 2018-01-08 15:18:31 -08:00
Daniel Nelson a08c1be88f Add gjson and match to dependency license file 2018-01-08 15:15:23 -08:00
atzoum 05d691aa81 Add support for dropwizard input format (#2846) 2018-01-08 15:11:36 -08:00
Daniel Nelson 5207b32b25 Reorder httpjson config to keep variables out of toml table 2018-01-08 15:06:58 -08:00
Daniel Nelson ce09aa35dd Update changelog 2018-01-05 16:04:12 -08:00
James ab4f3176bb Use persistent connection to postgresql database (#2701) 2018-01-05 16:03:09 -08:00
Daniel Nelson b9286bbe23 Fix link to cratedb readme 2018-01-05 16:01:06 -08:00
Daniel Nelson d531f49f9d Update changelog 2018-01-05 16:00:44 -08:00
Daniel Nelson aec09e711a Add user privilege level setting to IPMI sensors (#3643) 2018-01-05 15:59:25 -08:00
Daniel Nelson 500c3b68c9 Update changelog 2018-01-05 14:56:54 -08:00
gerardocorea92 3997abfd65 Add available_entropy field to kernel input plugin (#3524) 2018-01-05 14:54:29 -08:00
Daniel Nelson 279d38b79a Update release notes for 1.5 2018-01-04 18:05:21 -08:00
Daniel Nelson b9fa390881 Add link to docs for configuring the openldap monitoring backend 2018-01-04 15:34:55 -08:00
Daniel Nelson e563c97b5e Update changelog 2018-01-04 15:29:56 -08:00
Daniel Nelson 86701ad873 Escape environment variables during config toml parsing (#3637) 2018-01-04 15:28:00 -08:00
Daniel Nelson 91713ecfd6 Update changelog 2018-01-03 17:47:13 -08:00
Richard Elling f13afea7d9 Add support for additional metrics on Linux in zfs input (#3565) 2018-01-03 17:45:48 -08:00
Daniel Nelson 1ea8d648dc Update changelog 2018-01-03 17:40:37 -08:00
kerams bde4d004df Add support for exchanges to RabbitMQ input (#3619) 2018-01-03 17:38:11 -08:00
Daniel Nelson 57138e0668 Update changelog 2018-01-03 13:44:33 -08:00
kerams 19a5e0b433 Fix deliver_get field in rabbitmq input (#3633) 2018-01-03 13:43:17 -08:00
Daniel Nelson 646506b4eb Update changelog 2018-01-02 16:38:20 -08:00
Daniel Nelson b657480a1b Add wired field to mem input (#3632) 2018-01-02 16:37:11 -08:00
Daniel Nelson 2f7906d5b9 Update changelog 2018-01-02 16:36:04 -08:00
Adam Johnson 371423cdf5 Reintroduce AWS credential check to cloudwatch output (#3587) 2018-01-02 16:33:15 -08:00
Daniel Nelson dce9c335c5 Add information about how to set permissions for postfix input (#3594) 2018-01-02 14:09:14 -08:00
Daniel Nelson 5bb0d30cf5 Update changelog 2017-12-28 16:24:04 -08:00
Daniel Nelson 6b7eb19996 Set content-type charset in influxdb output and allow it be overridden (#3593) 2017-12-28 16:22:19 -08:00
Daniel Nelson d61d406da5 Update changelog 2017-12-28 16:19:04 -08:00
Daniel Nelson 262bd1f326 Fix DC/OS login expiration time (#3625) 2017-12-28 16:17:40 -08:00
Daniel Nelson 0f6aadf253 Update changelog 2017-12-28 16:12:56 -08:00
Daniel Nelson a38fc4270c Fix name error in jolokia2_agent sample config (#3624) 2017-12-28 16:10:00 -08:00
Daniel Nelson 6ff256b71a Fix grammar in haproxy docs 2017-12-21 18:46:03 -08:00
Daniel Nelson 4412f7522a Fix grammar in dcos readme 2017-12-21 16:26:50 -08:00
Daniel Nelson 08c0a8a7a8 Update changelog 2017-12-18 20:39:26 -08:00
kerams dd8639366c Add messages_delivered_get to rabbitmq_overview (#3596) 2017-12-18 20:36:59 -08:00
Daniel Nelson 13fda9405d Update changelog 2017-12-14 16:59:58 -08:00
Jeff Ashton e1bc191f9f Add control over which stats to gather in basicstats aggregator (#3580) 2017-12-14 16:56:10 -08:00
Daniel Nelson dfb68c5810 Update bond input description 2017-12-14 16:03:29 -08:00
timhallinflux df4ad82317 Improve bond plugin description (#3588) 2017-12-14 15:59:20 -08:00
Daniel Nelson 697b191807 Update haproxy documentation 2017-12-14 15:50:03 -08:00
Daniel Nelson 4901f321ee Set release date for 1.5.0 2017-12-14 10:58:33 -08:00
Daniel Nelson bd3d6452f2 Remove AWS credential check from cloudwatch output (#3583)
This method is reported to not work with IAM Instance Profiles, and we
do not want to make any calls that would require additional permissions.
2017-12-13 17:51:55 -08:00
Brian Knight 956a2226b0 Update README with missing Redis measurements (#3582) 2017-12-13 11:24:48 -08:00
Antoine Augusti 5f6a657809 Fix refType documentation for GitHub webhooks (#3579) 2017-12-13 11:22:47 -08:00
Daniel Nelson b1e02de879 Update changelog 2017-12-13 11:17:36 -08:00
Ildar Svetlov 856655f955 Don't add system input uptime_format as a counter (#3578) 2017-12-13 11:13:56 -08:00
Daniel Nelson 0111ee6b84 Update changelog 2017-12-13 10:58:50 -08:00
Logan 4411e3bcc8 Typo and sentence consistency (#3581) 2017-12-13 10:51:15 -08:00
Daniel Nelson bcb66d759a Update changelog 2017-12-12 13:32:47 -08:00
Mike Danko 60c39ced69 Fix various mysql data type conversions (#3554) 2017-12-12 13:22:11 -08:00
Steve Banik 825d338386 Fixed typo in README.md (#3574) 2017-12-12 11:21:32 -08:00
Daniel Nelson be1d4dc9c0 Update changelog 2017-12-11 18:01:50 -08:00
Daniel Nelson 2cf34dd875 Fix separation of multiple prometheus_client outputs (#3570) 2017-12-11 18:00:19 -08:00
Daniel Nelson 09fddafed6 Update exec plugin documentation 2017-12-11 17:58:06 -08:00
Daniel Nelson c46863b87d Update changelog 2017-12-11 15:34:52 -08:00
Daniel Nelson 8761193dc7 Add idle state to processes test 2017-12-11 15:33:44 -08:00
Ted Zlatanov 6a5d8e31b3 Support I (idle) process state on procfs+Linux (#3530) 2017-12-11 15:31:52 -08:00
Daniel Nelson 297282cf78 Use auto type detection for scanned devices in smart input (#3561) 2017-12-08 18:03:12 -08:00
Daniel Nelson ad70f169af Update changelog 2017-12-08 18:02:01 -08:00
Daniel Nelson 3e2eadaf5a Update sarama-cluster to latest release (#3560) 2017-12-08 17:59:06 -08:00
Daniel Nelson afa6347631 Add benchmark test for single metric 2017-12-08 13:23:08 -08:00
Daniel Nelson 4452b0a03f Use device name instead of abs path for devices tag in smart input (#3550) 2017-12-08 13:22:41 -08:00
Daniel Nelson 899ad26373 Update changelog 2017-12-07 11:32:54 -08:00
Arkady Emelyanov 1326f61635 Add health status mapping from string to int in elasticsearch input (#3551) 2017-12-07 11:31:03 -08:00
Daniel Nelson 6265a3b06f Log connect error only in wavefront output (#3549) 2017-12-06 14:55:29 -08:00
Daniel Nelson b5cd471c1d Fix formatting in changelog 2017-12-04 13:18:14 -08:00
Daniel Nelson 4eee2e772a Update next version number for dev builds 2017-12-01 11:52:46 -08:00
Daniel Nelson 589da9c481 Update example config 2017-12-01 11:49:07 -08:00
Daniel Nelson 407f0fe545 Update changelog 2017-12-01 11:42:00 -08:00
Daniel Nelson 0bdd2763b3 Update changelog 2017-12-01 11:23:18 -08:00
Daniel Nelson 7e91563647 Fix HOST_MOUNT_PREFIX in docker with disk input (#3529) 2017-12-01 11:21:39 -08:00
Daniel Nelson b89db57629 Update changelog 2017-11-30 18:42:14 -08:00
Daniel Nelson bca73f0923 Add option to amqp output to publish persistent messages (#3528) 2017-11-30 18:40:12 -08:00
Daniel Nelson 1dee532574 Add time import 2017-11-29 17:05:13 -08:00
Daniel Nelson 24828e1185 Update changelog 2017-11-29 16:36:00 -08:00
Nathan Ferch d758008c1e Add input plugin for OpenBSD/FreeBSD pf (#3405) 2017-11-29 16:32:50 -08:00
Daniel Nelson 4337c98b41 Update changelog 2017-11-29 12:17:46 -08:00
Bob Shannon dd8157ec68 Add support for glob patterns in net input plugin (#3140) 2017-11-29 12:16:34 -08:00
Daniel Nelson ca76242e3e Update changelog 2017-11-29 12:10:56 -08:00
Daniel Nelson e79639859b Update gopsutil version to include netstat fix (#3513) 2017-11-29 12:06:47 -08:00
Daniel Nelson aaed325345 Add dcos plugin to changelog and readme 2017-11-29 11:54:33 -08:00
Daniel Nelson 2ce21bff24 Add input plugin for DC/OS (#3519) 2017-11-29 11:50:32 -08:00
Patrick Hemmer b6e8214396 Fix postfix plugin age to use ctime, not mtime (#3525) 2017-11-29 11:25:31 -08:00
Daniel Nelson eca20036b7 Update changelog 2017-11-29 10:52:59 -08:00
Daniel Nelson 9325f6e937 Add slab to mem plugin (#3518) 2017-11-29 10:49:45 -08:00
Daniel Nelson 62b7cb91dc Add bond input to readme and update changelog 2017-11-28 15:19:30 -08:00
Ildar Svetlov 2ccebf2371 Add bond input plugin (#3424) 2017-11-28 15:16:19 -08:00
Daniel Nelson 1ebdde9487 Update changelog 2017-11-28 10:10:36 -08:00
Patrick Hemmer 2ae35591a0 Add postfix input plugin (#2553) 2017-11-28 10:08:41 -08:00
Daniel Nelson 2681be7caa Update changelog 2017-11-27 17:06:50 -08:00
Lukasz Jagiello 9c45a2150d Use deb-systemd-invoke to restart service (#3506)
From man page:
```
deb-systemd-invoke is a Debian-specific helper script which asks
/usr/sbin/policy-rc.d before performing a systemctl call.

deb-systemd-invoke is intended to be used from maintscripts to start
systemd unit files. It is specifically NOT intended to be used
interactively by users. Instead, users should run systemd and use
systemctl, or not bother about the systemd enabled state in case they
are not running systemd.
```

This PR replaces regular `systemctl` with `deb-systemd-invoke`.
2017-11-27 17:05:32 -08:00
Lukasz Jagiello b7a50b9414 Add shadow-utils dependency to rpm package (#3505) 2017-11-27 17:02:16 -08:00
Dylan Meissner d9e2599de7 Jolokia2 handles unordered mbean object name properties (#3504) 2017-11-27 13:43:19 -08:00
Daniel Nelson fb1edd5da3 Update changelog 2017-11-27 12:32:36 -08:00
Laurent Gosselin 5ae114bde7 Fix global variable collection when using interval_slow option in mysql input (#3500) 2017-11-27 12:29:51 -08:00
Daniel Nelson e3812e9b97 Update changelog 2017-11-20 16:50:18 -08:00
Daniel Nelson f9ded8fdd8 Fix snmp tools output parsing when they contain Windows eols (#3396) 2017-11-20 16:48:30 -08:00
Daniel Nelson 662e2df779 Update changelog 2017-11-20 16:27:18 -08:00
Leandro Piccilli 9e95d51648 Add support for tags in the index name in elasticsearch output (#3470) 2017-11-20 16:25:36 -08:00
Daniel Nelson 612d81d689 Update changelog 2017-11-20 14:40:45 -08:00
aromeyer dd6fbb62b5 Add opensmtpd input plugin (#3449) 2017-11-20 14:39:13 -08:00
Daniel Nelson ecc619f538 Update changelog 2017-11-20 14:37:09 -08:00
aromeyer f89c774226 Add unbound input plugin (#3434) 2017-11-20 14:32:06 -08:00
Daniel Nelson 51ec55fb16 Update changelog 2017-11-20 14:23:16 -08:00
Leandro Piccilli ca2c1e75c7 Add index by week number to Elasticsearch output (#3490) 2017-11-20 14:22:29 -08:00
Daniel Nelson 527892eef8 Update changelog 2017-11-20 14:20:05 -08:00
Chris Goller 113184ddae Use hexadecimal ids and lowercase names in zipkin input (#3488) 2017-11-20 14:19:32 -08:00
Daniel Nelson f9b808572f Update changelog 2017-11-16 16:51:02 -08:00
erayaslan 650f44980b Use MAX() instead of SUM() for latency measurements in sqlserver (#3471) 2017-11-16 16:49:51 -08:00
Daniel Nelson a8c6a31d1e Update changelog and add particle webhook to readme 2017-11-16 16:11:20 -08:00
David G. Simmons 1fd88ad0d8 Add Particle Webhook Plugin (#3477) 2017-11-16 16:03:19 -08:00
Pierre Fersing f30716e2d0 Whitelist allowed char classes for graphite output (#3473) 2017-11-15 14:44:20 -08:00
Daniel Nelson 3405deebe3 Skip test requiring cratedb server in short test mode 2017-11-13 15:22:57 -08:00
Daniel Nelson c4720e5d8b Update changelog 2017-11-13 15:09:05 -08:00
Daniel Nelson e57cf8c9df Fix typo in error message 2017-11-13 15:07:54 -08:00
faye-sama ccd21755d5 Fail metrics parsing on unescaped quotes (#3409)
Before this change, the Fields() method on a metric parsed from a line
with unescaped quotes could panic. This change makes such a line
unparseable.

Fixes #3326
2017-11-13 15:06:47 -08:00
Patrick Hemmer 176064cdf7 Add tests for procstat systemd & cgroup matching (#3469) 2017-11-13 14:45:31 -08:00
Daniel Nelson 23b0e1bc7a Update changelog 2017-11-13 11:02:01 -08:00
Patrick Hemmer 2323d9ae48 Add systemd unit pid and cgroup matching to procstat (#3459) 2017-11-13 10:59:27 -08:00
Daniel Nelson 084f7dc53b Update changelog 2017-11-10 14:39:11 -08:00
Trevor Pounds 742949594b Compile with Go 1.9.2 (#3458) 2017-11-10 14:39:00 -08:00
Daniel Nelson 9befd42ffe Update changelog 2017-11-09 14:05:36 -08:00
Felix Geisendörfer e1005ebfab Add CrateDB output plugin (#3210) 2017-11-09 14:03:16 -08:00
Daniel Nelson 5234e4ff7b Set 1.4.4 release date 2017-11-08 15:21:20 -08:00
Daniel Nelson 4489813ade Update changelog 2017-11-07 16:48:44 -08:00
Lukasz Jagiello 89650bdf52 Add Solr input plugin (#2019) 2017-11-07 16:44:09 -08:00
Daniel Nelson 293ca69a47 Update changelog 2017-11-07 14:37:04 -08:00
Pierre Tessier 60e24fa3a7 Add modification_time field to filestat input plugin (#3305) 2017-11-07 14:32:48 -08:00
Daniel Nelson 80b60542fa Update contributing documentation 2017-11-07 13:59:06 -08:00
Daniel Nelson d12cdb2185 Update changelog 2017-11-07 13:59:06 -08:00
Daniel Nelson 07a98ea5f7 Always ignore autofs filesystems in disk input (#3440) 2017-11-07 11:45:09 -08:00
Daniel Nelson c8c02ff10d Update changelog 2017-11-07 11:43:15 -08:00
Daniel Nelson b486950b8e Use current time if container read time is zero value (#3437) 2017-11-07 11:41:53 -08:00
Daniel Nelson e08a0c9b5c Update changelog 2017-11-07 11:36:29 -08:00
Daniel Nelson 14bc81e10d Update gopsutil to v2.17.10 (#3441) 2017-11-07 11:26:11 -08:00
Daniel Nelson bfd7c8504e Update changelog 2017-11-06 17:42:42 -08:00
Bob Shannon 180cd896fd Redact datadog API key in log output (#3420) 2017-11-06 17:41:14 -08:00
Daniel Nelson b2e0677c09 Revert particle webhook changes on master 2017-11-06 10:47:10 -08:00
David G. Simmons d2b34f3f51 Merge branch 'master' of https://github.com/influxdata/telegraf into dn-particle-plugin 2017-11-04 09:30:17 -04:00
David G. Simmons 29e48c082a Revert "Undo Revert "Revert changes since 9b0af4478""
This reverts commit 6e6aefe5da.
2017-11-04 09:19:37 -04:00
David G. Simmons ad6946cd8c Undo Revert "Revert changes since 9b0af4478"
This reverts commit 2c31345c70.
2017-11-04 09:14:52 -04:00
David G. Simmons 4b6e258d4e Readme update 2017-11-04 08:43:13 -04:00
Daniel Nelson f3d90bdac6 Update http_listener certs 2017-11-03 21:52:45 -07:00
Daniel Nelson b573039af4 Revert changes since 9b0af4478 2017-11-03 21:10:56 -07:00
David G. Simmons f26c640d06 Merge branch 'master' of https://github.com/influxdata/telegraf into dn-particle-plugin 2017-11-03 13:48:45 -04:00
David G. Simmons 0e0e91ef1e Revert "New Particle Plugin"
This reverts commit ba462f5c94.
2017-11-03 13:28:54 -04:00
David G. Simmons 1b7a1d9228 Revert "bug fixes and refactoring"
This reverts commit 86961cc814.
2017-11-03 13:28:35 -04:00
David G. Simmons ae9c881d33 Revert "Update README.md"
This reverts commit 8ed00af10a.
2017-11-03 13:28:00 -04:00
David G. Simmons 9acf7f18ca Revert "Updated README.md"
This reverts commit a6ada03b91.
2017-11-03 13:27:06 -04:00
David G. Simmons f6ea405c7a Revert "Small fixes"
This reverts commit a987118b01.
2017-11-03 13:27:06 -04:00
David G. Simmons e1f478383e Revert "Updated Test JSON"
This reverts commit 92caf33fff.
2017-11-03 13:27:06 -04:00
David G. Simmons 479dead075 Revert "Updated Test JSON"
This reverts commit 92caf33fff.
2017-11-03 13:16:09 -04:00
David G. Simmons 43b8f19dce Merge branch 'master' into dn-particle-plugin 2017-11-03 12:13:49 -04:00
David G. Simmons 8f4db900f1 ignore mac-files 2017-11-03 12:07:03 -04:00
David G. Simmons c37b951717 Updated Test JSON 2017-11-03 12:07:03 -04:00
David G. Simmons 5cacf2738c Small fixes
Hoping to pass CircleCI this time
2017-11-03 12:07:03 -04:00
David G. Simmons 43fa8d180a Updated README.md 2017-11-03 12:07:03 -04:00
David G. Simmons 875e8ef16d Update README.md 2017-11-03 12:07:03 -04:00
David Norton d2e0794815 bug fixes and refactoring 2017-11-03 12:07:03 -04:00
David G. Simmons 8e70b56ded New Particle Plugin 2017-11-03 12:07:03 -04:00
David G. Simmons f5f8d5192a Updated Test JSON 2017-11-02 17:21:50 -04:00
David G. Simmons f73048da4e Fixed Readme 2017-11-02 17:19:37 -04:00
David G. Simmons c3a277999a test json update 2017-11-02 14:19:01 -04:00
Daniel Nelson c934f36c1c Remove incorrect comment about linker options 2017-11-01 18:17:51 -07:00
Daniel Nelson 43ac7b6e17 Add teamspeak to readme and update changelog 2017-11-01 13:30:43 -07:00
Patric Kanngießer deed04c2f7 Add Teamspeak 3 input plugin (#3315) 2017-11-01 13:27:59 -07:00
Maximilien Richer c569863119 Fix typos in comments (#3415) 2017-10-31 17:00:06 -07:00
Maximilien Richer 8bd21bb537 Add config to input-varnish README (#3414) 2017-10-31 16:58:45 -07:00
Daniel Nelson c849d88253 Clarify what it means to filter metrics from processors 2017-10-30 16:32:39 -07:00
Daniel Nelson d7857eea1a Update changelog 2017-10-30 15:35:34 -07:00
Daniel Nelson 97a1e4e706 Use explicit schemas in mqtt_consumer input (#3401) 2017-10-30 15:33:20 -07:00
Daniel Nelson ed52baf5dd Update changelog 2017-10-30 13:53:45 -07:00
Aditya C S 78df21d6a2 Add support for SSL settings to ElasticSearch output plugin (#3406) 2017-10-30 13:52:40 -07:00
Aditya C S f205c1b5e1 Update docker plugin README (#3404) 2017-10-30 12:26:39 -07:00
Daniel Nelson 8d8acb0f86 Update changelog 2017-10-27 11:55:17 -07:00
Maximilien Richer 626ff99b6e Add instance name option to varnish plugin (#3398)
This change adds a new configuration option to allow probing of
namespaced varnish instances, usually reached using the '-n' switch on
the varnish cli.
2017-10-27 11:53:59 -07:00
Daniel Nelson b78b8f2b50 Update changelog 2017-10-26 13:37:54 -07:00
Vladimir S ba551c4bb0 Perform DNS lookup before ping (#3385) 2017-10-26 13:35:37 -07:00
Daniel Nelson 957740f64f Update changelog 2017-10-25 15:29:56 -07:00
Daniel Nelson ccea33ff28 Gather concurrently from snmp agents (#3365) 2017-10-25 15:28:55 -07:00
Daniel Nelson db8b7e2e17 Set release date for 1.4.3 2017-10-25 14:15:10 -07:00
Daniel Nelson e29aa45091 Update changelog 2017-10-24 16:31:22 -07:00
Jeremy Doupe 43d69d805d Add history and summary types to telegraf and prometheus plugins (#3337) 2017-10-24 16:28:52 -07:00
Daniel Nelson 76fbe598a7 Update changelog 2017-10-24 16:25:49 -07:00
Daniel Nelson 71480f8cee Use golang.org/x/sys/unix instead of syscall in diskio (#3384) 2017-10-24 16:22:31 -07:00
Daniel Nelson db9c0643de Update changelog 2017-10-24 15:46:47 -07:00
Daniel Nelson e19692dd60 If the connector name cannot be unquoted, use the raw value (#3371) 2017-10-24 15:36:23 -07:00
Daniel Nelson 67fbc67906 Update changelog 2017-10-23 12:36:31 -07:00
Sergei Smolianinov b4a0edf5ba Fix ACL token usage in consul input plugin (#3376) 2017-10-23 12:31:27 -07:00
Daniel Nelson db5769136c Update changelog 2017-10-19 17:06:14 -07:00
Daniel Nelson bb6d50f011 Add support for decimal timestamps to ts-epoch modifier (#3358) 2017-10-19 16:36:32 -07:00
Daniel Nelson 2ad9183e47 Update changelog 2017-10-19 16:27:29 -07:00
Daniel Nelson 69591e1fab Remove warning when JSON contains null value (#3359) 2017-10-19 16:25:58 -07:00
Mamat Rahmat 46d489c632 Fix small typo in documentation (#3364) 2017-10-19 14:47:40 -07:00
Daniel Nelson 6c587849ce Update changelog 2017-10-18 17:43:01 -07:00
Daniel Nelson 7ef88c4bf9 Use labels in prometheus output for string fields (#3350) 2017-10-18 17:42:30 -07:00
Daniel Nelson a6de4577b0 Update changelog 2017-10-18 14:53:34 -07:00
Daniel Nelson 01cfe1d505 Fix prometheus passthrough for existing value types (#3351) 2017-10-18 14:51:08 -07:00
Daniel Nelson 66376d54a2 Update changelog 2017-10-18 13:57:58 -07:00
clheikes 0fe8ed9ce3 Fix TELEGRAF_OPTS expansion in systemd service unit (#3354) 2017-10-18 13:57:32 -07:00
Daniel Nelson d68302b153 Update changelog 2017-10-18 12:47:58 -07:00
Daniel Nelson 45b92ded5f Update changelog 2017-10-18 12:25:46 -07:00
Ayrdrie ca3c03f710 Fix mongodb input panic when restarting mongodb (#3355) 2017-10-18 12:24:30 -07:00
Daniel Nelson fc9cbd3906 Add release date info to FAQ 2017-10-17 10:43:53 -07:00
Daniel Nelson 057b1557bc Update changelog 2017-10-16 14:26:12 -07:00
Pierre Fersing 7efce41d39 Fix CPU system plugin gets stuck after system suspend (#3342) 2017-10-16 14:25:00 -07:00
Daniel Nelson d739aa448d Update changelog 2017-10-16 14:19:16 -07:00
Craig Wickesser cc47382da0 Add UDP IPv6 support to statsd input (#3344) 2017-10-16 14:18:36 -07:00
Daniel Nelson b73f493f98 Update changelog 2017-10-16 11:27:00 -07:00
Daniel Nelson 7ff6cdd017 Fix case sensitivity issue in sqlserver query (#3336) 2017-10-16 11:26:16 -07:00
Daniel Nelson c8a5598b59 Fix typo in ipmi_sensor readme 2017-10-16 11:10:06 -07:00
Daniel Nelson d6ad16f431 Add ipmi_sensor permission documentation 2017-10-13 13:53:18 -07:00
Daniel Nelson fc1309c8fb Remove timing sensitive riemann test 2017-10-13 11:30:30 -07:00
Daniel Nelson f8546397d1 Update changelog 2017-10-13 11:12:27 -07:00
Adam Johnson 52a1f98695 Fix cloudwatch output requires unneeded permissions (#3335) 2017-10-13 11:04:40 -07:00
Daniel Nelson 86dd509ab6 Update changelog 2017-10-12 17:26:58 -07:00
Windkit Li 6a433f97e4 Fix snmpwalk address format in leofs input (#3328) 2017-10-12 17:26:14 -07:00
Daniel Nelson 943296a31d Update changelog 2017-10-12 15:52:01 -07:00
Daniel Nelson 4729c5697a Fix container name filters in docker input (#3331) 2017-10-12 15:50:09 -07:00
Patrick Hemmer 43a2b49ad1 Fix documented equation for diskio average queue depth (#3334) 2017-10-12 15:08:51 -07:00
Daniel Nelson d3264d9746 Remove suggested plugins from readme.
These are confusing since we don't support all of the examples.
2017-10-11 12:56:33 -07:00
Daniel Nelson b2365a7ebf Set 1.4.2 release date 2017-10-10 13:29:31 -07:00
Daniel Nelson be54556980 Update readme and changelog for basicstats aggregator 2017-10-10 12:04:41 -07:00
Toni Moreno 2901652bf9 Add new basicstats aggregator (#2167) 2017-10-10 12:02:01 -07:00
Pierre Tessier 81acd6e8a6 Fix link for wavefront plugin in changelog (#3317) 2017-10-10 11:21:46 -07:00
Daniel Nelson c49eda6810 Use 5 second timeout overhead when waiting for ping to complete 2017-10-09 15:09:07 -07:00
Daniel Nelson 6ed5dc444d Add HasPoint method to testutil.Accumulator 2017-10-09 15:02:57 -07:00
Daniel Nelson abfeafd248 Document /etc/default/telegraf file 2017-10-06 16:57:57 -07:00
Daniel Nelson 6618850cd7 Update changelog 2017-10-06 16:17:09 -07:00
Christian Meilke 4783b872ea Add ability to limit node stats in elasticsearch input (#3304) 2017-10-06 16:16:32 -07:00
Daniel Nelson 1cb21d2bda Use golang 1.9.1 2017-10-05 16:19:53 -07:00
Daniel Nelson 5d4507e1f4 Update changelog 2017-10-05 16:15:43 -07:00
Daniel Nelson bb448d5af7 Use chunked transfer encoding in InfluxDB output (#3307) 2017-10-05 16:14:21 -07:00
Daniel Nelson 852a419fa5 Update changelog 2017-10-05 16:05:51 -07:00
Daniel Nelson e7bbb66957 Fix panic in cpu input if number of cpus changes (#3306) 2017-10-05 16:02:21 -07:00
Daniel Nelson df4c24a01e Release buffer back to pool earlier 2017-10-05 12:12:14 -07:00
Daniel Nelson fe40bcb92e Update changelog 2017-10-04 15:30:11 -07:00
Christian Meilke 4acee14f8a Add cluster health level configuration to elasticsearch input (#3269) 2017-10-04 15:29:32 -07:00
Daniel Nelson ed9c2ccfa2 Add smart to changelog and readme 2017-10-04 15:18:15 -07:00
Rickard von Essen 3be58c6571 Add smart input plugin for collecting S.M.A.R.T. data (#2449) 2017-10-04 15:15:58 -07:00
Daniel Nelson 6b1d3edf6e Update changelog 2017-10-03 15:25:19 -07:00
Daniel Nelson bfd3a3ac13 Add support for proxy environment variables to http_response (#3302) 2017-10-03 15:22:57 -07:00
Daniel Nelson 25fb040f3e Update changelog 2017-10-03 14:37:02 -07:00
Aditya C S 308f4af40f Collect Docker Swarm service metrics in docker input plugin (#3141) 2017-10-03 14:36:26 -07:00
Daniel Nelson 9257f3b148 Skip invalid urls in nginx input 2017-10-03 10:54:31 -07:00
David Norton 68cd7a45ef bug fixes and refactoring 2017-10-03 09:07:15 -04:00
Pierre Tessier ea11fae57b Added newline to each metric line in wavefront output (#3290) 2017-10-02 17:42:21 -07:00
Daniel Nelson 2a840cf3de Update changelog 2017-10-02 17:39:32 -07:00
Jimena Cabrera Notari 5ca10ac5fe Add extra wired tiger cache metrics to mongodb input (#3281) 2017-10-02 17:38:51 -07:00
Daniel Nelson af36d5a7e7 Update changelog 2017-10-02 17:16:38 -07:00
Daniel Nelson fba3d66681 Fix case sensitivity error in sqlserver input (#3287) 2017-10-02 17:15:34 -07:00
Daniel Nelson 9b7fe6ce99 Regenerate TLS certs due to expiration 2017-10-02 15:44:55 -07:00
David G. Simmons 8b67272c7d New Particle Plugin 2017-10-02 16:50:23 -04:00
Daniel Nelson 63e898c058 Fix mqtt_consumer connection_timeout test 2017-10-02 12:28:31 -07:00
Daniel Nelson fcfa7ed7bf Add Wavefront output to changelog and readme 2017-09-29 16:15:48 -07:00
Pierre Tessier 8355f941f9 Add Wavefront output plugin (#3160) 2017-09-29 16:13:08 -07:00
Daniel Nelson fbb1cd0903 Update example config 2017-09-29 16:09:31 -07:00
Daniel Nelson e6912e1cd5 Add deprecation notice to jolokia sample config 2017-09-29 16:08:31 -07:00
Daniel Nelson 1b1840e939 Update changelog 2017-09-29 15:59:56 -07:00
Daniel Nelson f7f699995a Fix format of connection_timeout in mqtt_consumer (#3286) 2017-09-29 15:58:38 -07:00
Daniel Nelson b0b1e43f92 Document how to exclude kubernetes annotation 2017-09-29 14:07:19 -07:00
Daniel Nelson 742eafce33 Update changelog 2017-09-29 11:50:15 -07:00
François de Metz 945eabc273 Add support for the rollbar occurrence webhook event. (#1692) 2017-09-29 11:49:22 -07:00
David G. Simmons 7dfdc9304e Revert "New Particle.io Plugin for Telegraf"
This reverts commit c3b11f9cfb.
Accidentally pushed to master, instead of my fork. Backing it out.
2017-09-29 12:57:13 -04:00
David G. Simmons 520929a015 New Particle.io Plugin for Telegraf
Only the tests need to be fixed.
2017-09-29 12:45:06 -04:00
Daniel Nelson 5b1fada643 Update changelog 2017-09-27 11:38:43 -07:00
Daniel Nelson d0f6b4d6b2 Use underscore as default opentsdb separator
Preserves backwards compatibility
2017-09-27 11:36:41 -07:00
owlet123 f634414935 Add configurable separator for metrics and fields in opentsdb output (#3106) 2017-09-27 11:29:40 -07:00
Daniel Nelson 084f73c0ea Add deprecation notice to jolokia plugin 2017-09-27 10:52:10 -07:00
Daniel Nelson 82a65fd70e Update changelog and readme for jolokia2 plugin 2017-09-26 17:42:38 -07:00
Dylan Meissner be8dd9e384 Add redesigned Jolokia input plugin (#2278) 2017-09-26 17:34:46 -07:00
Daniel Nelson 6addc388d5 Update changelog 2017-09-26 16:03:04 -07:00
Daniel Nelson 596c0f97d7 Allow JSON data format to contain zero metrics (#3268) 2017-09-26 15:58:33 -07:00
Daniel Nelson b167667432 Update changelog 2017-09-26 15:38:22 -07:00
Daniel Nelson 5a66090264 Fix parsing of JSON with a UTF8 BOM in httpjson (#3267) 2017-09-26 15:36:00 -07:00
Daniel Nelson 9938d5f2e3 Update changelog 2017-09-26 15:28:07 -07:00
Daniel Nelson ba270887fd Fix dmcache tests with 32bit int 2017-09-26 15:25:57 -07:00
Daniel Nelson 0d966ed5f7 Fix cgroup tests with 32bit int 2017-09-26 15:25:57 -07:00
Daniel Nelson 1861f0f752 Fix ceph tests with 32bit int 2017-09-26 15:25:57 -07:00
Daniel Nelson 2bd16ab923 Fix nginx_plus tests with 32bit int 2017-09-26 15:25:57 -07:00
Daniel Nelson 3398c43c75 Allow 64bit integers in kernel_vmstat 2017-09-26 15:25:57 -07:00
Daniel Nelson ffb04761d2 Set 1.4.1 release date in changelog 2017-09-26 14:19:02 -07:00
Daniel Nelson 3d0492b8b0 Update changelog 2017-09-25 16:34:04 -07:00
Lukasz Jagiello 77c7b6bee5 Add support for NSQLookupd to nsq_consumer (#3215) 2017-09-25 16:33:05 -07:00
James 4ccef6f99e Add additional numeric type handling tests for postgresql_extensible (#3066) 2017-09-25 10:58:10 -07:00
Agniva De Sarker c50e66e18f Improve statsd plugin perf by using a byte buffer pool (#3254) 2017-09-25 10:55:02 -07:00
Daniel Nelson 48edce0170 Remove nightly versioning scheme 2017-09-22 18:07:08 -07:00
Daniel Nelson 66b23ab505 Remove out of date Vagrantfile 2017-09-22 17:35:58 -07:00
Daniel Nelson 7b41da4ddd Fix build versioning; add dev.docker file 2017-09-22 17:35:58 -07:00
Daniel Nelson db787f9a83 Fix golang version 2017-09-22 17:35:58 -07:00
Daniel Nelson e4f429572d Fix unittest for golang 1.9 2017-09-22 17:35:58 -07:00
Christian Meilke b63f2a1acf Tag original URL for k8s services in prometheus input (#3257) 2017-09-22 17:26:19 -07:00
Daniel Nelson 53522f8eb2 Update changelog 2017-09-22 11:46:47 -07:00
Daniel Nelson 82f760e18d Unlock Statsd when stopping to prevent deadlock (#3258) 2017-09-22 11:45:45 -07:00
Daniel Nelson 4a406780c6 Add nginx_plus to changelog and readme 2017-09-19 11:49:55 -07:00
Patrick O'Brien d3cb050d8f Add new nginx_plus input plugin (#3214) 2017-09-19 11:46:01 -07:00
Daniel Nelson c0fa37bb03 Update changelog 2017-09-19 11:27:57 -07:00
Paulo Cabido 43e2400612 Add configurable metrics endpoint to prometheus output (#3245) 2017-09-19 11:27:11 -07:00
Daniel Nelson cf69a97ae0 Build with go 1.9 on circleci 2017-09-18 16:30:09 -07:00
Daniel Nelson bf3fdbd841 Update prometheus input documentation 2017-09-18 16:21:45 -07:00
Daniel Nelson 5eff12a840 Update changelog 2017-09-18 15:07:18 -07:00
Christian Meilke af7710dcb8 Add support for k8s service DNS discovery to prometheus input (#3236) 2017-09-18 15:06:11 -07:00
Daniel Nelson c47cc28950 Update changelog 2017-09-18 14:25:17 -07:00
Daniel Nelson 126bd1f38f Fix arm64 packages contain 32-bit executable (#3246) 2017-09-18 14:22:54 -07:00
Patrick Hemmer 19084f8fb6 Add process resource limits to procstat input (#3231) 2017-09-15 11:16:44 -07:00
Daniel Nelson c2383e9d78 Update input plugin example readme. 2017-09-14 15:50:55 -07:00
Daniel Nelson dc6fbed954 Update changelog 2017-09-14 15:28:47 -07:00
Trevor Pounds 3468ffc40f Fix panic in statsd p100 calculation (#3230) 2017-09-14 15:27:42 -07:00
Daniel Nelson 279fa20b4a Update changelog 2017-09-14 15:22:46 -07:00
Trevor Pounds 3073221f7d Add support for timing sums in statsd input (#3234) 2017-09-14 15:21:54 -07:00
Daniel Nelson f2294c7f2c Update changelog 2017-09-14 15:05:03 -07:00
Mark Wilkinson - m82labs 53cb9ffb31 Fix duplicate keys in perf counters sqlserver query (#3175) 2017-09-14 15:04:13 -07:00
Daniel Nelson 5fd6b64d10 Update changelog 2017-09-14 15:00:55 -07:00
Daniel Nelson 9ce131dfc0 Fix skipped line with empty target in iptables (#3235) 2017-09-14 14:59:28 -07:00
Daniel Nelson dc12ac391b Update changelog 2017-09-14 13:06:58 -07:00
Trevor Pounds ef7ce31f9c Fix counter and gauge metric types. (#3232) 2017-09-14 13:05:37 -07:00
Daniel Nelson d557cbd103 Update changelog 2017-09-13 17:31:39 -07:00
Daniel Nelson 8f9ede301d Whitelist allowed char classes for opentsdb output. (#3227) 2017-09-13 17:30:52 -07:00
Daniel Nelson 3727d2d470 Update changelog 2017-09-13 17:28:33 -07:00
Dimitris Rozakis 9cb7b2c249 Respect path prefix in influx output uri (#3224) 2017-09-13 17:27:01 -07:00
Daniel Nelson f87f44832b Fix fluentd test 2017-09-12 17:57:55 -07:00
Daniel Nelson 0a6aa238b9 Update changelog 2017-09-12 17:27:50 -07:00
Daniel Nelson 1e16cb463a Remove unneeded error check 2017-09-12 17:24:57 -07:00
Adrián López 2dc18135cf Add timeout option for kubernetes (#3211) 2017-09-12 17:22:15 -07:00
Daniel Nelson 6aa88c7113 Update changelog 2017-09-12 17:17:41 -07:00
Daniel Nelson aa5c5dfd87 Fix optional field types in fluentd input 2017-09-12 17:15:19 -07:00
Daniel Nelson a33a8fdc31 Update changelog 2017-09-11 12:27:39 -07:00
DanKans f5d400a1ce Fix MQTT input exits if Broker is not available on startup (#3202) 2017-09-11 12:24:51 -07:00
Daniel Nelson 8a82073891 Update changelog 2017-09-11 11:57:18 -07:00
Daniel Nelson b06e2a0c3d Add polling method to logparser and tail inputs (#3213) 2017-09-11 11:56:04 -07:00
Daniel Nelson cb40972635 Update changelog 2017-09-11 11:54:18 -07:00
DanKans 5079187fde Fix address already in use with webhooks input during reload (#3206) 2017-09-11 11:51:45 -07:00
Daniel Nelson 8e333492f8 Update changelog 2017-09-08 16:02:15 -07:00
Jeff Nickoloff ea6acff175 TLS and MTLS enhancements to HTTPListener input plugin (#3191) 2017-09-08 16:01:16 -07:00
Daniel Nelson 4fe8efd0e4 Update changelog 2017-09-08 15:36:26 -07:00
Daniel Nelson 5920cc9571 Add support for standard proxy env vars in outputs. (#3212) 2017-09-08 15:35:20 -07:00
Daniel Nelson c4e9c8cbf2 Fix short tests on darwin (#3099) 2017-09-08 13:03:37 -07:00
Daniel Nelson 225b7d90d8 Update changelog 2017-09-06 14:29:03 -07:00
Raúl Benencia f685837519 Include mount mode option in disk metrics (#3027) 2017-09-06 14:28:11 -07:00
Daniel Nelson 37bad29dc5 Update changelog 2017-09-06 14:20:38 -07:00
Daniel Nelson 359302a742 Fix typo 2017-09-06 14:19:42 -07:00
Pavel Gurkov 843e6ac044 Add Kafka output plugin topic_suffix option (#3196) 2017-09-06 14:18:26 -07:00
Daniel Nelson df9bb7278b Add 1.4.0 release date 2017-09-05 17:14:11 -07:00
Daniel Nelson d7f95a9dc9 Improve question title in FAQ 2017-09-05 17:12:36 -07:00
Daniel Nelson 4ec13c1b8e Add FAQ doc with dns resolver information 2017-09-05 13:12:11 -07:00
Daniel Nelson cd40999225 Use ip address for default InfluxDB ip in config
Helps with initial setup if localhost cannot be resolved due to the pure
Go resolver.
2017-09-05 12:55:21 -07:00
Daniel Nelson 16edaa0ae9 Sort metrics before comparing in graphite test 2017-09-05 12:50:30 -07:00
rdxmb 7f71b10080 Fix docker image name in docs (#3193) 2017-09-05 11:44:51 -07:00
Daniel Nelson 28d41380a3 Update changelog 2017-08-31 13:43:47 -07:00
Daniel Nelson bdf5e4534d Fix panic when handling string fields with escapes (#3188) 2017-08-30 21:16:37 -07:00
Daniel Nelson f53b788987 Update changelog 2017-08-29 16:27:02 -07:00
Daniel Nelson 87d08e25fd Convert bool fields to int in graphite serializer 2017-08-29 16:22:03 -07:00
Seua Polyakov 9357059aef Skip non-numerical values in graphite format (#3179) 2017-08-29 15:59:38 -07:00
Daniel Nelson 2af953c128 Move changelog item to 1.4 2017-08-28 17:17:03 -07:00
Daniel Nelson 7811eeea83 Update changelog 2017-08-28 17:08:44 -07:00
Jeff Nickoloff 6c6dd69b74 Added CloudWatch metric constraint validation (#3183) 2017-08-28 16:56:03 -07:00
Daniel Nelson 50ba31a161 Update changelog 2017-08-28 16:30:51 -07:00
Nevins 77c6089634 Add support for sharding based on metric name (#3170) 2017-08-28 16:24:38 -07:00
Daniel Nelson 914a8132b5 Update changelog 2017-08-28 16:11:00 -07:00
Dylan Meissner 701b34dc55 HTTP headers can be added to InfluxDB output (#3182) 2017-08-28 16:08:50 -07:00
Daniel Nelson d21fbf14f4 Update changelog 2017-08-25 18:08:33 -07:00
Ashton Kinslow 5be114ca6f Fix NSQ input plugin when used with version 1.0.0-compat 2017-08-25 18:06:48 -07:00
Daniel Nelson e99099f49c Close response bodies in http_listener test 2017-08-25 13:58:45 -07:00
Daniel Nelson 97d9c0c430 Update changelog 2017-08-25 12:59:19 -07:00
Rickard von Essen c77b8f2e77 Don't fail parsing of zpool stats if pool health is UNAVAIL on FreeBSD (#3149) 2017-08-25 12:57:35 -07:00
Daniel Nelson 9d65670d19 Update changelog 2017-08-25 11:55:59 -07:00
Jan Willem Janssen f0f77486a5 Fix parsing of SHM remotes in ntpq input (#3163) 2017-08-25 11:54:06 -07:00
Daniel Nelson ae1611aecc Update fail2ban documentation 2017-08-25 11:42:07 -07:00
Daniel Nelson 9c792c26e3 Fix amqp_consumer data_format documentation
closes #3164
2017-08-24 13:17:29 -07:00
Daniel Nelson d61b509ade Add links to nightly builds 2017-08-23 15:42:25 -07:00
Daniel Nelson 236d5e0ec9 Update changelog 2017-08-23 15:21:48 -07:00
Daniel Nelson 6caa896cb1 Escape backslash within string fields (#3161) 2017-08-23 15:17:26 -07:00
Daniel Nelson 7fdb8d3e4f Update changelog 2017-08-23 15:16:04 -07:00
Rickard von Essen 8edebf65e9 Enable hddtemp on all platforms (#3153)
Also disables dmcache tests on non-linux.
2017-08-23 15:14:32 -07:00
Daniel Nelson ca690fe48d Update changelog 2017-08-22 17:03:00 -07:00
Daniel Nelson ec44f8bbab Don't start Telegraf on install in Amazon Linux (#3156) 2017-08-22 17:01:59 -07:00
Daniel Nelson 6efb52e22c Update changelog 2017-08-22 16:55:15 -07:00
Daniel Nelson 5520f662ba Don't retry points beyond retention policy (#3155) 2017-08-22 16:52:26 -07:00
Daniel Nelson fcd5e5d9e3 Hide output of git describe 2017-08-22 13:32:52 -07:00
Rickard von Essen 5dd9031830 Enable fail2ban on all platforms (#3151) 2017-08-22 12:58:00 -07:00
Rickard von Essen 35ccafaaad Enable chrony for all platforms (#3152) 2017-08-22 11:49:51 -07:00
Daniel Nelson 6339228e44 Update config directory documentation 2017-08-22 11:33:26 -07:00
Daniel Nelson 77ae5dacc6 Cache intermediate objects during build 2017-08-21 17:26:55 -07:00
Chris Goller d50e20f323 Add JSON input support to zipkin plugin (#3150) 2017-08-21 17:24:54 -07:00
Daniel Nelson bbc4a82e9f Add win_services to the readme 2017-08-18 17:57:30 -07:00
Daniel Nelson 580fd73468 Update histogram aggregator documentation (#3133) 2017-08-18 13:24:05 -07:00
Daniel Nelson d5bfc683fe Remove version test 2017-08-18 11:08:48 -07:00
Daniel Nelson 36b1ad979d Update example config 2017-08-17 18:54:06 -07:00
Daniel Nelson 2ec1f63ae6 Add queues to rabbitmq documentation (#3135) 2017-08-17 18:52:27 -07:00
Daniel Nelson d008ffca1a Update master for 1.5 development 2017-08-16 16:54:15 -07:00
Daniel Nelson c267b8d617 Update sample config 2017-08-16 16:46:40 -07:00
Daniel Nelson 7d634a8134 Add tomcat input to changelog and readme 2017-08-16 15:36:56 -07:00
Daniel Nelson 3aa8e61e21 Add error status handle to tomcat input 2017-08-16 15:33:47 -07:00
mlindes cd52747ced Add tomcat input plugin (#3112) 2017-08-16 15:33:20 -07:00
Daniel Nelson eb0b2feee1 Update changelog 2017-08-16 12:26:00 -07:00
Daniel Nelson 9b99dcac64 Discard logging from tail library (#3128) 2017-08-16 12:06:07 -07:00
Daniel Nelson cc67c3f4de Allow using system plugin in Windows (#3127) 2017-08-16 12:05:46 -07:00
Daniel Nelson 733dcf6c65 Remove log message on ping timeout (#3126) 2017-08-16 11:59:41 -07:00
Daniel Nelson 0d6aca44fc Move http_response headers to end of configuration.
If the subtable comes before other options, they will be placed in the
subtable.
2017-08-15 11:50:08 -07:00
Daniel Nelson 064071ba51 Test for nil metric before reading tags in logparser 2017-08-15 11:43:16 -07:00
Daniel Nelson 71c567fb87 Update changelog 2017-08-14 14:51:28 -07:00
Bob Shannon 67fe167b79 Add gzip content-encoding support to influxdb output (#2978) 2017-08-14 14:50:15 -07:00
Daniel Nelson 68e6841a5c Improve apache input docs (#3120) 2017-08-11 17:50:51 -07:00
Daniel Nelson d0cc7d9d2f Use double hyphen in cli examples 2017-08-11 16:26:54 -07:00
Daniel Nelson d5cf0bc25f Merge LDFLAGS from env into build 2017-08-11 16:26:54 -07:00
G-Research 45228c0bcd Build NTPQ input on Windows (#3117) 2017-08-11 13:36:25 -07:00
Patrick Hemmer 585fbb6f0d Add weighted_io_time to diskio input (#3119) 2017-08-11 11:49:42 -07:00
Daniel Nelson 51c9c11505 Update changelog 2017-08-10 12:41:09 -07:00
Daniel Nelson 8da8608fb9 Skip compilation of logparser and tail on solaris (#3113)
Allows compilation for solaris
2017-08-10 12:36:11 -07:00
Daniel Nelson 6911c474ef Update changelog 2017-08-10 10:22:11 -07:00
Daniel Nelson 294b7322b4 Converge to typed value in prometheus output (#3104) 2017-08-10 10:19:28 -07:00
Daniel Nelson b1eb240b18 Update changelog 2017-08-09 11:48:36 -07:00
Daniel Nelson e7e3be6d18 Tweak formatting of varnish README 2017-08-09 11:48:12 -07:00
Daniel Nelson fb2af61743 Fix ordering of all target 2017-08-09 11:47:55 -07:00
Benjamin Stromski 83c9b8ad9c Add option to run varnish under sudo (#3097) 2017-08-09 11:38:54 -07:00
Daniel Nelson 6f0ff514c9 Add diskio %util sample query 2017-08-09 11:28:27 -07:00
Seva Poliakov ad2b962563 Remove tag_env duplicate from docker README (#3109) 2017-08-09 10:21:22 -07:00
Daniel Nelson faa3606262 Update changelog and readme 2017-08-08 11:50:16 -07:00
Rodolphe Blancho 2ef93a10e0 Add salesforce input plugin (#3075) 2017-08-08 11:48:01 -07:00
Slawomir Skowron b9b5b74ede Add TCP listener for statsd input (#2293) 2017-08-08 11:41:26 -07:00
Daniel Nelson 6ebb93abcc Remove Godeps_windows from build.py 2017-08-07 17:43:06 -07:00
Daniel Nelson 88a11e6a67 Fix artifact redirection 2017-08-07 17:41:52 -07:00
Daniel Nelson 6cbc90da50 Only upload nightly if on master branch 2017-08-07 17:24:35 -07:00
Daniel Nelson c1c85f4330 Upload as nightly builds if PACKAGE set 2017-08-07 17:16:34 -07:00
Daniel Nelson 1dcca00e3d Update changelog 2017-08-07 16:18:01 -07:00
Daniel Nelson d9ddf7bfd0 Add path tag to logparser containing path of logfile (#3098) 2017-08-07 16:16:31 -07:00
Daniel Nelson 07cda8903a Build releases with -w -s ldflags 2017-08-07 15:47:20 -07:00
Daniel Nelson 6efd5a0b67 Update changelog 2017-08-07 14:39:22 -07:00
Vlasta Hajek 09b1f7e468 Add Windows Services input plugin (#3023) 2017-08-07 14:36:15 -07:00
Daniel Nelson 2a106be2b8 Cleanup Makefile (#3089) 2017-08-03 11:54:05 -07:00
Daniel Nelson 4fe36a9caf Update changelog and readme 2017-08-02 18:02:41 -07:00
Daniel Nelson 7611b40b7e Add Zipkin input plugin (#3080) 2017-08-02 17:58:26 -07:00
Daniel Nelson 008d0e724f Update precision documentation and examples
Precision is no longer used by the InfluxDB output.

closes #3079
2017-08-01 15:02:36 -07:00
Daniel Nelson 7b467a47a7 Add Appveyor continuous integration (#3074) 2017-07-31 16:12:09 -07:00
Daniel Nelson 0802a7bfc8 Update changelog 2017-07-31 11:37:32 -07:00
Vladislav Mugultyanov 265a558958 Add histogram aggregator plugin (#2387) 2017-07-31 11:33:51 -07:00
Daniel Nelson 9dad79eeb4 Update changelog 2017-07-31 11:30:27 -07:00
DanKans 4c84e5996c Sanitize password from couchbase metric (#3033) 2017-07-31 11:29:14 -07:00
Jeff Ashton cb56269c8b Fix win_perf_counters tests (#3068) 2017-07-31 11:03:26 -07:00
Oscar Sironi 26ca13849d Add config file path troubleshooting advice for Windows (#3071) 2017-07-31 10:58:12 -07:00
Daniel Nelson 896422e4ee Update changelog 2017-07-27 17:19:33 -07:00
Daniel Nelson 4bfeb1512e Add tls options to docker input (#3063) 2017-07-27 17:18:44 -07:00
Daniel Nelson 3fc5a18c18 Update changelog 2017-07-27 15:21:52 -07:00
Daniel Nelson 25df7dcd8d Allow iptable entries with trailing text (#3060) 2017-07-27 15:21:06 -07:00
Daniel Nelson 2cae419783 Update changelog 2017-07-27 15:15:11 -07:00
Daniel Nelson eb609f553f Fix docker memory and cpu reporting in Windows (#3043) 2017-07-27 15:12:29 -07:00
Daniel Nelson 539c340c4d Add circleci parameter to build packages 2017-07-26 17:13:50 -07:00
Daniel Nelson 98b5b906f8 Set 1.3.5 release date 2017-07-26 15:53:49 -07:00
Daniel Nelson 65d578c488 Update changelog 2017-07-25 17:12:45 -07:00
Daniel Nelson 742f18e8bf Default to localhost if zookeeper has no servers set (#3056) 2017-07-25 17:08:32 -07:00
Daniel Nelson a3307476c7 Fix panic in logparser if file cannot be opened (#3055) 2017-07-25 17:08:03 -07:00
Daniel Nelson 60e0a839f2 Add redis_version field to redis input (#3054) 2017-07-25 17:07:43 -07:00
Daniel Nelson 7b5e5ed980 Update changelog 2017-07-25 16:09:48 -07:00
Daniel Nelson daf357e7b2 Update changelog 2017-07-25 15:43:13 -07:00
Daniel Nelson a5c0dac37f Fix prometheus output cannot be reloaded (#3053) 2017-07-25 15:41:18 -07:00
xin053 f179cdbec7 Correct spelling of toml field in mysql input (#3051) 2017-07-25 10:57:27 -07:00
Théophile Helleboid - chtitux cc52711e1e Fix typo in postgresql_extensible/README.md (#3052) 2017-07-25 10:39:14 -07:00
Daniel Nelson f486cc99cc Update changelog 2017-07-24 18:26:29 -07:00
Daniel Nelson 6ae58430cd Start first aggregator period at startup time (#3050)
Fixes issue where metrics collected immediately after startup would not
be aggregated.
2017-07-24 18:25:05 -07:00
Oskar 6396e3409d Fix go vet under windows (#3046) 2017-07-24 12:36:33 -07:00
Daniel Nelson 41c99827e1 Update changelog 2017-07-21 16:57:28 -07:00
Daniel Nelson 4adc6cafd0 Add network option to dns_query (#3042) 2017-07-21 16:56:08 -07:00
Daniel Nelson b3fa28e449 Update changelog 2017-07-21 15:46:22 -07:00
Andy Cobaugh 9fb8cf6319 Add input plugin for OpenLDAP (#2612) 2017-07-21 15:44:20 -07:00
Daniel Nelson 06a285bb5e Update changelog 2017-07-21 14:31:25 -07:00
Daniel Nelson e822d22565 Don't match pattern on any error (#3040)
This prevents a pattern with no wildcards from matching in case
permission is denied.
2017-07-21 14:28:14 -07:00
Daniel Nelson a6c44e3a00 Update changelog 2017-07-21 14:26:39 -07:00
Yann Cézard b487a36a33 Only report cpu usage for online cpus in docker input (#3035) 2017-07-21 14:25:17 -07:00
Daniel Nelson 6d5a81dafb Document GNU make requirement 2017-07-21 11:15:00 -07:00
Daniel Nelson 22db07e417 Update changelog 2017-07-21 10:57:39 -07:00
Daniel Nelson ac6e10528f Line wrap documentation 2017-07-21 10:57:12 -07:00
DanKans 0bfe723718 Fix filtering when both pass and drop match an item (#3036)
Adjust logic in the functions responsible for passing metrics so that
they are processed correctly in the case where pass and drop are
defined together.
2017-07-21 10:53:57 -07:00
Daniel Nelson b3a9fe0502 Update changelog 2017-07-19 13:09:49 -07:00
DanKans 7f3716b2b8 Fix combined tagdrop/tagpass filtering (#3031) 2017-07-19 13:08:40 -07:00
Daniel Nelson 6dd9bf5d1a Switch skipped kafka test 2017-07-18 18:18:57 -07:00
Daniel Nelson 5224e1748d Update download information in readme 2017-07-18 13:54:38 -07:00
Daniel Nelson f2adecd753 Update changelog 2017-07-18 11:03:07 -07:00
DanKans 9a8de6085f Fix ntpq parse issue when using dns_lookup (#3026) 2017-07-18 11:01:08 -07:00
soldierkam 880ff896ac Add read timeout to socket_listener 2017-07-17 18:34:36 -07:00
Daniel Nelson fa086ca5e4 Remove command in example output 2017-07-17 15:08:17 -07:00
Daniel Nelson f8dd52c69d Update changelog 2017-07-17 12:01:35 -07:00
Daniel Nelson 99e5ccee6f Prevent startup if intervals are 0 2017-07-17 11:58:47 -07:00
Daniel Nelson 6edd848e4c Update changelog 2017-07-14 10:45:32 -07:00
Bob Shannon 39a8f73260 Add result_type field to net_response input plugin (#2990) 2017-07-14 10:43:36 -07:00
Daniel Nelson 08bdb780c6 Add credits for new plugins to changelog 2017-07-13 16:14:18 -07:00
Daniel Nelson 6d708d870b Update changelog 2017-07-13 16:00:09 -07:00
DanKans 0ca0e213de Add fluentd input plugin (#2661) 2017-07-13 15:58:20 -07:00
Daniel Nelson 4138e8af08 Update changelog 2017-07-13 15:39:45 -07:00
Daniel Nelson 8567dfe7b1 Prevent possible deadlock when using aggregators (#3016)
Looping the metrics back through the same channel could result in a
deadlock; by using a new channel and locking the processor we can ensure
that all stages make continual progress.
2017-07-13 15:34:21 -07:00
Daniel Nelson 88037c8a2c Add release date for 1.3.4 2017-07-12 17:15:38 -07:00
Daniel Nelson b201814cea Update changelog 2017-07-12 12:04:43 -07:00
Daniel Nelson c5d49d1863 Prevent Write from being called concurrently (#3011) 2017-07-12 12:03:23 -07:00
Daniel Nelson d21a9316bd Update changelog 2017-07-11 15:55:44 -07:00
Daniel Nelson b086ecee0c Do not allow metrics with trailing slashes (#3007)
It is not possible to encode a measurement, tag, or field whose last
character is a backslash due to it being an unescapable character.
Because of the tight coupling between line protocol and the internal
metric model, metrics like this are prevented from being created.

Measurements with a trailing slash are not allowed and the point will be
dropped.  Tags and fields with a trailing slash will be dropped from
the point.
2017-07-11 15:54:38 -07:00
Daniel Nelson c35b4c8f1b Update changelog 2017-07-11 14:10:09 -07:00
JSH 0197a614ed Fix chrony plugin does not track system time offset (#2989) 2017-07-11 14:08:40 -07:00
Daniel Nelson a9d332d673 Update changelog 2017-07-10 19:07:28 -07:00
Daniel Nelson 4780073ba1 Fix handling of escapes within fieldset (#3003)
Line protocol does not require or allow escaping of backslashes; the
only requirement for a byte to be escaped is that it is an escapable
char preceded immediately by a slash.
2017-07-10 19:05:18 -07:00
Daniel Nelson 05309855e3 Update changelog 2017-07-10 12:23:16 -07:00
Daniel Nelson c928e21462 Update elastic version to 5.0.41 (#2999) 2017-07-10 12:18:56 -07:00
Daniel Nelson 1a1ebc1424 Update dependencies 2017-07-10 12:01:22 -07:00
Wesley Merkel 050fb93e6c Add link to Graylog input to README.md (#2995) 2017-07-10 11:22:37 -07:00
Daniel Nelson 06fc3f66a5 Update changelog 2017-07-05 14:29:59 -07:00
Song Wenhao 89c5fab917 Display error message if prometheus output fails to listen (#2984) 2017-07-05 14:28:44 -07:00
Daniel Nelson 135ce41a59 Update changelog 2017-06-29 16:17:08 -07:00
Aleksey Shirokih 45c1fd1950 Change default prometheus_client port (#2973) 2017-06-29 14:03:42 -07:00
Daniel Nelson daecb88808 Set release date for 1.3.3 2017-06-28 13:05:06 -07:00
Daniel Nelson bd78b8e1b3 Fix build on Windows (#2972) 2017-06-27 16:31:28 -07:00
Daniel Nelson 8bd9ac8697 Use git sha1 as version if not tagged (#2969) 2017-06-27 13:24:06 -07:00
Adam Perlin c30124e192 Fix several bugs in minecraft input (#2970) 2017-06-27 13:14:07 -07:00
Daniel Nelson 6cd958b215 Update changelog 2017-06-26 15:25:06 -07:00
Bob Shannon 35fff50eec Fix panic in elasticsearch input if cannot determine master (#2954) 2017-06-26 15:23:53 -07:00
Daniel Nelson 35f64043a3 Update changelog 2017-06-26 15:15:31 -07:00
Bob Shannon d7af1b797c Add optional usage_active and time_active CPU metrics (#2943) 2017-06-26 15:13:38 -07:00
Daniel Nelson 6f803308c5 Log aerospike field value on error 2017-06-26 14:48:22 -07:00
Daniel Nelson cbd93f0598 Update changelog 2017-06-26 14:31:17 -07:00
vodolaz095 ab876bb525 Add support for RethinkDB 1.0 handshake protocol (#2963)
Allow the rethinkdb input plugin to work with RethinkDB 2.3.5+ databases that require username/password authorization and the v1.0 handshake protocol

* remove top level header not required in sample config
2017-06-26 14:29:48 -07:00
Daniel Nelson f070f1460a Update changelog 2017-06-23 16:56:36 -07:00
Ayrdrie 2ed4a73d73 Add Minecraft input plugin (#2960) 2017-06-23 16:54:12 -07:00
Daniel Nelson e9d25c8e7e Update changelog 2017-06-23 11:13:00 -07:00
MatthewCh cc72af94e6 Support HOST_PROC in processes and linux_sysctl_fs inputs (#2924) 2017-06-23 11:11:33 -07:00
Daniel Nelson 793f12a0b1 Update changelog 2017-06-23 11:04:13 -07:00
Daniel Nelson 87f2b44566 Fix bug parsing default timestamps with modified precision (#2949) 2017-06-23 10:59:04 -07:00
Daniel Nelson 2c61e5d3a5 Use strings.Join in statsd input (#2947) 2017-06-21 16:24:23 -07:00
Daniel Nelson 41b6445eb2 Update changelog 2017-06-21 12:46:57 -07:00
grugrut 1999cd3ba9 Add input plugin for Fail2ban (#2875) 2017-06-21 12:42:13 -07:00
Daniel Nelson a4d18aab81 Update changelog 2017-06-21 12:39:09 -07:00
Daniel Nelson 995be3847a Remove label value sanitization in prometheus output (#2939) 2017-06-21 12:36:29 -07:00
Daniel Nelson 07dbf058ad Update changelog 2017-06-19 11:52:53 -07:00
Eugene Shilin a7e3033646 Add standard SSL options to mysql input (#2933) 2017-06-19 11:42:43 -07:00
Artem Kovardin 31449368c9 More explicit 404 error in cassandra input (#2936) 2017-06-19 11:06:49 -07:00
trastle 0dcede784b Update README for Prometheus Client Output (#2452) 2017-06-19 11:04:08 -07:00
Daniel Nelson 4f095bfc1c Set default ping count in Windows
fixes #2934
2017-06-16 13:39:55 -07:00
Daniel Nelson a375c9ac6b Document that ping_interval is non-linux only 2017-06-16 13:32:04 -07:00
Daniel Nelson 476656a523 Update changelog 2017-06-16 13:18:27 -07:00
Daniel Nelson 6bbed7aa90 Allow dos line endings in tail and logparser (#2920)
Parsing DOS line-ending delimited line protocol is still illegal in most
cases.
2017-06-16 13:16:48 -07:00
Daniel Nelson 148f81682a Update changelog 2017-06-16 12:06:40 -07:00
Simone Rotondo b68c2d48ef Add HTTP Proxy support to influxdb output (#2929) 2017-06-16 12:05:08 -07:00
Daniel Nelson 2cfe2a3497 Update 1.3.2 release date 2017-06-14 12:16:47 -07:00
Daniel Nelson c270fc4b7b Update changelog 2017-06-13 18:07:12 -07:00
Daniel Nelson 6ebeeef452 Ensure prometheus metrics have same set of labels (#2857) 2017-06-13 18:04:26 -07:00
Daniel Nelson e2ab598a4a Update changelog 2017-06-13 17:19:33 -07:00
Daniel Nelson 868f7abc64 Change node_name to be a tag in aerospike input (#2918) 2017-06-13 17:09:38 -07:00
Daniel Nelson 810f2e74e8 Update changelog 2017-06-13 14:10:33 -07:00
Heston Kan 075066f452 Add min/max response time on linux/darwin to ping (#2908) 2017-06-13 14:09:17 -07:00
Daniel Nelson 26e526df67 Update changelog 2017-06-13 13:44:07 -07:00
Dheeraj Dwivedi a27b074588 Add secure connection support to graphite output (#2602) 2017-06-13 13:42:11 -07:00
Daniel Nelson 4c8ff30611 Update changelog 2017-06-12 18:32:50 -07:00
Daniel Nelson 8a0d1bc283 Update aws-sdk-go dependency to latest release. (#2912) 2017-06-12 18:31:27 -07:00
Daniel Nelson 23794f35cc Update changelog 2017-06-08 16:55:27 -07:00
Daniel Nelson f944bd1feb Fix support for mongodb/leofs urls without scheme (#2900)
This was broken by changes to url.Parse in Go 1.8.  This change allows
the string but prompts the user to move to the correct URL format.
2017-06-08 16:52:01 -07:00
Daniel Nelson d2eddec1e8 Update changelog 2017-06-08 13:20:44 -07:00
Matteo Cerutti d7d64a76fe Add wildcard support for container inclusion/exclusion (#2793) 2017-06-08 13:17:31 -07:00
Daniel Nelson d5c7fa206c Skip kafka_consumer_integration_test due to issue on CircleCI 2017-06-07 18:31:52 -07:00
Daniel Nelson 56d08c6b4f Add release note to changelog regarding kafka_consumer 2017-06-07 18:27:12 -07:00
Seuf a24f7a0a05 Add Kafka 0.9+ consumer support (#2487) 2017-06-07 18:22:28 -07:00
Bob Shannon 8e309f864a Add SSL/TLS support to nginx input plugin (#2883) 2017-06-07 17:52:10 -07:00
Daniel Nelson 5f274f1a8e Update changelog 2017-06-07 13:46:06 -07:00
Daniel Nelson f0c10b4012 Fix metric splitting edge cases (#2896)
Metrics needing one extra byte to fit the output buffer would not be split, so we would emit lines without a line ending. Metrics which overflowed by exactly one field length would be split one field too late, causing truncated fields.
2017-06-07 13:37:54 -07:00
Daniel Nelson d3562b7730 Update changelog 2017-06-06 13:55:11 -07:00
Frederick Roth 38b760936d Add result_type field for http_response input (#2814) 2017-06-06 13:39:07 -07:00
Daniel Nelson ef72c5703b Update changelog 2017-06-06 11:56:19 -07:00
Mariusz Brzeski 4bfef75fdb Fix timeout option in Windows ping input sample configuration (#2885) 2017-06-06 11:55:01 -07:00
Daniel Nelson b2d208bb0d Update changelog 2017-06-05 14:47:34 -07:00
Sebastian Borza 1b3c2cdedf Add timezone support to logparser timestamps (#2882) 2017-06-05 14:45:11 -07:00
Daniel Nelson 3e946994ba Update changelog 2017-06-05 12:46:50 -07:00
Daniel Nelson c1da0002c0 Fix udp metric splitting (#2880) 2017-06-05 12:44:29 -07:00
Daniel Nelson 3103fc775d Set 1.3.1 release date 2017-05-31 15:00:31 -07:00
Daniel Nelson e9c233746f Generate sha256 hashes when packaging 2017-05-31 12:29:39 -07:00
Daniel Nelson 928320ae8d Update changelog 2017-05-30 17:40:37 -07:00
Daniel Nelson 25d3f06756 Fix length calculation of split metric buffer (#2869) 2017-05-30 17:38:32 -07:00
Daniel Nelson 3d16d714d0 Update changelog 2017-05-30 11:04:39 -07:00
Steve Nardone e39ebdadd1 Fix panic in mongo input (#2848) 2017-05-30 11:02:26 -07:00
Daniel Nelson 88e4c6335b Update changelog 2017-05-26 12:12:18 -07:00
Matteo Cerutti c8b7739237 MySQL input: log and continue on field parse error (#2855) 2017-05-26 12:09:43 -07:00
Daniel Nelson 5065a6e7f5 Update changelog 2017-05-25 16:20:29 -07:00
Daniel Nelson db5560e2ea Update gopsutil version
fixes #2856
2017-05-25 16:11:49 -07:00
Daniel Nelson a6fb50efd1 Update changelog 2017-05-25 13:39:16 -07:00
Daniel Nelson f3321bc194 Fix influxdb output database quoting (#2851) 2017-05-25 13:25:52 -07:00
Olivier Lambert d3241d5fd2 Add documentation for fetching metrics on Caddy HTTP and Prometheus (#2853) 2017-05-25 13:07:49 -07:00
Sylvain Boily fba7735d96 Documentation privilege requirements for specific procstat metrics (#2787) 2017-05-25 13:06:27 -07:00
Matteo Cerutti c505e015b3 Add timeout option to ipmi_sensor plugin - solves #2817 (#2818) 2017-05-22 13:41:34 -07:00
Daniel Nelson c97f65c100 Remove changelog item from pull request template
Person who merges PR is now expected to update the CHANGELOG.
2017-05-22 12:06:48 -07:00
Daniel Nelson 4e46842546 Update CHANGELOG with fixed issue #1137 2017-05-22 12:01:22 -07:00
Steven Burgart 35004c5170 Fix multiple plugin loading in win_perf_counters (#2800) 2017-05-22 11:58:00 -07:00
Daniel Nelson 0bb9de5fe3 Update dependency license file 2017-05-19 18:03:49 -07:00
Lukasz Jagiello ba74206597 Consul plugin README typo (#2829) 2017-05-19 11:37:31 -07:00
Daniel Nelson bc6c311ffb Update changelog 2017-05-18 18:11:49 -07:00
rsingh2411 f28f166069 Add Docker container environment variables as tags. Only whitelisted #2580 (#2581) 2017-05-18 16:58:34 -07:00
mced 02d40565c6 [enh] set db_version at 0 if query version fails (#2819) 2017-05-18 13:52:56 -07:00
Daniel Nelson 5d7127e4e4 Update changelog for #2815 2017-05-16 17:37:51 -07:00
Timo Mihaljov b66e53a2ac Handle process termination during read from /proc (#2816)
Fixes #2815.
2017-05-16 17:33:35 -07:00
Frederick Roth ec3c27a555 Fixed inconsistency between HasIntField and IntField (#2813) 2017-05-16 15:25:30 -07:00
Daniel Nelson f29cd638c9 Add release date for 1.3.0 2017-05-15 19:52:35 -07:00
Daniel Nelson 4a827243fc Add back the changelog entry for 2141 2017-05-15 12:54:03 -07:00
Daniel Nelson dd0c04b6e2 Only split metrics if there is an udp output (#2799) 2017-05-12 15:34:05 -07:00
Zack Zatkin-Gold 61be19b1f0 Fix telegraf example arguments (#2788)
Many of the examples provided in the documentation use a single dash
for the command line arguments, but the telegraf executable defines its
flags with two dashes.

There are also some inconsistencies with the ordering of the command
line argument examples.  I've ordered them so that the examples will
show: config, config-directory, input-filter, test (see the sketch
after this entry).
2017-05-12 15:22:29 -07:00
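For reference, a minimal sketch of the double-dash form the examples were normalized to; the paths and filter values here are illustrative assumptions:

```sh
# Double-dash flags, in the documented order:
# config, config-directory, input-filter, test
telegraf --config /etc/telegraf/telegraf.conf \
         --config-directory /etc/telegraf/telegraf.d \
         --input-filter cpu:mem \
         --test
```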
Sebastian Borza a871b64ac3 split metrics based on UDPPayload size (#2795) 2017-05-12 14:45:50 -07:00
Daniel Nelson 0ed404e7ba Merge branch 'reuse-transport' 2017-05-10 18:19:21 -07:00
Daniel Nelson 1cc7fe7f3d Ensure keep-alive is not used in http_response input.
Using Keep-Alive would change the timing for already established
connections.  Previous to this commit, Keep-Alive worked only when using
a response_string_match due to failure to close the request body.
2017-05-10 14:40:55 -07:00
Daniel Nelson f42768ed2e Update changelog 2017-05-10 13:11:33 -07:00
Daniel Nelson 3381ac8f94 Fix http_response input creation of transport on every gather 2017-05-09 16:23:38 -07:00
Daniel Nelson b6312cf13c Fix prometheus input creation of transport on every gather 2017-05-09 16:21:49 -07:00
Daniel Nelson 9e85002875 Fix apache input creation of transport on every gather. 2017-05-09 16:19:56 -07:00
Daniel Nelson 5e739572f0 Merge branch 'update-readme' 2017-05-09 13:50:19 -07:00
Daniel Nelson ac466b393d Add missing plugins to README 2017-05-09 13:50:12 -07:00
Daniel Nelson c648cf48a2 Update contributing section
Hoping this will encourage more non-plugin contributions.
2017-05-09 13:50:12 -07:00
Adrian Sadłocha b1b0efa546 Improve PostgreSQL plugin documentation (#2777) 2017-05-09 12:58:43 -07:00
Lukasz Jagiello 4133765208 Add support for self-signed certs to InfluxDB input plugin (#2773) 2017-05-08 15:20:24 -07:00
Sylvain Boily 6aa3762049 Systemd does not see all shutdowns as failures (#2716) 2017-05-08 11:48:29 -07:00
Daniel Nelson dd2ef7a67e Update cloudwatch documentation
Mention that some metrics are available only at larger intervals than 5
minutes.  Update dead links to new locations and example config.

closes #1907
2017-05-08 11:31:20 -07:00
Daniel Nelson d9c6543fac Enable s390x builds
closes #2766
2017-05-05 14:39:56 -07:00
Daniel Nelson c7f5d96d0d Add SLES11 support to rpm package (#2768) 2017-05-05 14:29:40 -07:00
Sébastien c7da3f1063 fix systemd path in order to add compatibility with SuSe (#2499) 2017-05-05 14:04:33 -07:00
ceseuron 45d22d942b Fixed sqlserver input to work with case sensitive server collation. (#2749)
Fixed a problem with sqlserver input where database properties are not returned by Telegraf when SQL Server has been set up with a case sensitive server-level collation.

* Added bugfix entry to CHANGELOG.md for sqlserver collation input fix.
2017-05-04 10:47:03 -07:00
Daniel Nelson 3e71a12cea Add 1.4 section to changelog 2017-05-03 17:29:34 -07:00
Daniel Nelson 23cdf12d83 Remove documentation in kafka_consumer for metric_buffer 2017-05-03 11:51:49 -07:00
Damien Krotkine abe736ee8f reflect zookeeper chroot config in readme (#2759) 2017-05-03 11:50:08 -07:00
Daniel Nelson 99888bd614 Return an error if no valid patterns. (#2753) 2017-05-02 14:54:38 -07:00
Alexander Blagoev 1da3e41941 Improve redis input documentation (#2708) 2017-05-02 11:43:07 -07:00
Patrick Hemmer 74dd3be3a5 fix close on closed socket_writer (#2748) 2017-05-02 11:06:49 -07:00
Daniel Nelson 92673a915e Add initial documentation for rabbitmq input. (#2745) 2017-05-01 18:55:48 -07:00
Daniel Nelson ec6ed1303d Don't log error creating database on connect (#2740)
closes #2739
2017-04-28 15:58:46 -07:00
Daniel Nelson 99f3dbcd60 Update telegraf.conf 2017-04-28 13:47:32 -07:00
Daniel Nelson 3d0df7a056 Clarify retention policy option for influxdb output
closes #2696
2017-04-28 13:46:23 -07:00
Daniel Nelson d840bbafbb Clarify retention policy option for influxdb output
closes #2696
2017-04-28 13:40:58 -07:00
Daniel Nelson ade21b0c6c Use go 1.8.1 for CI and Release builds (#2732) 2017-04-27 16:18:11 -07:00
Daniel Nelson b60c8f1899 Fix grammar 2017-04-27 14:59:18 -07:00
Daniel Nelson 786915aa2a Update telegraf.conf 2017-04-27 11:53:32 -07:00
Seuf b23596c232 Added SASL options for output kafka plugin (#2721) 2017-04-27 11:50:25 -07:00
Ross McDonald c9921f5cf3 Kapacitor input plugin (#2031) 2017-04-27 11:47:22 -07:00
Daniel Nelson d4a13c88fd Fix logfile documentation 2017-04-27 11:38:49 -07:00
Daniel Nelson 78d31992d7 Update haproxy README 2017-04-27 11:23:37 -07:00
Seuf 01f2a952c7 Added SSL configuration for input haproxy (#2723) 2017-04-27 11:20:41 -07:00
Daniel Nelson dcd6936483 Fix amqp output block on write if disconnected (#2727)
fixes #2603
2017-04-27 11:10:30 -07:00
Damien Krotkine ce3dc1f82b it's -> its (#2728) 2017-04-27 11:10:00 -07:00
Damien Krotkine 05a1af700d it's -> its (#2729) 2017-04-27 11:06:40 -07:00
Nevins 0514b3cfa7 add option to randomize Kinesis partition key (#2705) 2017-04-26 10:54:24 -07:00
Jeff Zellner b95ade7ec4 Update README.md (#2719) 2017-04-25 13:17:15 -07:00
Daniel Nelson 6ff98da985 Don't close stdout on config reload. (#2707)
fixes #2528
2017-04-24 16:18:58 -07:00
Patrick Hemmer b1a2f896a2 add keep-alive support to socket_listener & socket_writer (#2697)
closes #2635
2017-04-24 13:14:42 -07:00
Alexander Blagoev ddc2f64593 Improve procstat input documentation (#2699)
closes #1895
2017-04-24 11:18:55 -07:00
Patrick Hemmer 5ebe43f86f use AddError everywhere (#2372) 2017-04-24 11:13:26 -07:00
Alexander Blagoev 06199523ac System net input documentation (#2698)
closes #2166
2017-04-24 11:03:53 -07:00
Daniel Nelson 0fd3aeb34d Update EXAMPLE_README.md 2017-04-21 14:27:36 -07:00
Daniel Nelson 8a1b070e2f Use C locale when running sadf (#2690)
fixes #1911
2017-04-21 10:55:54 -07:00
Daniel Nelson 1d4843bc8e Update commit hash of tail fork 2017-04-20 16:29:39 -07:00
Daniel Nelson 35d6d17989 Add fix for network aliases to changelog
Change was made in gopsutil
2017-04-20 15:34:30 -07:00
Alexander Blagoev 2cd12b9d8f Memcached input documentation (#2685)
Closes #2615
2017-04-20 11:25:22 -07:00
Oleg Grytsynevych 8cddffb43c win_perf_counters: Format errors reported by pdh.dll in human-readable format (#2338) 2017-04-20 11:22:44 -07:00
Martin c0daa68e00 Fixed install/remove of telegraf on non-systemd Debian/Ubuntu systems (#2360) 2017-04-20 11:19:33 -07:00
Daniel Nelson 7e07d17b64 Fix ipmi_sensor config is shared between all plugin instances (#2684) 2017-04-19 17:02:44 -07:00
Daniel Nelson c6b60744ed Add diskio for darwin to changelog 2017-04-19 13:42:24 -07:00
Patrick Hemmer 0f5d49a7fd change jolokia input to use bulk requests (#2253) 2017-04-18 13:00:41 -07:00
Nikolay Denev cc44150054 Simplify system.DiskUsage() (#2630) 2017-04-18 11:42:58 -07:00
Ross McDonald 018bb9d742 Add input for receiving papertrail webhooks (#2038) 2017-04-17 13:49:36 -07:00
François de Metz b7c34a3ff3 GitHub webhooks: check signature (#2493) 2017-04-17 11:42:03 -07:00
Daniel Nelson b92f6f5fb2 Rename heap_objects_bytes to heap_objects in internal plugin. (#2674)
* Rename heap_objects_bytes to heap_objects in internal plugin.

This field does not contain bytes

fixes #2671
2017-04-14 17:32:14 -07:00
Daniel Nelson 2177270d80 Use variadic disk.IOCounters() function 2017-04-14 13:48:02 -07:00
Daniel Nelson f005ea4a27 Improve logparser README (#2664) 2017-04-14 13:47:43 -07:00
calerogers 4df8b034bf Refactor interrupts plugin code (#2670) 2017-04-14 13:40:36 -07:00
calerogers 3e5980d017 Irqstat input plugin (#2494)
closes #2469
2017-04-13 15:53:02 -07:00
ingosus 2e306bf13d Feature #1820: add testing without outputs (#2446) 2017-04-13 12:59:28 -07:00
Gregory Kman ff4f5286ea Update ping-input-plugin Readme (#2651) 2017-04-12 17:46:48 -07:00
Chris Goffinet 1e95e53375 Fixed content-type header in output plugin OpenTSDB (#2663) 2017-04-12 17:40:10 -07:00
Daniel Nelson 1876441ed7 Update grok version (#2662) 2017-04-12 17:10:17 -07:00
Jesús Roncero 0c0b22874e Nagios plugin documentation fix (#2659) 2017-04-12 12:04:44 -07:00
Daniel Nelson 87f4f67b44 Clarify precision documentation (#2655) 2017-04-12 10:42:11 -07:00
Daniel Nelson c4634c1743 Add collectd parser (#2654) 2017-04-12 10:41:26 -07:00
Nick Irvine 198ef8de3a Add max_message_len in kafka_consumer input (#2636) 2017-04-11 12:05:39 -07:00
Daniel Nelson 8b4c3201a2 Use name filter for IOCounters in diskio (#2649)
Use IOCountersForNames for disk counters.
2017-04-11 11:41:09 -07:00
Patrick Hemmer fa0f5bd6f3 set default measurement name on snmp input (#2639) 2017-04-10 16:45:02 -07:00
Daniel Nelson 147200f675 Add support for precision in http_listener (#2644) 2017-04-10 16:39:40 -07:00
Daniel Nelson db7c97be32 Use random port in http_listener tests 2017-04-10 14:39:39 -07:00
Vladimir S a92ec65549 Add dmcache input plugin (#1667) 2017-04-07 15:39:43 -07:00
Rajaseelan Ganeswaran 223fce4770 Add sample config stanza for CPU (#2620) 2017-04-06 14:44:02 -07:00
Victor Yunevich fac5d605ac ipmi_sensor: allow @ symbol in password (#2633) 2017-04-06 14:40:34 -07:00
Daniel Nelson 6a98f9d8ea Update filtering documentation (#2631) 2017-04-06 12:06:08 -07:00
James 1191c12515 fix postgresql connection leak (#2611) 2017-04-04 17:37:44 -07:00
Daniel Nelson 2d51ecb300 Update httpjson documentation (#2619)
closes  #2536
2017-04-03 18:34:04 -07:00
Patrick Hemmer d9ad9cfdeb socket_listener: clean up unix socket file on start & stop (#2618) 2017-04-03 18:06:51 -07:00
Shakeel Sorathia 95a9d904e4 Docker: optionally add labels as tags (#2425) 2017-04-03 13:43:15 -07:00
Patrick Hemmer 0def641ce8 add support for linux sysctl fs metrics (#2609) 2017-03-31 14:01:02 -07:00
Daniel Nelson b8bb159a41 Fix possible deadlock when output cannot write. (#2610) 2017-03-31 12:45:28 -07:00
Dmitry Ulyanov 807c11629f Added pprof tool (#2512) 2017-03-29 18:28:43 -07:00
Daniel Nelson d9b34c266e Update changelog for #2587 2017-03-29 17:15:11 -07:00
tjmcs 373839a594 Adds a new json_timestamp_units configuration parameter (#2587) 2017-03-29 17:12:29 -07:00
Patrick Hemmer f13686bdf7 fix race in testutil Accumulator.Wait() (#2598) 2017-03-29 17:03:06 -07:00
djjorjinho 71cdcee8b2 fix timestamp parsing on prometheus plugin (#2596) 2017-03-29 15:04:29 -07:00
Daniel Nelson 5b9b04a4fc Use fork of hpcloud/tail (#2595) 2017-03-29 14:25:33 -07:00
Daniel Nelson d4011169df Remove wait loop in riemann tests
This test case still has a race condition, but I believe it only occurs when the
test does not complete quickly enough.
2017-03-28 13:05:10 -07:00
mgresser c772d0a398 Removed duplicate evictions metric (#2577) 2017-03-28 10:47:00 -07:00
Daniel Nelson 6971ec6f8d Add elasticsearch output to changelog 2017-03-28 10:22:28 -07:00
Daniel Nelson 29ea9be71e Add write timeout to Riemann output (#2576) 2017-03-27 15:49:45 -07:00
Daniel Nelson a855718cd9 Skip elasticsearch output integration test in short mode 2017-03-27 15:05:06 -07:00
Daniel Nelson 3bf45f9365 Update telegraf.conf 2017-03-27 14:49:04 -07:00
Daniel Nelson d2afe424f5 Clarify influxdb output url format
closes #2568
2017-03-24 16:04:18 -07:00
Patrick Hemmer 36c1a39a09 snmp: support table indexes as tags (#2366) 2017-03-24 12:06:52 -07:00
Patrick Hemmer c65cfb6a6e remove sleep from tests (#2555) 2017-03-24 12:03:36 -07:00
Oskar 6588c4a1a7 Multi instances in win_perf_counters (#2352) 2017-03-22 12:04:58 -07:00
Daniel Nelson 6860545ea3 Really fix procstat initialization 2017-03-21 11:40:51 -07:00
Daniel Nelson 6d0fbe9cf3 Fix procstat initialization 2017-03-21 10:59:41 -07:00
Leandro Piccilli a7e8bc1c02 Add Elasticsearch 5.x output (#2332) 2017-03-20 17:47:57 -07:00
Daniel Nelson 12adad6b54 Refactor procstat input (#2540)
fixes #1636 
fixes #2315
2017-03-17 16:49:11 -07:00
Patrick Hemmer be7de16dd5 return error on unsupported serializer data format (#2542) 2017-03-17 10:14:03 -07:00
Antoine Augusti dcb8e3f7a6 Update default value for Cloudwatch rate limit (#2520) 2017-03-15 15:20:18 -07:00
Daniel Nelson f740aff4a3 Add support for new SSL configuration to mongodb (#2522)
closes #2519
2017-03-10 11:27:55 -08:00
jeremydenoun ccd2182295 Report DEAD (X) State Process (#2501)
Report count of processes in dead (X) process state from the processes input.  This process state is only valid on Linux.
2017-03-09 11:28:54 -08:00
Daniel Nelson 24ae421ad5 Fix typo in postgresql README 2017-03-09 10:13:31 -08:00
Cameron Sparr 3f35ae306f create telegraf.d directory in tarball
closes #2513
2017-03-09 11:41:08 +00:00
Timothy 94a733219e Update CONFIGURATION.md (#2516)
Add information about default configuration file locations.  Also mention that the config directory option is available.
2017-03-09 11:21:03 +00:00
Dennis Dryden 8ca01a5f5a Add configuration docs to Postgresql input plugin (#2515)
* Add configuration docs to Postgresql input plugin

Add configuration docs to PostgreSQL input plugin README (mostly from the source code), though I've not included the configuration example that seems to use all the connections on the database[1].

[1] https://github.com/influxdata/telegraf/issues/2410

* Fix typo in readme and sampleConfig string.
2017-03-09 11:19:03 +00:00
jeremydenoun 068611263a Remove warning if parse empty content (#2500)
closes #2448
2017-03-08 14:08:55 -08:00
Robpol86 68b10d73fc Exporting Ipmi.Path to be set by config. (#2498)
* Exporting Ipmi.Path to be set by config.

Currently "path" is not exported, giving this error when users try to
override the variable via telegraf.conf as per the sample config:

`field corresponding to `path' is not defined in `*ipmi_sensor.Ipmi'`

Exporting the variable solves the problem.

* Updating changelog.
2017-03-08 16:38:36 +00:00
vvvkamper 4a9650ecf5 Fix part 2 of #1291
added PDH_FMT_NOCAP100 format option

closes #2483
2017-03-08 13:39:03 +00:00
Cameron Sparr 5c3cd822a9 update gopsutil for file close fixes
hopefully this will fix #2472
2017-03-08 12:54:17 +00:00
Daniel Nelson 99176458ad Update issue template 2017-03-06 11:20:53 -08:00
Cameron Sparr 8f83d9318a Revert "Procstat: don't cache PIDs" (#2479) 2017-03-06 15:59:36 +00:00
François de Metz e76dcf09ec Respond 200 when receiving a ping event. (#2492) 2017-03-06 12:34:41 +00:00
Jack Zampolin 2fe161356b AMQP Consumer plugin (#1678) 2017-03-03 10:24:50 -08:00
Charles-Henri 53fb5608a8 Iptables input: document better the ignored rules behavior (#2482)
During issue #2215 it was highlighted that the current behavior where
rules without a comment are ignored is confusing for several users.

This commit improves the documentation and adds a NOTE to the sample
config to clarify the behavior for new users.
2017-03-02 09:58:26 +00:00
Chris Koehnke e37b28896e Disk counter array newline (#2481)
Tweak formatting of `LogicalDisk` counter array to have one entry per
line.
2017-03-02 08:43:33 +00:00
Cameron Sparr 629ba4c1c7 Fix type conflict on windows ping plugin (#2462)
closes #1433
2017-03-01 11:22:42 +00:00
Cameron Sparr 49ed1a278b Handle nil os.FileInfo in filepath.Walk
closes #2466
2017-02-28 17:51:03 +00:00
Cameron Sparr a38ebcbe54 log error message when invalid regex is used
closes #2178
2017-02-28 12:48:14 +00:00
Cameron Sparr ed43e1010b Remove sleep from riemann test 2017-02-28 12:46:27 +00:00
Cameron Sparr 633ccf33c5 add cgroup plugin to README 2017-02-24 09:43:22 +00:00
Cameron Sparr 0379c7309c switch out deprecated docker client library
closes #2071
2017-02-22 10:55:00 +00:00
Rickard von Essen 08395de355 Updated readme, now requires Go 1.8 (#2455) 2017-02-21 22:13:22 +01:00
Carlos 7f24cb1edd Added default config to file output plugin's README (#2426) 2017-02-20 11:50:39 +01:00
Cameron Sparr 3bbf8153ed Check for errors in user stats & process list
closes #2414
2017-02-17 15:38:33 +00:00
Cameron Sparr 31249eb20d Only set the buffer size once
fixes #2380
2017-02-17 14:11:15 +00:00
Cameron Sparr a29c02f09f socket_writer output plugin README 2017-02-16 23:13:14 +00:00
Leandro Piccilli df402e885d Check if tag value is empty before allocation
closes #2390
closes #2404
2017-02-16 23:07:27 +00:00
Cameron Sparr dfddcc5146 Fix prometheus_client reload behavior
fixes #2282
2017-02-16 21:57:13 +00:00
Priyank Trivedi e0a36c38df Fix typo - Default from Defalt (#2417) 2017-02-16 19:03:17 +00:00
Yaron de Leeuw 212fdc587a README: update golang requirement to 1.7 (#2412)
The docker engine-api package we use needs golang 1.7+, see:
https://github.com/docker/engine-api/pull/382#issuecomment-244512952

So telegraf won't compile without 1.7
2017-02-15 17:17:26 +00:00
François de Metz c1f825c705 Fix setting the username and the password to the influxdb output. (#2401) 2017-02-13 15:30:30 +00:00
Cameron Sparr 577b25bcd1 Skip service input plugins in test mode 2017-02-13 10:40:38 +00:00
Cameron Sparr d373fbbaaf prepend 'inputs.' to --test output check 2017-02-13 10:33:51 +00:00
Cameron Sparr 4b289fc60d don't use influxdata/config, just use influxdata/toml 2017-02-10 17:27:18 +00:00
Cameron Sparr b550328aa8 update naoina/toml to do config validation 2017-02-10 17:05:13 +00:00
Cameron Sparr 4c7fbc490c deprecate udp_listener & tcp_listener 2017-02-06 10:41:44 +00:00
Cameron Sparr f6e1409c9a Remove metric.Point from metric interface 2017-02-03 16:53:07 +00:00
Cosmo Petrich aeffacbe4a Increment gather_errors for all input errors
closes #2339
2017-02-03 11:22:31 +00:00
Cameron Sparr e8fdd96b2d changelog update 2017-02-03 10:04:50 +00:00
Nick Irvine 4816615deb Remove pidfile if pidfile was created (#2358)
Also, ensure pidfile perms are 644
2017-02-03 10:02:19 +00:00
Patrick Hemmer 510b750da4 add socket listener & writer (#2094)
closes #1516 
closes #1711 
closes #1721 
closes #1526
2017-02-02 16:24:03 +00:00
Yaron de Leeuw 2a32cba35b Procstat: don't cache PIDs (#2206)
* Procstat: don't cache PIDs

Changed the procstat input plugin to not cache PIDs. Solves #1636.
The logic of creating a process by pid was moved from `procstat.go` to
`spec_processor.go`.

* Procstat: go fmt

* procstat: modify changelog for #2206
2017-02-02 14:12:22 +00:00
Patrick Hemmer 036d1beb87 add missing fields to haproxy input (#2323) 2017-02-02 13:46:53 +00:00
Matteo Cerutti c8de4833e3 allow querying sensors via the open interface
closes #2244
closes #1547
2017-02-02 13:31:04 +00:00
Cameron Sparr cf3fcf328d fix telegraf swallowing panics in --test mode
this defer function was causing telegraf to call os.Exit(0) instead of
panicking when it was supposed to.

closes #2341
2017-02-02 12:14:35 +00:00
Cameron Sparr 7bd6dc3d7f Godeps update
closes #2356
2017-02-02 09:52:06 +00:00
Cameron Sparr c158346642 Add more nested globpath tests 2017-02-01 23:44:35 +00:00
Nathan Haugo 0c5e329212 Update readme to link to k8s plugin (#2355) 2017-02-01 21:23:45 +00:00
Jérôme Vizcaino 6baa06121e Ceph: represent pgmap states using tags (#2229)
* ceph: maps are already refs, no need to use a pointer

* ceph: pgmap_states are represented in a single metric "count", differentiated by tag

* Update CHANGELOG
2017-02-01 14:47:23 +00:00
Cameron Sparr 2bf599b8c8 changelog update 2017-02-01 14:41:58 +00:00
James Gregory f0f913ab41 Kubernetes input: Handle null startTime for stopped pods (#2335) 2017-02-01 14:41:04 +00:00
Cameron Sparr c65d74d54e iptables changelog update 2017-02-01 14:39:16 +00:00
ldep30 4740b818fe Add lock option to the IPtables input plugin (#2201)
* Update README.md

* Add lock support to the IPtables input plugin

* Update iptables.go

Doc cleaning
2017-02-01 14:37:18 +00:00
Cameron Sparr 786557a2cc changelog fix 2017-02-01 14:22:31 +00:00
Len Smith c2d86e6649 http_response : Add in support for looking for substring in response (#2204)
* Add in support for looking for substring in response

* Add note to CHANGELOG.md

* Switch from substring match to regex match

* Requested code changes

* Make requested changes and refactor to avoid nested if-else.

* Convert tabs to space and compile regex once
2017-02-01 14:21:08 +00:00
Pierre Fersing 07f2e6dc94 Keep -config-directory when running as Windows service (#2330)
* Keep -config-directory when running as Windows service

* Update changelog
2017-02-01 14:12:35 +00:00
njwhite 360b3a8b9f Make Logparser Plugin Check For New Files (#2141)
* Make Logparser Plugin Check For New Files

Check in the Gather metric to see if any new files matching the glob
have appeared. If so, start tailing them from the beginning.

* changelog update for #2141
2017-02-01 14:11:39 +00:00
Cameron Sparr 7549299e5e Go 1.7.5 update cherry-picked to 1.2.1 release 2017-02-01 10:11:16 +00:00
Cameron Sparr b717b24443 Changelog update 2017-02-01 10:07:31 +00:00
Cameron Sparr cd5bef3e14 metric: Fix negative number handling
closes #2324
2017-02-01 10:07:31 +00:00
Martin f0498491b2 Go version 1.7.4 -> 1.7.5 (#2348) 2017-02-01 10:07:02 +00:00
Cameron Sparr c1bf4d15f2 Changelog update 2017-02-01 08:59:54 +00:00
Cameron Sparr 2262a6a421 running output: Drop nil metrics
fixes #2317
2017-02-01 08:55:22 +00:00
John Engelman ce2ad1a74c Add numerical representation of Consul health check state. (#2277) 2017-01-28 16:47:25 -08:00
Patrick Hemmer 28314f93b6 add support for diskio name templates & udev tags
closes #1453
closes #1386
closes #1428
2017-01-27 16:15:42 -08:00
Cameron Sparr c88f2ba3a6 Fix riemann output unit tests 2017-01-27 15:08:21 -08:00
Cameron Sparr a42e8380d5 Riemann rewrite changelog update 2017-01-27 14:59:35 -08:00
Fabio Berchtold fc76f47e43 Rewriting Riemann output plugin (#1900)
* rename to riemann_legacy

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* initial draft for Riemann output plugin rewrite

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* add unit tests

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* add option to send string metrics as states

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* add integration tests

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* add plugin README.md

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* bump riemann library

* clarify settings description

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* update Readme.md with updated description

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* add Riemann event examples

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* use full URL for Riemann server address

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

closes #1878
2017-01-27 14:54:59 -08:00
Cameron Sparr a36fd375de Revert using fasthttp library to net/http 2017-01-25 16:42:23 -08:00
Cameron Sparr f5d892d7d3 Improve the InfluxDB through-put performance
This changes the current use of the InfluxDB client to instead use a
baked-in client that uses the fasthttp library.

This allows for significantly smaller allocations, the re-use of http
body buffers, and the re-use of the actual bytes of the line-protocol
metric representations.
2017-01-25 11:54:16 -08:00
Cameron Sparr a8b1fe6f00 ntpq: correct number of seconds in an hour
closes #2256
2017-01-24 15:27:44 -08:00
Jonas Hahnfeld b0c721491d snmp: Allow lines with empty or missing tags (#2172)
The changes in #1848 resulted in lines being dropped if they had an empty
tag. Let's allow all lines that have empty or missing tags!
2017-01-24 14:57:43 -08:00
Kali Hernandez d4d3f61673 Debian package: check for group before useradd (#2107)
Fixes #2106
2017-01-24 14:54:19 -08:00
Will Pearson e1faf06974 Fix problem with graphite talking to closed connections (#2171)
We were having problems with telegraf talking to
carbon-relay-ng using the graphite output. When
the carbon-relay-ng server restarted the connection,
the telegraf side would go into CLOSE_WAIT, but telegraf
would continue to send statistics through the connection.

Reading around, it seems you need to do a read from the connection
and see an EOF error. We've implemented this and added a test
that replicates roughly the error we were having.

Pair: @whpearson @joshmyers
2017-01-24 12:50:29 -08:00
Pierre Fersing 29933d0835 Improve win_perf_counters on non English systems (#2261) 2017-01-24 12:46:06 -08:00
Pierre Fersing de38403899 Added more Windows metrics (#2290)
Signed-off-by: Pierre Fersing <pierre.fersing@bleemeo.com>
2017-01-24 12:38:10 -08:00
James 6994b25656 fix postgresql 'name', and 'oid' data types by switching to a driver (#1750)
that handles them properly
2017-01-24 12:36:36 -08:00
Cameron Sparr 845392e090 32-bit binary for windows and freebsd
closes #1346
closes #2218
2017-01-23 20:28:13 -08:00
Cameron Sparr 819813ad72 influxdb output: treat field type conflicts as a successful write
If we write a batch of points and get a "field type conflict" error
message in return, we should drop the entire batch of points because
this indicates that one or more points have a type that doesn't match the
database.

These errors will never go away on their own, and InfluxDB will
successfully write the points that don't have a conflict.

closes #2245
2017-01-23 16:41:29 -08:00
Cameron Sparr e06f1e0323 Run scheduled flushes in background
Doing this unblocks incoming metrics while waiting for a flush to take
place.

We have to create a semaphore so that we can
'skip' flushes that try to run while a flush is already running.

closes #2262
2017-01-23 14:41:40 -08:00
Cameron Sparr 4d72cd7c9f Add newline to influx line-protocol if not present
closes #2297
2017-01-23 13:52:20 -08:00
Cameron Sparr a852e8106e opentsdb: add tcp:// prefix if not present
closes #2299
2017-01-23 13:45:16 -08:00
Claudius Zingerli c9e0ae4d81 Add minimal documentation to the diskio plugin (#2296)
* Add documentation to diskio plugin

* Update spelling, fix iops_in_progress unit
2017-01-21 15:08:17 -08:00
Cameron Sparr c3d1da6b6b Direct people to downloads page for installation 2017-01-13 17:02:10 +00:00
Cameron Sparr 6ca52195a9 mysql build fixup and changelog update 2017-01-13 14:44:28 +00:00
Pierre Fersing a33c86f9c4 Added more InnoDB metric to MySQL plugin (#2179) 2017-01-13 14:28:56 +00:00
Cameron Sparr 3551fbff4d Changelog update and go fmt 2017-01-13 14:27:20 +00:00
acezellponce 338f01a8da Added userstats to mysql input plugin (#2137)
* Added GatherUserStatistics, row Uptime in gatherGlobalStatuses, and version fields & tags

* Updated README file

* pulling in latest from master

* ran go fmt to fix formatting

* fix unreachable code

* few fixes

* cleaning up and applying suggestions from sparrc
2017-01-13 14:25:25 +00:00
Viet Hung Nguyen c4a708751a Ignore devfs on OSX (#2232) 2017-01-13 14:19:57 +00:00
Cameron Sparr ab8b7a55e1 Changelog update 2017-01-13 13:50:07 +00:00
Kebus1 6a9187ff48 Fixed Bug 2077 SQL Server (#2212) 2017-01-13 13:47:47 +00:00
Cameron Sparr 9b0e1e9c49 OpenTSDB filter types for HTTP AND telnet 2017-01-13 11:44:28 +00:00
Cameron Sparr eb67b17c28 Accept an HTTP request body without newline at end (#2266)
I don't like this behavior, but it's what InfluxDB accepts, so the
telegraf listener should be consistent with that.

I accidentally reverted this behavior when I refactored the telegraf
metric representation earlier in this release cycle.
2017-01-13 11:43:50 +00:00
Cameron Sparr 51d343785e update etc/telegraf.conf 2017-01-12 11:14:12 +00:00
Patrick Hemmer 0c15099d94 allow changing jolokia delimiter (#2255) 2017-01-12 11:08:22 +00:00
Mohammad Ali Alfarra b277a28e15 Document basic auth for haproxy (#2258)
* Document basic auth for haproxy

* Typo in haproxy readme
2017-01-12 08:47:01 +00:00
Cameron Sparr 61defd36d3 mongodb: Remove superfluous ReplSet log message
closes #2248
2017-01-11 17:50:01 +00:00
Emil Haugbergsmyr 9d28ebce91 Fixes change in Kafka consumer input plugin (#2222)
* Fixes change to the error api in the kafka project.

* Updated test to reflect the change.

* Update kafka to match master branch.
2017-01-11 16:24:09 +00:00
Kurt Mackey 6735d4e62c Fix for broken librato output (#2225)
* Fix for broken librato output

These errors are delightful, but I'd rather avoid them:

```
Error parsing /etc/telegraf/telegraf.conf, line 2: field corresponding to `api_user' is not defined in `*librato.Librato'
```

* Fixed bad format from last commit
2017-01-09 14:48:32 +00:00
Patrick Hemmer e225aa85e4 ensure proper context on snmp error messages (#2220) 2017-01-09 13:03:33 +00:00
Jérôme Vizcaino 0d357a7d5b ceph: sample config should reflect actual defaults (#2228) 2017-01-09 12:51:15 +00:00
Cameron Sparr fe9d3257c7 readme fixup & test output fixup 2017-01-09 12:28:13 +00:00
Cameron Sparr c9089b4700 mongodb: don't print unnecessary & inaccurate auth failure
closes #2209
2017-01-06 13:11:24 +01:00
YKlausz fb225d69f8 Remove print call in cassandra plugin (#2192) 2016-12-21 17:23:54 +00:00
Dominik Labuda e16072876d [plugins] jolokia input plugin: configurable http timeouts (#2098) 2016-12-21 12:41:58 +00:00
Cameron Sparr b77dc90741 docker: check type when totalling blkio & net metrics
closes #2027
2016-12-21 12:18:38 +00:00
Cameron Sparr 1cc763e514 Do not try Uint parsing in redis plugin
This is just a waste of CPU cycles, since telegraf converts all uints to
int64 anyway.
2016-12-20 23:42:14 +00:00
Mark Wolfe 2f521a87d4 Fix for loop over value array range issue. (#2187) 2016-12-20 22:56:02 +00:00
Cameron Sparr 8311ff9156 Mask username/password from error messages
closes #1980
2016-12-20 19:35:45 +00:00
Cameron Sparr 84fc621a1a changelog update 2016-12-20 18:50:32 +00:00
Mark Wolfe 829c190b8c Moved to using the inbuilt serializer. (#1942)
* Moved to using the inbuilt serializer.

* Remove Atomic variable as it is not required.

* Adjusted metric type in line with latest changes.
2016-12-20 18:49:28 +00:00
Cameron Sparr 73acd114d1 Do not create a global statsd "previous instance"
this basically reverts #887

at some point we might want to do some special handling of reloading
plugins and keeping their state intact, but that will need to be done at
a higher level, and in a way that is thread-safe for multiple input
plugins of the same type.

Unfortunately this is a rather large feature that will not have a quick
fix available for it.

fixes #1975
fixes #2102
2016-12-20 17:55:04 +00:00
Cameron Sparr 491ba10b00 changelog update 2016-12-20 16:30:49 +00:00
Matt O'Hara b0609beb2b Add clusterstats to elasticsearch plugin (#1979)
* add clusterstats to elasticsearch input plugin

* add clusterstats to elasticsearch input plugin

* add clusterstats to elasticsearch input plugin

* add clusterstats to elasticsearch input plugin

* add clusterstats to elasticsearch input plugin

* responses to requested changes

* remove unnecessary recommendation
2016-12-20 16:30:03 +00:00
Cameron Sparr 48fc28331e Fix & unit test logparser CLF pattern with IPv6
deals partially with #1973

see also https://github.com/vjeantet/grok/issues/17
2016-12-20 15:57:32 +00:00
Cameron Sparr cf9fd9bb5f Support negative statsd counters
closes #1898
2016-12-20 13:21:51 +00:00
Cameron Sparr a2feba21de changelog update 2016-12-20 13:04:51 +00:00
Łukasz Harasimowicz 68b351ca05 Fixing consul with multiple health checks per service (#1994)
* plugins/input/consul: moved check_id from regular fields to tags.

When a service has more than one check, sending data for both would overwrite each other,
resulting in only one check being written (the last one). Adding check_id as a tag
ensures we will get info for all unique checks per service.

* plugins/inputs/consul: updated tests
2016-12-20 13:03:31 +00:00
Jeff Ashton ca31aaad85 Importing pdh from github.com/lxn/win
closes #1763
closes #2017
2016-12-20 12:06:40 +00:00
Ken Dilley 08057b2bd3 Update MySQL Readme to clarify connection string examples. (#2175)
* Update MySQL Readme to clarify connection string examples.

* Update mysql sample config to clarify connection string examples
2016-12-20 10:17:00 +00:00
Cameron Sparr 468e7e2c74 internal.Duration build fixup 2016-12-17 13:10:33 +00:00
Pierre Tessier 6f9de7ab3d Added response_timeout property
closes #2006
2016-12-17 13:06:04 +00:00
Steven Pall 08f03ceba2 Add trailing slash to jolokia context (#2105) 2016-12-17 12:51:46 +00:00
Cameron Sparr d2d00a6286 changelog update 2016-12-16 17:30:13 +00:00
Vincent b7764a58a9 fix mongodb replica set lag always 0 #1449 (#2125) 2016-12-16 17:29:04 +00:00
Cameron Sparr e8e5e5b818 rabbitmq, decrease timeout verbosity in config 2016-12-16 14:12:50 +00:00
Tevin Jeffrey c02828322c Add field for last GC pause time (#2121) 2016-12-16 14:03:53 +00:00
Cameron Sparr fbdb8b83d6 changelog update 2016-12-16 14:02:11 +00:00
Patrick Hemmer f57b8aac0b fix tail input seeking when used with pipe (#2090) 2016-12-16 14:01:49 +00:00
alekseyp d27c78a979 Standard deviation (jitter) for Input plugin Ping (#2078) 2016-12-16 13:58:27 +00:00
Cameron Sparr cf7ea36541 changelog update 2016-12-16 13:54:51 +00:00
Alex Sherwin 86d31a5b79 fixes #1987 custom docker repos with non-standard port (#2018)
* fixed parsing of docker image name/version

now accounts for custom docker repos which contain a colon for a non-default port

* 1978: modifying docker test case to have a custom repo with non-standard port

* using a temp var to store index, ran gofmt

* fixes #1987, renaming iterator to 'i'
2016-12-16 13:53:16 +00:00
Pierre Tessier 6255e9b474 Add Questions status variable for issue: #1988 (#2004) 2016-12-16 13:47:47 +00:00
Doug Reese 2214ee90b6 MongoDB input plugin: Improve state data (#2001)
* MongoDB input plugin: Improve state data

Adds ARB as a "member_status" (replica set arbiter).
Uses MongoDB replica set state string for "state" value.

* MongoDB input plugin: Improve state data - changelog update
2016-12-16 13:46:32 +00:00
Frank Stutz 99daa52254 fix for puppetagent config - test 1
put Makefile back to normal

removed comment from puppetagent.go

changed config_version to config_version_string and fixed yaml for build

changed wording from branch to environment for config_string

fixed casing and Changelog

fixed test case

closes #1917
2016-12-16 13:36:06 +00:00
Cameron Sparr 1ebc7245a3 Revert "Rabbitmq plugin: connection-related metrics." (#2169) 2016-12-15 19:31:40 +00:00
Florian Klink d323a8f2e8 ping: fix typo in README (#2163) 2016-12-14 19:47:48 +00:00
Jose Luis Navarro 283bd6d965 Collect JSON values recursively
closes #1993
closes #1693
2016-12-13 21:06:05 +00:00
Pieter Slabbert 182f282fd2 Enable setting a clientID for MQTT Output
closes #2079
closes #1910
2016-12-13 20:03:09 +00:00
Cameron Sparr cebbf39276 Set default values for delete_ configuration options
closes #1893
2016-12-13 20:00:52 +00:00
Jonas Falck 74d8aef0c0 Change hddtemp to always put temperature in temperature field (#1905)
Added unit tests for the changes

Fixes #1904
2016-12-13 19:40:55 +00:00
Cameron Sparr c4c13c4e90 Graylog output should set short_message field
closes #2045
2016-12-13 16:10:59 +00:00
Cameron Sparr b78859b331 Fix documentation for net_response plugin
closes #2103
2016-12-13 16:02:03 +00:00
Cameron Sparr a5fd775369 Support strings in statsd set measurements
closes #2068
2016-12-13 15:42:22 +00:00
Cameron Sparr 8176f6f273 Fix possible panic when file info cannot be gotten
closes #2061
2016-12-13 14:54:07 +00:00
Cameron Sparr 921a3b1b65 Update changelog 2016-12-13 14:28:28 +00:00
Da1den 3e2296541a Fixed bug that you cannot gather data on non english systems (#1944) 2016-12-13 14:24:41 +00:00
krise3k 4a0f3a7100 Add missing slim (#1937) 2016-12-13 14:23:18 +00:00
Kishore Nallan af850b8854 Rabbitmq plugin: connection-related metrics. (#1908)
* Rabbitmq plugin: connection-related metrics.

* Run go fmt.
2016-12-13 14:17:20 +00:00
Anthony Arnaud a61148904d Output openTSDB HTTPS with basic auth (#1913) 2016-12-13 14:15:51 +00:00
Leon Barrett 9add7b9e9a Fix bug: too many cloudwatch metrics (#1885)
* Fix bug: too many cloudwatch metrics

Cloudwatch metrics were being added incorrectly. The most obvious
symptom of this was that too many metrics were being added. A simple
check against the name of the metric proved to be a sufficient fix. In
order to test the fix, a metric selection function was factored out.

* Go fmt cloudwatch

* Cloudwatch isSelected checks metric name

* Move cloudwatch line in changelog to 1.2 features
2016-12-13 14:13:53 +00:00
Rikaard Hosein bf8e1b5f13 Can turn pid into tag instead of field
closes #1843
fixes  #1668
2016-12-13 13:21:39 +00:00
Cameron Sparr cab1118c31 Check if metric is nil before calling SetAggregate
fixes #2146
2016-12-13 12:27:10 +00:00
1128 changed files with 44417 additions and 164643 deletions


@ -2,214 +2,91 @@
defaults:
defaults: &defaults
working_directory: '/go/src/github.com/influxdata/telegraf'
environment:
GOFLAGS: -p=8
go-1_13: &go-1_13
go-1_9: &go-1_9
docker:
- image: 'quay.io/influxdb/telegraf-ci:1.13.11'
go-1_14: &go-1_14
- image: 'circleci/golang:1.9.7'
go-1_10: &go-1_10
docker:
- image: 'quay.io/influxdb/telegraf-ci:1.14.3'
mac: &mac
macos:
xcode: 11.3.1
working_directory: '~/go/src/github.com/influxdata/telegraf'
environment:
HOMEBREW_NO_AUTO_UPDATE: 1
GOFLAGS: -p=8
- image: 'circleci/golang:1.10.3'
version: 2
jobs:
deps:
<<: [ *defaults, *go-1_14 ]
<<: [ *defaults, *go-1_10 ]
steps:
- checkout
- restore_cache:
key: go-mod-v1-{{ checksum "go.sum" }}
key: vendor-{{ .Branch }}-{{ checksum "Gopkg.lock" }}
- run: 'make deps'
- run: 'make tidy'
- save_cache:
name: 'go module cache'
key: go-mod-v1-{{ checksum "go.sum" }}
name: 'vendored deps'
key: vendor-{{ .Branch }}-{{ checksum "Gopkg.lock" }}
paths:
- '/go/pkg/mod'
- './vendor'
- persist_to_workspace:
root: '/go'
root: '/go/src'
paths:
- '*'
macdeps:
<<: [ *mac ]
steps:
- checkout
- restore_cache:
key: mac-go-mod-v1-{{ checksum "go.sum" }}
- run: 'brew install go@1.13'
- run: 'make deps'
- run: 'make tidy'
- save_cache:
name: 'go module cache'
key: mac-go-mod-v1-{{ checksum "go.sum" }}
paths:
- '~/go/pkg/mod'
- '/usr/local/Cellar/go'
- '/usr/local/bin/go'
- '/usr/local/bin/gofmt'
- persist_to_workspace:
root: '/'
paths:
- 'usr/local/bin/go'
- 'usr/local/Cellar/go'
- 'usr/local/bin/gofmt'
- 'Users/distiller/go'
test-go-1.13:
<<: [ *defaults, *go-1_13 ]
test-go-1.9:
<<: [ *defaults, *go-1_9 ]
steps:
- attach_workspace:
at: '/go'
- run: 'make'
- run: 'make test'
test-go-1.13-386:
<<: [ *defaults, *go-1_13 ]
at: '/go/src'
- run: 'make test-ci'
test-go-1.10:
<<: [ *defaults, *go-1_10 ]
steps:
- attach_workspace:
at: '/go'
- run: 'GOARCH=386 make'
- run: 'GOARCH=386 make test'
test-go-1.14:
<<: [ *defaults, *go-1_14 ]
steps:
- attach_workspace:
at: '/go'
- run: 'make'
- run: 'make check'
- run: 'make check-deps'
- run: 'make test'
test-go-1.14-386:
<<: [ *defaults, *go-1_14 ]
steps:
- attach_workspace:
at: '/go'
- run: 'GOARCH=386 make'
- run: 'GOARCH=386 make check'
- run: 'GOARCH=386 make test'
test-go-1.13-darwin:
<<: [ *mac ]
steps:
- attach_workspace:
at: '/'
- run: 'make'
- run: 'make check'
- run: 'make test'
package:
<<: [ *defaults, *go-1_14 ]
steps:
- attach_workspace:
at: '/go'
- run: 'make package'
- store_artifacts:
path: './build'
destination: 'build'
at: '/go/src'
- run: 'make test-ci'
- run: 'GOARCH=386 make test-ci'
release:
<<: [ *defaults, *go-1_14 ]
<<: [ *defaults, *go-1_10 ]
steps:
- attach_workspace:
at: '/go'
- run: 'make package-release'
at: '/go/src'
- run: './scripts/release.sh'
- store_artifacts:
path: './build'
destination: 'build'
path: './artifacts'
destination: '.'
nightly:
<<: [ *defaults, *go-1_14 ]
<<: [ *defaults, *go-1_10 ]
steps:
- attach_workspace:
at: '/go'
- run: 'make package-nightly'
at: '/go/src'
- run: './scripts/release.sh'
- store_artifacts:
path: './build'
destination: 'build'
path: './artifacts'
destination: '.'
workflows:
version: 2
check:
build_and_release:
jobs:
- 'macdeps':
filters:
tags:
only: /.*/
- 'deps':
filters:
tags:
only: /.*/
- 'test-go-1.13':
- 'deps'
- 'test-go-1.9':
requires:
- 'deps'
filters:
tags:
only: /.*/
- 'test-go-1.13-386':
- 'test-go-1.10':
requires:
- 'deps'
filters:
tags:
only: /.*/
- 'test-go-1.14':
requires:
- 'deps'
filters:
tags:
only: /.*/
- 'test-go-1.14-386':
requires:
- 'deps'
filters:
tags:
only: /.*/
- 'test-go-1.13-darwin':
requires:
- 'macdeps'
filters:
tags: # only runs on tags if you specify this filter
only: /.*/
- 'package':
requires:
- 'test-go-1.13'
- 'test-go-1.13-386'
- 'test-go-1.14'
- 'test-go-1.14-386'
- 'release':
requires:
- 'test-go-1.13'
- 'test-go-1.13-386'
- 'test-go-1.14'
- 'test-go-1.14-386'
filters:
tags:
only: /.*/
branches:
ignore: /.*/
- 'test-go-1.9'
- 'test-go-1.10'
nightly:
jobs:
- 'deps'
- 'test-go-1.13':
- 'test-go-1.9':
requires:
- 'deps'
- 'test-go-1.13-386':
requires:
- 'deps'
- 'test-go-1.14':
requires:
- 'deps'
- 'test-go-1.14-386':
- 'test-go-1.10':
requires:
- 'deps'
- 'nightly':
requires:
- 'test-go-1.13'
- 'test-go-1.13-386'
- 'test-go-1.14'
- 'test-go-1.14-386'
- 'test-go-1.9'
- 'test-go-1.10'
triggers:
- schedule:
cron: "0 7 * * *"

1
.gitattributes vendored

@ -1,5 +1,4 @@
CHANGELOG.md merge=union
README.md merge=union
go.sum merge=union
plugins/inputs/all/all.go merge=union
plugins/outputs/all/all.go merge=union


@ -1,45 +1,24 @@
---
name: Bug report
about: Create a report to help us improve
---
<!--
Please redirect any questions about Telegraf usage to the InfluxData Community
site: https://community.influxdata.com
Check the documentation for the related plugin including the troubleshooting
section if available.
-->
### Relevant telegraf.conf:
<!-- Place config in the toml code section. -->
```toml
```
### System info:
<!-- Include Telegraf version, operating system, and other relevant details -->
### Docker
<!-- If your bug involves third party dependencies or services, it can be very helpful to provide a Dockerfile or docker-compose.yml that reproduces the environment you're testing against -->
### Steps to reproduce:
<!-- Describe the steps to reproduce the bug. -->
1. ...
2. ...
### Expected behavior:
<!-- Describe what you expected to happen when you performed the above steps. -->
### Actual behavior:
<!-- Describe what actually happened when you performed the above steps. -->
### Additional info:
<!-- Include gist of relevant config, logs, etc. -->
---
name: Bug report
about: Create a report to help us improve
---
### Relevant telegraf.conf:
### System info:
[Include Telegraf version, operating system name, and other relevant details]
### Steps to reproduce:
1. ...
2. ...
### Expected behavior:
### Actual behavior:
### Additional info:
[Include gist of relevant config, logs, etc.]


@ -1,17 +1,17 @@
---
name: Feature request
about: Suggest an idea for this project
---
## Feature Request
Opening a feature request kicks off a discussion.
### Proposal:
### Current behavior:
### Desired behavior:
### Use case: <!-- [Why is this important (helps with prioritizing requests)] -->
---
name: Feature request
about: Suggest an idea for this project
---
## Feature Request
Opening a feature request kicks off a discussion.
### Proposal:
### Current behavior:
### Desired behavior:
### Use case: [Why is this important (helps with prioritizing requests)]

File diff suppressed because it is too large


@ -1,62 +1,484 @@
### Contributing
## Steps for Contributing:
1. [Sign the CLA][cla].
1. Open a [new issue][] to discuss the changes you would like to make. This is
not strictly required but it may help reduce the amount of rework you need
to do later.
1. Make changes or write plugin using the guidelines in the following
documents:
- [Input Plugins][inputs]
- [Processor Plugins][processors]
- [Aggregator Plugins][aggregators]
- [Output Plugins][outputs]
1. Ensure you have added proper unit tests and documentation.
1. Open a new [pull request][].
1. [Sign the CLA](http://influxdb.com/community/cla.html)
1. Make changes or write plugin (see below for details)
1. Add your plugin to one of: `plugins/{inputs,outputs,aggregators,processors}/all/all.go`
1. If your plugin requires a new Go package,
[add it](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#adding-a-dependency)
1. Write a README for your plugin. If it's an input plugin, it should be structured
like the [input example here](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/EXAMPLE_README.md).
Output plugins READMEs are less structured,
but any information you can provide on how the data will look is appreciated.
See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
for a good example.
1. **Optional:** Help users of your plugin by including example queries for populating dashboards. Include these sample queries in the `README.md` for the plugin.
1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf).
#### Contributing an External Plugin *(experimental)*
Input plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/execd) without having to change the plugin code.
Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/execd/shim) to easily compile it as a separate app and run it from the inputs.execd plugin.
#### Security Vulnerability Reporting
InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our
open source projects, please responsibly disclose it by contacting security@influxdata.com. More details about
security vulnerability reporting,
including our GPG key, [can be found here](https://www.influxdata.com/how-to-report-security-vulnerabilities/).
### GoDoc
## GoDoc
Public interfaces for inputs, outputs, processors, aggregators, metrics,
and the accumulator can be found in the GoDoc:
and the accumulator can be found on the GoDoc
[![GoDoc](https://godoc.org/github.com/influxdata/telegraf?status.svg)](https://godoc.org/github.com/influxdata/telegraf)
### Common development tasks
## Sign the CLA
**Adding a dependency:**
Before we can merge a pull request, you will need to sign the CLA,
which can be found [on our website](http://influxdb.com/community/cla.html)
Telegraf uses Go modules. Assuming you can already build the project, run this in the telegraf directory:
## Adding a dependency
1. `go get github.com/[dependency]/[new-package]`
Assuming you can already build the project, run these in the telegraf directory:
**Unit Tests:**
1. `go get -u github.com/golang/dep/cmd/dep`
2. `dep ensure`
3. `dep ensure -add github.com/[dependency]/[new-package]`
## Input Plugins
This section is for developers who want to create new collection inputs.
Telegraf is entirely plugin driven. This interface allows for operators to
pick and choose what is gathered and makes it easy for developers
to create new ways of generating metrics.
Plugin authorship is kept as simple as possible to encourage people to develop
and submit new inputs.
### Input Plugin Guidelines
* A plugin must conform to the [`telegraf.Input`](https://godoc.org/github.com/influxdata/telegraf#Input) interface.
* Input Plugins should call `inputs.Add` in their `init` function to register themselves.
See below for a quick example.
* Input Plugins must be added to the
`github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
plugin can be configured. This is included in the output of `telegraf config`.
* The `Description` function should say in one line what this plugin does.
Let's say you've written a plugin that emits metrics about processes on the
current host.
### Input Plugin Example
```go
package simple
// simple.go
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
type Simple struct {
Ok bool
}
func (s *Simple) Description() string {
return "a demo plugin"
}
func (s *Simple) SampleConfig() string {
return `
## Indicate if everything is fine
ok = true
`
}
func (s *Simple) Gather(acc telegraf.Accumulator) error {
if s.Ok {
acc.AddFields("state", map[string]interface{}{"value": "pretty good"}, nil)
} else {
acc.AddFields("state", map[string]interface{}{"value": "not great"}, nil)
}
return nil
}
func init() {
inputs.Add("simple", func() telegraf.Input { return &Simple{} })
}
```
## Adding Typed Metrics
In addition to the `AddFields` function, the accumulator also supports
`AddGauge` and `AddCounter` functions. These functions are for adding _typed_
metrics. Metric types are ignored for the InfluxDB output, but can be used
for other outputs, such as [prometheus](https://prometheus.io/docs/concepts/metric_types/).
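As a rough sketch of what this can look like in a `Gather` method (the measurement and field names below are made up for illustration, and the plugin type reuses the `Simple` example above):
```go
func (s *Simple) Gather(acc telegraf.Accumulator) error {
	tags := map[string]string{"host": "example"}
	// AddGauge: a point-in-time value that may go up or down.
	acc.AddGauge("simple_queue", map[string]interface{}{"depth": 42}, tags)
	// AddCounter: a monotonically increasing total.
	acc.AddCounter("simple_requests", map[string]interface{}{"total": 1001}, tags)
	return nil
}
```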
## Input Plugins Accepting Arbitrary Data Formats
Some input plugins (such as
[exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec))
accept arbitrary input data formats. An overview of these data formats can
be found
[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).
In order to enable this, you must specify a `SetParser(parser parsers.Parser)`
function on the plugin object (see the exec plugin for an example), as well as
defining `parser` as a field of the object.
You can then utilize the parser internally in your plugin, parsing data as you
see fit. Telegraf's configuration layer will take care of instantiating and
creating the `Parser` object.
You should also add the following to your SampleConfig() return:
```toml
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
```
Below is the `Parser` interface.
```go
// Parser is an interface defining functions that a parser plugin must satisfy.
type Parser interface {
// Parse takes a byte buffer separated by newlines
// ie, `cpu.usage.idle 90\ncpu.usage.busy 10`
// and parses it into telegraf metrics
Parse(buf []byte) ([]telegraf.Metric, error)
// ParseLine takes a single string metric
// ie, "cpu.usage.idle 90"
// and parses it into a telegraf metric.
ParseLine(line string) (telegraf.Metric, error)
}
```
And you can view the code
[here.](https://github.com/influxdata/telegraf/blob/henrypfhu-master/plugins/parsers/registry.go)
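A minimal sketch of that wiring, using a hypothetical `ExecLike` input (illustrative only, not a real Telegraf plugin): the configuration layer calls `SetParser`, and `Gather` hands the raw bytes it collected to the parser.
```go
package execlike
// execlike.go
import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/parsers"
)
// ExecLike is a hypothetical input that accepts arbitrary data formats.
type ExecLike struct {
	parser parsers.Parser // populated by Telegraf's configuration layer
}
func (e *ExecLike) Description() string  { return "a demo input with a configurable data format" }
func (e *ExecLike) SampleConfig() string { return "  data_format = \"influx\"\n" }
// SetParser receives the parser built from the user's data_format setting.
func (e *ExecLike) SetParser(parser parsers.Parser) {
	e.parser = parser
}
func (e *ExecLike) Gather(acc telegraf.Accumulator) error {
	raw := []byte("cpu usage_idle=90\n") // stand-in for real collected output
	metrics, err := e.parser.Parse(raw)
	if err != nil {
		return err
	}
	for _, m := range metrics {
		acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
	}
	return nil
}
func init() {
	inputs.Add("execlike", func() telegraf.Input { return &ExecLike{} })
}
```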
## Service Input Plugins
This section is for developers who want to create new "service" collection
inputs. A service plugin differs from a regular plugin in that it operates
a background service while Telegraf is running. One example would be the `statsd`
plugin, which operates a statsd server.
Service Input Plugins are substantially more complicated than a regular plugin, as they
will require threads and locks to verify data integrity. Service Input Plugins should
be avoided unless there is no way to create their behavior with a regular plugin.
Their interface is quite similar to a regular plugin, with the addition of `Start()`
and `Stop()` methods.
### Service Plugin Guidelines
* Same as the `Plugin` guidelines, except that they must conform to the
[`telegraf.ServiceInput`](https://godoc.org/github.com/influxdata/telegraf#ServiceInput) interface.
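A bare-bones sketch of the extra lifecycle (hypothetical `TickerInput`, illustrative only; the `Start(acc telegraf.Accumulator) error` / `Stop()` shape is assumed from the interface linked above):
```go
package tickerinput
// tickerinput.go
import (
	"time"
	"github.com/influxdata/telegraf"
)
// TickerInput is a hypothetical service input; Description and SampleConfig
// are omitted here for brevity but are still required.
type TickerInput struct {
	stop chan struct{}
}
// Start launches the background service and keeps pushing metrics through
// the accumulator until Stop is called.
func (t *TickerInput) Start(acc telegraf.Accumulator) error {
	t.stop = make(chan struct{})
	go func() {
		ticker := time.NewTicker(10 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-t.stop:
				return
			case <-ticker.C:
				acc.AddFields("ticker", map[string]interface{}{"ticks": 1}, nil)
			}
		}
	}()
	return nil
}
// Stop shuts the background goroutine down cleanly.
func (t *TickerInput) Stop() {
	close(t.stop)
}
// Gather is still required; many service inputs make it a no-op because
// metrics are pushed from the background service instead.
func (t *TickerInput) Gather(acc telegraf.Accumulator) error { return nil }
```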
## Output Plugins
This section is for developers who want to create a new output sink. Outputs
are created in a similar manner as collection plugins, and their interface has
similar constructs.
### Output Plugin Guidelines
* An output must conform to the [`telegraf.Output`](https://godoc.org/github.com/influxdata/telegraf#Output) interface.
* Outputs should call `outputs.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/outputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
output can be configured. This is included in the output of `telegraf config`.
* The `Description` function should say in one line what this output does.
### Output Example
```go
package simpleoutput
// simpleoutput.go
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs"
)
type Simple struct {
Ok bool
}
func (s *Simple) Description() string {
return "a demo output"
}
func (s *Simple) SampleConfig() string {
return `
ok = true
`
}
func (s *Simple) Connect() error {
// Make a connection to the URL here
return nil
}
func (s *Simple) Close() error {
// Close connection to the URL here
return nil
}
func (s *Simple) Write(metrics []telegraf.Metric) error {
for _, metric := range metrics {
// write `metric` to the output sink here
}
return nil
}
func init() {
outputs.Add("simpleoutput", func() telegraf.Output { return &Simple{} })
}
```
## Output Plugins Writing Arbitrary Data Formats
Some output plugins (such as
[file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file))
can write arbitrary output data formats. An overview of these data formats can
be found
[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md).
In order to enable this, you must specify a
`SetSerializer(serializer serializers.Serializer)`
function on the plugin object (see the file plugin for an example), as well as
defining `serializer` as a field of the object.
You can then utilize the serializer internally in your plugin, serializing data
before it's written. Telegraf's configuration layer will take care of
instantiating and creating the `Serializer` object.
You should also add the following to your SampleConfig() return:
```toml
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
```
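A small sketch of the analogous wiring, using a hypothetical `FileLike` output (illustrative only, not the real file plugin): `SetSerializer` is called by the configuration layer, and `Write` serializes each metric before writing it out.
```go
package filelike
// filelike.go
import (
	"os"
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/serializers"
)
// FileLike is a hypothetical output that writes arbitrary data formats.
type FileLike struct {
	serializer serializers.Serializer // populated by Telegraf's configuration layer
}
func (f *FileLike) Description() string  { return "a demo output with a configurable data format" }
func (f *FileLike) SampleConfig() string { return "  data_format = \"influx\"\n" }
func (f *FileLike) Connect() error       { return nil }
func (f *FileLike) Close() error         { return nil }
// SetSerializer receives the serializer built from the user's data_format setting.
func (f *FileLike) SetSerializer(serializer serializers.Serializer) {
	f.serializer = serializer
}
func (f *FileLike) Write(metrics []telegraf.Metric) error {
	for _, m := range metrics {
		b, err := f.serializer.Serialize(m)
		if err != nil {
			return err
		}
		if _, err := os.Stdout.Write(b); err != nil {
			return err
		}
	}
	return nil
}
```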
## Service Output Plugins
This section is for developers who want to create new "service" output. A
service output differs from a regular output in that it operates a background service
while Telegraf is running. One example would be the `prometheus_client` output,
which operates an HTTP server.
Their interface is quite similar to a regular output, with the addition of `Start()`
and `Stop()` methods.
### Service Output Guidelines
* Same as the `Output` guidelines, except that they must conform to the
`output.ServiceOutput` interface.
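A compact sketch of the extra methods on the output side (hypothetical `HTTPServerOutput`, illustrative only; a `Start() error` / `Stop()` pair in addition to the normal output methods is assumed):
```go
package httpserveroutput
// httpserveroutput.go
import (
	"log"
	"net/http"
)
// HTTPServerOutput is a hypothetical service output; Connect, Close, Write and
// the config methods are the same as for a regular output and are omitted here.
type HTTPServerOutput struct {
	server *http.Server
}
// Start brings up the background HTTP service that runs while Telegraf runs.
func (h *HTTPServerOutput) Start() error {
	h.server = &http.Server{Addr: ":9273"}
	go func() {
		// ErrServerClosed is the normal result of Stop(); anything else is a real error.
		if err := h.server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Printf("E! service output server error: %v", err)
		}
	}()
	return nil
}
// Stop tears the background service down on shutdown or reload.
func (h *HTTPServerOutput) Stop() {
	h.server.Close()
}
```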
## Processor Plugins
This section is for developers who want to create a new processor plugin.
### Processor Plugin Guidelines
* A processor must conform to the [`telegraf.Processor`](https://godoc.org/github.com/influxdata/telegraf#Processor) interface.
* Processors should call `processors.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/processors/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
processor can be configured. This is included in the output of `telegraf config`.
* The `Description` function should say in one line what this processor does.
### Processor Example
```go
package printer
// printer.go
import (
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/processors"
)
type Printer struct {
}
var sampleConfig = `
`
func (p *Printer) SampleConfig() string {
return sampleConfig
}
func (p *Printer) Description() string {
return "Print all metrics that pass through this filter."
}
func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric {
for _, metric := range in {
fmt.Println(metric.String())
}
return in
}
func init() {
processors.Add("printer", func() telegraf.Processor {
return &Printer{}
})
}
```
## Aggregator Plugins
This section is for developers who want to create a new aggregator plugin.
### Aggregator Plugin Guidelines
* An aggregator must conform to the [`telegraf.Aggregator`](https://godoc.org/github.com/influxdata/telegraf#Aggregator) interface.
* Aggregators should call `aggregators.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
aggregator can be configured. This is included in the output of `telegraf config`.
* The `Description` function should say in one line what this aggregator does.
* The Aggregator plugin will need to keep caches of metrics that have passed
through it. This should be done using the builtin `HashID()` function of each
metric.
* When the `Reset()` function is called, all caches should be cleared.
### Aggregator Example
```go
package min
// min.go
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
type Min struct {
// caches for metric fields, names, and tags
fieldCache map[uint64]map[string]float64
nameCache map[uint64]string
tagCache map[uint64]map[string]string
}
func NewMin() telegraf.Aggregator {
m := &Min{}
m.Reset()
return m
}
var sampleConfig = `
## period is the flush & clear interval of the aggregator.
period = "30s"
## If true drop_original will drop the original metrics and
## only send aggregates.
drop_original = false
`
func (m *Min) SampleConfig() string {
return sampleConfig
}
func (m *Min) Description() string {
return "Keep the aggregate min of each metric passing through."
}
func (m *Min) Add(in telegraf.Metric) {
id := in.HashID()
if _, ok := m.nameCache[id]; !ok {
// hit an uncached metric, create caches for first time:
m.nameCache[id] = in.Name()
m.tagCache[id] = in.Tags()
m.fieldCache[id] = make(map[string]float64)
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
m.fieldCache[id][k] = fv
}
}
} else {
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
if _, ok := m.fieldCache[id][k]; !ok {
// hit an uncached field of a cached metric
m.fieldCache[id][k] = fv
continue
}
if fv < m.fieldCache[id][k] {
// set new minimum
m.fieldCache[id][k] = fv
}
}
}
}
}
func (m *Min) Push(acc telegraf.Accumulator) {
for id := range m.nameCache {
fields := map[string]interface{}{}
for k, v := range m.fieldCache[id] {
fields[k+"_min"] = v
}
acc.AddFields(m.nameCache[id], fields, m.tagCache[id])
}
}
func (m *Min) Reset() {
m.fieldCache = make(map[uint64]map[string]float64)
m.nameCache = make(map[uint64]string)
m.tagCache = make(map[uint64]map[string]string)
}
func convert(in interface{}) (float64, bool) {
switch v := in.(type) {
case float64:
return v, true
case int64:
return float64(v), true
default:
return 0, false
}
}
func init() {
aggregators.Add("min", func() telegraf.Aggregator {
return NewMin()
})
}
```
## Unit Tests
Before opening a pull request you should run the linter checks and
the short tests.
```
make check
make test
```
### Execute linter
**Execute integration tests:**
execute `make lint`
(Optional)
### Execute short tests
execute `make test`
### Execute integration tests
Running the integration tests requires several docker containers to be
running. You can start the containers with:
```
docker-compose up
make docker-run
```
And run the full test suite with:
@ -65,12 +487,3 @@ make test-all
```
Use `make docker-kill` to stop the containers.
[cla]: https://www.influxdata.com/legal/cla/
[new issue]: https://github.com/influxdata/telegraf/issues/new/choose
[pull request]: https://github.com/influxdata/telegraf/compare
[inputs]: /docs/INPUTS.md
[processors]: /docs/PROCESSORS.md
[aggregators]: /docs/AGGREGATORS.md
[outputs]: /docs/OUTPUTS.md


@ -1,16 +0,0 @@
# Copy of scripts/stretch.docker
FROM golang:1.13.8 as builder
WORKDIR /go/src/github.com/influxdata/telegraf
COPY . /go/src/github.com/influxdata/telegraf
RUN make go-install
FROM buildpack-deps:stretch-curl
COPY --from=builder /go/bin/* /usr/bin/
COPY etc/telegraf.conf /etc/telegraf/telegraf.conf
EXPOSE 8125/udp 8092/udp 8094
COPY scripts/docker-entrypoint.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]
CMD ["telegraf"]


@ -1,9 +0,0 @@
# External Plugins
This is a list of plugins that can be compiled outside of Telegraf and used via the execd input.
Pull requests welcome.
## Inputs
- [rand](https://github.com/ssoroka/rand) - Generate random numbers
- [twitter](https://github.com/inabagumi/twitter-telegraf-plugin) - Gather account information from Twitter accounts

973
Gopkg.lock generated Normal file

@ -0,0 +1,973 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
branch = "master"
name = "code.cloudfoundry.org/clock"
packages = ["."]
revision = "02e53af36e6c978af692887ed449b74026d76fec"
[[projects]]
name = "collectd.org"
packages = [
"api",
"cdtime",
"network"
]
revision = "2ce144541b8903101fb8f1483cc0497a68798122"
version = "v0.3.0"
[[projects]]
name = "github.com/Microsoft/ApplicationInsights-Go"
packages = [
"appinsights",
"appinsights/contracts"
]
revision = "d2df5d440eda5372f24fcac03839a64d6cb5f7e5"
version = "v0.4.2"
[[projects]]
name = "github.com/Microsoft/go-winio"
packages = ["."]
revision = "7da180ee92d8bd8bb8c37fc560e673e6557c392f"
version = "v0.4.7"
[[projects]]
name = "github.com/Shopify/sarama"
packages = ["."]
revision = "35324cf48e33d8260e1c7c18854465a904ade249"
version = "v1.17.0"
[[projects]]
name = "github.com/StackExchange/wmi"
packages = ["."]
revision = "5d049714c4a64225c3c79a7cf7d02f7fb5b96338"
version = "1.0.0"
[[projects]]
name = "github.com/aerospike/aerospike-client-go"
packages = [
".",
"internal/lua",
"internal/lua/resources",
"logger",
"pkg/bcrypt",
"pkg/ripemd160",
"types",
"types/atomic",
"types/particle_type",
"types/rand",
"utils/buffer"
]
revision = "c10b5393e43bd60125aca6289c7b24879edb1787"
version = "v1.33.0"
[[projects]]
branch = "master"
name = "github.com/alecthomas/template"
packages = [
".",
"parse"
]
revision = "a0175ee3bccc567396460bf5acd36800cb10c49c"
[[projects]]
branch = "master"
name = "github.com/alecthomas/units"
packages = ["."]
revision = "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a"
[[projects]]
branch = "master"
name = "github.com/amir/raidman"
packages = [
".",
"proto"
]
revision = "1ccc43bfb9c93cb401a4025e49c64ba71e5e668b"
[[projects]]
branch = "master"
name = "github.com/apache/thrift"
packages = ["lib/go/thrift"]
revision = "f5f430df56871bc937950274b2c86681d3db6e59"
[[projects]]
name = "github.com/aws/aws-sdk-go"
packages = [
"aws",
"aws/awserr",
"aws/awsutil",
"aws/client",
"aws/client/metadata",
"aws/corehandlers",
"aws/credentials",
"aws/credentials/ec2rolecreds",
"aws/credentials/endpointcreds",
"aws/credentials/stscreds",
"aws/csm",
"aws/defaults",
"aws/ec2metadata",
"aws/endpoints",
"aws/request",
"aws/session",
"aws/signer/v4",
"internal/sdkio",
"internal/sdkrand",
"internal/shareddefaults",
"private/protocol",
"private/protocol/json/jsonutil",
"private/protocol/jsonrpc",
"private/protocol/query",
"private/protocol/query/queryutil",
"private/protocol/rest",
"private/protocol/xml/xmlutil",
"service/cloudwatch",
"service/kinesis",
"service/sts"
]
revision = "bfc1a07cf158c30c41a3eefba8aae043d0bb5bff"
version = "v1.14.8"
[[projects]]
branch = "master"
name = "github.com/beorn7/perks"
packages = ["quantile"]
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
[[projects]]
name = "github.com/bsm/sarama-cluster"
packages = ["."]
revision = "cf455bc755fe41ac9bb2861e7a961833d9c2ecc3"
version = "v2.1.13"
[[projects]]
name = "github.com/cenkalti/backoff"
packages = ["."]
revision = "2ea60e5f094469f9e65adb9cd103795b73ae743e"
version = "v2.0.0"
[[projects]]
branch = "master"
name = "github.com/couchbase/go-couchbase"
packages = ["."]
revision = "16db1f1fe037412f12738fa4d8448c549c4edd77"
[[projects]]
branch = "master"
name = "github.com/couchbase/gomemcached"
packages = [
".",
"client"
]
revision = "0da75df145308b9a4e6704d762ca9d9b77752efc"
[[projects]]
branch = "master"
name = "github.com/couchbase/goutils"
packages = [
"logging",
"scramsha"
]
revision = "e865a1461c8ac0032bd37e2d4dab3289faea3873"
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
version = "v3.2.0"
[[projects]]
name = "github.com/docker/distribution"
packages = [
"digest",
"reference"
]
revision = "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89"
version = "v2.6.2"
[[projects]]
name = "github.com/docker/docker"
packages = [
"api/types",
"api/types/blkiodev",
"api/types/container",
"api/types/events",
"api/types/filters",
"api/types/mount",
"api/types/network",
"api/types/reference",
"api/types/registry",
"api/types/strslice",
"api/types/swarm",
"api/types/time",
"api/types/versions",
"api/types/volume",
"client",
"pkg/tlsconfig"
]
revision = "eef6495eddab52828327aade186443681ed71a4e"
version = "v17.03.2-ce-rc1"
[[projects]]
name = "github.com/docker/go-connections"
packages = [
"nat",
"sockets",
"tlsconfig"
]
revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d"
version = "v0.3.0"
[[projects]]
name = "github.com/docker/go-units"
packages = ["."]
revision = "47565b4f722fb6ceae66b95f853feed578a4a51c"
version = "v0.3.3"
[[projects]]
name = "github.com/eapache/go-resiliency"
packages = ["breaker"]
revision = "ea41b0fad31007accc7f806884dcdf3da98b79ce"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/eapache/go-xerial-snappy"
packages = ["."]
revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c"
[[projects]]
name = "github.com/eapache/queue"
packages = ["."]
revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98"
version = "v1.1.0"
[[projects]]
name = "github.com/eclipse/paho.mqtt.golang"
packages = [
".",
"packets"
]
revision = "36d01c2b4cbeb3d2a12063e4880ce30800af9560"
version = "v1.1.1"
[[projects]]
name = "github.com/go-ini/ini"
packages = ["."]
revision = "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5"
version = "v1.37.0"
[[projects]]
name = "github.com/go-logfmt/logfmt"
packages = ["."]
revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5"
version = "v0.3.0"
[[projects]]
name = "github.com/go-ole/go-ole"
packages = [
".",
"oleutil"
]
revision = "a41e3c4b706f6ae8dfbff342b06e40fa4d2d0506"
version = "v1.2.1"
[[projects]]
name = "github.com/go-redis/redis"
packages = [
".",
"internal",
"internal/consistenthash",
"internal/hashtag",
"internal/pool",
"internal/proto",
"internal/singleflight",
"internal/util"
]
revision = "83fb42932f6145ce52df09860384a4653d2d332a"
version = "v6.12.0"
[[projects]]
name = "github.com/go-sql-driver/mysql"
packages = ["."]
revision = "d523deb1b23d913de5bdada721a6071e71283618"
version = "v1.4.0"
[[projects]]
name = "github.com/gobwas/glob"
packages = [
".",
"compiler",
"match",
"syntax",
"syntax/ast",
"syntax/lexer",
"util/runes",
"util/strings"
]
revision = "5ccd90ef52e1e632236f7326478d4faa74f99438"
version = "v0.2.3"
[[projects]]
name = "github.com/gogo/protobuf"
packages = ["proto"]
revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
version = "v1.0.0"
[[projects]]
name = "github.com/golang/protobuf"
packages = [
"proto",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp"
]
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/golang/snappy"
packages = ["."]
revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"
[[projects]]
name = "github.com/google/go-cmp"
packages = [
"cmp",
"cmp/internal/diff",
"cmp/internal/function",
"cmp/internal/value"
]
revision = "3af367b6b30c263d47e8895973edcca9a49cf029"
version = "v0.2.0"
[[projects]]
name = "github.com/gorilla/context"
packages = ["."]
revision = "08b5f424b9271eedf6f9f0ce86cb9396ed337a42"
version = "v1.1.1"
[[projects]]
name = "github.com/gorilla/mux"
packages = ["."]
revision = "e3702bed27f0d39777b0b37b664b6280e8ef8fbf"
version = "v1.6.2"
[[projects]]
branch = "master"
name = "github.com/hailocab/go-hostpool"
packages = ["."]
revision = "e80d13ce29ede4452c43dea11e79b9bc8a15b478"
[[projects]]
name = "github.com/hashicorp/consul"
packages = ["api"]
revision = "5174058f0d2bda63fa5198ab96c33d9a909c58ed"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/hashicorp/go-cleanhttp"
packages = ["."]
revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d"
[[projects]]
branch = "master"
name = "github.com/hashicorp/go-rootcerts"
packages = ["."]
revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00"
[[projects]]
name = "github.com/hashicorp/serf"
packages = ["coordinate"]
revision = "d6574a5bb1226678d7010325fb6c985db20ee458"
version = "v0.8.1"
[[projects]]
name = "github.com/influxdata/go-syslog"
packages = [
"rfc5424",
"rfc5425"
]
revision = "eecd51df3ad85464a2bab9b7d3a45bc1e299059e"
version = "v1.0.1"
[[projects]]
branch = "master"
name = "github.com/influxdata/tail"
packages = [
".",
"ratelimiter",
"util",
"watch",
"winfile"
]
revision = "c43482518d410361b6c383d7aebce33d0471d7bc"
[[projects]]
branch = "master"
name = "github.com/influxdata/toml"
packages = [
".",
"ast"
]
revision = "2a2e3012f7cfbef64091cc79776311e65dfa211b"
[[projects]]
branch = "master"
name = "github.com/influxdata/wlog"
packages = ["."]
revision = "7c63b0a71ef8300adc255344d275e10e5c3a71ec"
[[projects]]
name = "github.com/jackc/pgx"
packages = [
".",
"chunkreader",
"internal/sanitize",
"pgio",
"pgproto3",
"pgtype",
"stdlib"
]
revision = "da3231b0b66e2e74cdb779f1d46c5e958ba8be27"
version = "v3.1.0"
[[projects]]
name = "github.com/jmespath/go-jmespath"
packages = ["."]
revision = "0b12d6b5"
[[projects]]
branch = "master"
name = "github.com/kardianos/osext"
packages = ["."]
revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
[[projects]]
branch = "master"
name = "github.com/kardianos/service"
packages = ["."]
revision = "615a14ed75099c9eaac6949e22ac2341bf9d3197"
[[projects]]
branch = "master"
name = "github.com/kballard/go-shellquote"
packages = ["."]
revision = "95032a82bc518f77982ea72343cc1ade730072f0"
[[projects]]
branch = "master"
name = "github.com/kr/logfmt"
packages = ["."]
revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0"
[[projects]]
branch = "master"
name = "github.com/mailru/easyjson"
packages = [
".",
"buffer",
"jlexer",
"jwriter"
]
revision = "3fdea8d05856a0c8df22ed4bc71b3219245e4485"
[[projects]]
name = "github.com/matttproud/golang_protobuf_extensions"
packages = ["pbutil"]
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
version = "v1.0.1"
[[projects]]
name = "github.com/miekg/dns"
packages = ["."]
revision = "5a2b9fab83ff0f8bfc99684bd5f43a37abe560f1"
version = "v1.0.8"
[[projects]]
branch = "master"
name = "github.com/mitchellh/go-homedir"
packages = ["."]
revision = "3864e76763d94a6df2f9960b16a20a33da9f9a66"
[[projects]]
branch = "master"
name = "github.com/mitchellh/mapstructure"
packages = ["."]
revision = "bb74f1db0675b241733089d5a1faa5dd8b0ef57b"
[[projects]]
name = "github.com/multiplay/go-ts3"
packages = ["."]
revision = "d0d44555495c8776880a17e439399e715a4ef319"
version = "v1.0.0"
[[projects]]
name = "github.com/naoina/go-stringutil"
packages = ["."]
revision = "6b638e95a32d0c1131db0e7fe83775cbea4a0d0b"
version = "v0.1.0"
[[projects]]
name = "github.com/nats-io/gnatsd"
packages = [
"conf",
"logger",
"server",
"server/pse",
"util"
]
revision = "add6d7930ae6d4bff8823b28999ea87bf1bfd23d"
version = "v1.1.0"
[[projects]]
name = "github.com/nats-io/go-nats"
packages = [
".",
"encoders/builtin",
"util"
]
revision = "062418ea1c2181f52dc0f954f6204370519a868b"
version = "v1.5.0"
[[projects]]
name = "github.com/nats-io/nuid"
packages = ["."]
revision = "289cccf02c178dc782430d534e3c1f5b72af807f"
version = "v1.0.0"
[[projects]]
name = "github.com/nsqio/go-nsq"
packages = ["."]
revision = "eee57a3ac4174c55924125bb15eeeda8cffb6e6f"
version = "v1.0.7"
[[projects]]
branch = "master"
name = "github.com/opentracing-contrib/go-observer"
packages = ["."]
revision = "a52f2342449246d5bcc273e65cbdcfa5f7d6c63c"
[[projects]]
name = "github.com/opentracing/opentracing-go"
packages = [
".",
"ext",
"log"
]
revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
version = "v1.0.2"
[[projects]]
name = "github.com/openzipkin/zipkin-go-opentracing"
packages = [
".",
"flag",
"thrift/gen-go/scribe",
"thrift/gen-go/zipkincore",
"types",
"wire"
]
revision = "26cf9707480e6b90e5eff22cf0bbf05319154232"
version = "v0.3.4"
[[projects]]
name = "github.com/pierrec/lz4"
packages = [
".",
"internal/xxh32"
]
revision = "6b9367c9ff401dbc54fabce3fb8d972e799b702d"
version = "v2.0.2"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
name = "github.com/prometheus/client_golang"
packages = [
"prometheus",
"prometheus/promhttp"
]
revision = "c5b7fccd204277076155f10851dad72b76a49317"
version = "v0.8.0"
[[projects]]
branch = "master"
name = "github.com/prometheus/client_model"
packages = ["go"]
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
[[projects]]
branch = "master"
name = "github.com/prometheus/common"
packages = [
"expfmt",
"internal/bitbucket.org/ww/goautoneg",
"log",
"model"
]
revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"
[[projects]]
branch = "master"
name = "github.com/prometheus/procfs"
packages = [
".",
"internal/util",
"nfs",
"xfs"
]
revision = "7d6f385de8bea29190f15ba9931442a0eaef9af7"
[[projects]]
branch = "master"
name = "github.com/rcrowley/go-metrics"
packages = ["."]
revision = "e2704e165165ec55d062f5919b4b29494e9fa790"
[[projects]]
branch = "master"
name = "github.com/samuel/go-zookeeper"
packages = ["zk"]
revision = "c4fab1ac1bec58281ad0667dc3f0907a9476ac47"
[[projects]]
name = "github.com/satori/go.uuid"
packages = ["."]
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
version = "v1.2.0"
[[projects]]
name = "github.com/shirou/gopsutil"
packages = [
"cpu",
"disk",
"host",
"internal/common",
"load",
"mem",
"net",
"process"
]
revision = "eeb1d38d69593f121e060d24d17f7b1f0936b203"
version = "v2.18.05"
[[projects]]
branch = "master"
name = "github.com/shirou/w32"
packages = ["."]
revision = "bb4de0191aa41b5507caa14b0650cdbddcd9280b"
[[projects]]
name = "github.com/sirupsen/logrus"
packages = ["."]
revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
version = "v1.0.5"
[[projects]]
branch = "master"
name = "github.com/soniah/gosnmp"
packages = ["."]
revision = "bcf840db66be7d64bf96c3c0e075c92e3d98f793"
[[projects]]
branch = "master"
name = "github.com/streadway/amqp"
packages = ["."]
revision = "e5adc2ada8b8efff032bf61173a233d143e9318e"
[[projects]]
name = "github.com/stretchr/objx"
packages = ["."]
revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c"
version = "v0.1.1"
[[projects]]
name = "github.com/stretchr/testify"
packages = [
"assert",
"mock",
"require"
]
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
version = "v1.2.2"
[[projects]]
name = "github.com/tidwall/gjson"
packages = ["."]
revision = "afaeb9562041a8018c74e006551143666aed08bf"
version = "v1.1.1"
[[projects]]
branch = "master"
name = "github.com/tidwall/match"
packages = ["."]
revision = "1731857f09b1f38450e2c12409748407822dc6be"
[[projects]]
name = "github.com/vjeantet/grok"
packages = ["."]
revision = "ce01e59abcf6fbc9833b7deb5e4b8ee1769bcc53"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/wvanbergen/kafka"
packages = ["consumergroup"]
revision = "e2edea948ddfee841ea9a263b32ccca15f7d6c2f"
[[projects]]
branch = "master"
name = "github.com/wvanbergen/kazoo-go"
packages = ["."]
revision = "f72d8611297a7cf105da904c04198ad701a60101"
[[projects]]
branch = "master"
name = "github.com/yuin/gopher-lua"
packages = [
".",
"ast",
"parse",
"pm"
]
revision = "ca850f594eaafa5468da2bd53b865e4ee55be18b"
[[projects]]
branch = "master"
name = "github.com/zensqlmonitor/go-mssqldb"
packages = ["."]
revision = "e8fbf836e44e86764eba398361d1825651709547"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = [
"bcrypt",
"blowfish",
"ed25519",
"ed25519/internal/edwards25519",
"md4",
"pbkdf2",
"ssh/terminal"
]
revision = "027cca12c2d63e3d62b670d901e8a2c95854feec"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = [
"bpf",
"context",
"context/ctxhttp",
"html",
"html/atom",
"html/charset",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"internal/iana",
"internal/socket",
"internal/socks",
"internal/timeseries",
"ipv4",
"ipv6",
"proxy",
"trace",
"websocket"
]
revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = [
"unix",
"windows",
"windows/registry",
"windows/svc",
"windows/svc/debug",
"windows/svc/eventlog",
"windows/svc/mgr"
]
revision = "6c888cc515d3ed83fc103cf1d84468aad274b0a7"
[[projects]]
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"encoding",
"encoding/charmap",
"encoding/htmlindex",
"encoding/internal",
"encoding/internal/identifier",
"encoding/japanese",
"encoding/korean",
"encoding/simplifiedchinese",
"encoding/traditionalchinese",
"encoding/unicode",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"internal/utf8internal",
"language",
"runes",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable"
]
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
name = "google.golang.org/appengine"
packages = ["cloudsql"]
revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
revision = "32ee49c4dd805befd833990acba36cb75042378c"
[[projects]]
name = "google.golang.org/grpc"
packages = [
".",
"balancer",
"balancer/base",
"balancer/roundrobin",
"channelz",
"codes",
"connectivity",
"credentials",
"encoding",
"encoding/proto",
"grpclb/grpc_lb_v1/messages",
"grpclog",
"internal",
"keepalive",
"metadata",
"naming",
"peer",
"resolver",
"resolver/dns",
"resolver/passthrough",
"stats",
"status",
"tap",
"transport"
]
revision = "7a6a684ca69eb4cae85ad0a484f2e531598c047b"
version = "v1.12.2"
[[projects]]
name = "gopkg.in/alecthomas/kingpin.v2"
packages = ["."]
revision = "947dcec5ba9c011838740e680966fd7087a71d0d"
version = "v2.2.6"
[[projects]]
name = "gopkg.in/asn1-ber.v1"
packages = ["."]
revision = "379148ca0225df7a432012b8df0355c2a2063ac0"
version = "v1.2"
[[projects]]
name = "gopkg.in/fatih/pool.v2"
packages = ["."]
revision = "010e0b745d12eaf8426c95f9c3924d81dd0b668f"
version = "v2.0.0"
[[projects]]
name = "gopkg.in/fsnotify.v1"
packages = ["."]
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz"
version = "v1.4.7"
[[projects]]
name = "gopkg.in/gorethink/gorethink.v3"
packages = [
".",
"encoding",
"ql2",
"types"
]
revision = "7f5bdfd858bb064d80559b2a32b86669c5de5d3b"
version = "v3.0.5"
[[projects]]
name = "gopkg.in/ldap.v2"
packages = ["."]
revision = "bb7a9ca6e4fbc2129e3db588a34bc970ffe811a9"
version = "v2.5.1"
[[projects]]
branch = "v2"
name = "gopkg.in/mgo.v2"
packages = [
".",
"bson",
"internal/json",
"internal/sasl",
"internal/scram"
]
revision = "3f83fa5005286a7fe593b055f0d7771a7dce4655"
[[projects]]
name = "gopkg.in/olivere/elastic.v5"
packages = [
".",
"config",
"uritemplates"
]
revision = "b708306d715bea9b983685e94ab4602cdc9f988b"
version = "v5.0.69"
[[projects]]
branch = "v1"
name = "gopkg.in/tomb.v1"
packages = ["."]
revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8"
[[projects]]
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "024194b983d91b9500fe97e0aa0ddb5fe725030cb51ddfb034e386cae1098370"
solver-name = "gps-cdcl"
solver-version = 1

243
Gopkg.toml Normal file

@ -0,0 +1,243 @@
[[constraint]]
name = "collectd.org"
version = "0.3.0"
[[constraint]]
name = "github.com/aerospike/aerospike-client-go"
version = "^1.33.0"
[[constraint]]
name = "github.com/amir/raidman"
branch = "master"
[[constraint]]
name = "github.com/apache/thrift"
branch = "master"
[[constraint]]
name = "github.com/aws/aws-sdk-go"
version = "1.14.8"
# version = "1.8.39"
[[constraint]]
name = "github.com/bsm/sarama-cluster"
version = "2.1.13"
# version = "2.1.10"
[[constraint]]
name = "github.com/couchbase/go-couchbase"
branch = "master"
[[constraint]]
name = "github.com/dgrijalva/jwt-go"
version = "3.2.0"
# version = "3.1.0"
[[constraint]]
name = "github.com/docker/docker"
version = "~17.03.2-ce"
[[constraint]]
name = "github.com/docker/go-connections"
version = "0.3.0"
# version = "0.2.1"
[[constraint]]
name = "github.com/eclipse/paho.mqtt.golang"
version = "~1.1.1"
# version = "1.1.0"
[[constraint]]
name = "github.com/go-sql-driver/mysql"
version = "1.4.0"
# version = "1.3.0"
[[constraint]]
name = "github.com/gobwas/glob"
version = "0.2.3"
# version = "0.2.2"
[[constraint]]
name = "github.com/golang/protobuf"
version = "1.1.0"
# version = "1.0.0"
[[constraint]]
name = "github.com/google/go-cmp"
version = "0.2.0"
# version = "0.1.0"
[[constraint]]
name = "github.com/gorilla/mux"
version = "1.6.2"
# version = "1.6.1"
[[constraint]]
name = "github.com/go-redis/redis"
version = "6.12.0"
[[constraint]]
name = "github.com/hashicorp/consul"
version = "1.1.0"
[[constraint]]
name = "github.com/influxdata/go-syslog"
version = "1.0.1"
[[constraint]]
name = "github.com/influxdata/tail"
branch = "master"
[[constraint]]
name = "github.com/influxdata/toml"
branch = "master"
[[constraint]]
name = "github.com/influxdata/wlog"
branch = "master"
[[constraint]]
name = "github.com/jackc/pgx"
version = "3.1.0"
[[constraint]]
name = "github.com/kardianos/service"
branch = "master"
[[constraint]]
name = "github.com/kballard/go-shellquote"
branch = "master"
[[constraint]]
name = "github.com/matttproud/golang_protobuf_extensions"
version = "1.0.1"
[[constraint]]
name = "github.com/Microsoft/ApplicationInsights-Go"
branch = "master"
[[constraint]]
name = "github.com/miekg/dns"
version = "1.0.8"
# version = "1.0.0"
[[constraint]]
name = "github.com/multiplay/go-ts3"
version = "1.0.0"
[[constraint]]
name = "github.com/nats-io/gnatsd"
version = "1.1.0"
# version = "1.0.4"
[[constraint]]
name = "github.com/nats-io/go-nats"
version = "1.5.0"
# version = "1.3.0"
[[constraint]]
name = "github.com/nsqio/go-nsq"
version = "1.0.7"
[[constraint]]
name = "github.com/openzipkin/zipkin-go-opentracing"
version = "0.3.4"
# version = "0.3.0"
[[constraint]]
name = "github.com/prometheus/client_golang"
version = "0.8.0"
[[constraint]]
name = "github.com/prometheus/client_model"
branch = "master"
[[constraint]]
name = "github.com/prometheus/common"
branch = "master"
[[constraint]]
name = "github.com/satori/go.uuid"
version = "1.2.0"
[[constraint]]
name = "github.com/shirou/gopsutil"
version = "2.18.05"
# version = "2.18.04"
[[constraint]]
name = "github.com/Shopify/sarama"
version = "1.17.0"
# version = "1.15.0"
[[constraint]]
name = "github.com/soniah/gosnmp"
branch = "master"
[[constraint]]
name = "github.com/StackExchange/wmi"
version = "1.0.0"
[[constraint]]
name = "github.com/streadway/amqp"
branch = "master"
[[constraint]]
name = "github.com/stretchr/testify"
version = "1.2.2"
# version = "1.2.1"
[[constraint]]
name = "github.com/tidwall/gjson"
version = "1.1.1"
# version = "1.0.0"
[[constraint]]
name = "github.com/vjeantet/grok"
version = "1.0.0"
[[constraint]]
name = "github.com/wvanbergen/kafka"
branch = "master"
[[constraint]]
name = "github.com/zensqlmonitor/go-mssqldb"
branch = "master"
[[constraint]]
name = "golang.org/x/net"
branch = "master"
[[constraint]]
name = "golang.org/x/sys"
branch = "master"
[[constraint]]
name = "google.golang.org/grpc"
version = "1.12.2"
# version = "1.8.0"
[[constraint]]
name = "gopkg.in/gorethink/gorethink.v3"
version = "3.0.5"
[[constraint]]
name = "gopkg.in/ldap.v2"
version = "2.5.1"
[[constraint]]
name = "gopkg.in/mgo.v2"
branch = "v2"
[[constraint]]
name = "gopkg.in/olivere/elastic.v5"
version = "^5.0.69"
# version = "^6.1.23"
[[constraint]]
name = "gopkg.in/yaml.v2"
version = "^2.2.1"
[[override]]
source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz"
name = "gopkg.in/fsnotify.v1"


@ -1,6 +1,6 @@
The MIT License (MIT)
Copyright (c) 2015-2019 InfluxData Inc.
Copyright (c) 2015 InfluxDB
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

108
Makefile

@ -1,23 +1,15 @@
ifeq ($(OS), Windows_NT)
VERSION := $(shell git describe --exact-match --tags 2>nul)
HOME := $(HOMEPATH)
CGO_ENABLED ?= 0
export CGO_ENABLED
else
VERSION := $(shell git describe --exact-match --tags 2>/dev/null)
endif
PREFIX := /usr/local
VERSION := $(shell git describe --exact-match --tags 2>/dev/null)
BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
COMMIT := $(shell git rev-parse --short HEAD)
GOFILES ?= $(shell git ls-files '*.go')
GOFMT ?= $(shell gofmt -l -s $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)))
GOFMT ?= $(shell gofmt -l $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)))
BUILDFLAGS ?=
ifdef GOBIN
PATH := $(GOBIN):$(PATH)
else
PATH := $(subst :,/bin:,$(shell go env GOPATH))/bin:$(PATH)
PATH := $(subst :,/bin:,$(GOPATH))/bin:$(PATH)
endif
LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)
@ -25,38 +17,33 @@ ifdef VERSION
LDFLAGS += -X main.version=$(VERSION)
endif
.PHONY: all
all:
@$(MAKE) --no-print-directory deps
@$(MAKE) --no-print-directory telegraf
$(MAKE) deps
$(MAKE) telegraf
.PHONY: deps
deps:
go mod download
go get -u github.com/golang/lint/golint
go get -u github.com/golang/dep/cmd/dep
dep ensure
.PHONY: telegraf
telegraf:
go build -ldflags "$(LDFLAGS)" ./cmd/telegraf
.PHONY: go-install
go-install:
go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf
.PHONY: install
install: telegraf
mkdir -p $(DESTDIR)$(PREFIX)/bin/
cp telegraf $(DESTDIR)$(PREFIX)/bin/
.PHONY: test
test:
go test -short ./...
.PHONY: fmt
fmt:
@gofmt -s -w $(filter-out plugins/parsers/influx/machine.go, $(GOFILES))
@gofmt -w $(filter-out plugins/parsers/influx/machine.go, $(GOFILES))
.PHONY: fmtcheck
fmtcheck:
@echo '[INFO] running gofmt to identify incorrectly formatted code...'
@if [ ! -z "$(GOFMT)" ]; then \
echo "[ERROR] gofmt has found errors in the following files:" ; \
echo "$(GOFMT)" ; \
@ -64,17 +51,17 @@ fmtcheck:
echo "Run make fmt to fix them." ; \
exit 1 ;\
fi
@echo '[INFO] done.'
.PHONY: test-windows
test-windows:
go test -short ./plugins/inputs/ping/...
go test -short ./plugins/inputs/win_perf_counters/...
go test -short ./plugins/inputs/win_services/...
go test -short ./plugins/inputs/procstat/...
go test -short ./plugins/inputs/ntpq/...
go test -short ./plugins/processors/port_name/...
go test ./plugins/inputs/ping/...
go test ./plugins/inputs/win_perf_counters/...
go test ./plugins/inputs/win_services/...
go test ./plugins/inputs/procstat/...
go test ./plugins/inputs/ntpq/...
.PHONY: vet
# vet runs the Go source code static analysis tool `vet` to find
# any common errors.
vet:
@echo 'go vet $$(go list ./... | grep -v ./plugins/parsers/influx)'
@go vet $$(go list ./... | grep -v ./plugins/parsers/influx) ; if [ $$? -ne 0 ]; then \
@ -84,72 +71,25 @@ vet:
exit 1; \
fi
.PHONY: tidy
tidy:
go mod verify
go mod tidy
@if ! git diff --quiet go.mod go.sum; then \
echo "please run go mod tidy and check in changes"; \
exit 1; \
fi
test-ci: fmtcheck vet
go test -short ./...
.PHONY: check
check: fmtcheck vet
@$(MAKE) --no-print-directory tidy
.PHONY: test-all
test-all: fmtcheck vet
go test ./...
.PHONY: check-deps
check-deps:
./scripts/check-deps.sh
.PHONY: package
package:
./scripts/build.py --package --platform=all --arch=all
.PHONY: package-release
package-release:
./scripts/build.py --release --package --platform=all --arch=all \
--upload --bucket=dl.influxdata.com/telegraf/releases
.PHONY: package-nightly
package-nightly:
./scripts/build.py --nightly --package --platform=all --arch=all \
--upload --bucket=dl.influxdata.com/telegraf/nightlies
.PHONY: clean
clean:
rm -f telegraf
rm -f telegraf.exe
.PHONY: docker-image
docker-image:
docker build -f scripts/stretch.docker -t "telegraf:$(COMMIT)" .
./scripts/build.py --package --platform=linux --arch=amd64
cp build/telegraf*$(COMMIT)*.deb .
docker build -f scripts/dev.docker --build-arg "package=telegraf*$(COMMIT)*.deb" -t "telegraf-dev:$(COMMIT)" .
plugins/parsers/influx/machine.go: plugins/parsers/influx/machine.go.rl
ragel -Z -G2 $^ -o $@
.PHONY: static
static:
@echo "Building static linux binary..."
@CGO_ENABLED=0 \
GOOS=linux \
GOARCH=amd64 \
go build -ldflags "$(LDFLAGS)" ./cmd/telegraf
.PHONY: plugin-%
plugin-%:
@echo "Starting dev environment for $${$(@)} input plugin..."
@docker-compose -f plugins/inputs/$${$(@)}/dev/docker-compose.yml up
.PHONY: ci-1.13
ci-1.13:
docker build -t quay.io/influxdb/telegraf-ci:1.13.8 - < scripts/ci-1.13.docker
docker push quay.io/influxdb/telegraf-ci:1.13.8
.PHONY: ci-1.12
ci-1.12:
docker build -t quay.io/influxdb/telegraf-ci:1.12.17 - < scripts/ci-1.12.docker
docker push quay.io/influxdb/telegraf-ci:1.12.17
.PHONY: deps telegraf install test test-windows lint vet test-all package clean docker-image fmtcheck uint64

281
README.md

@ -1,24 +1,24 @@
# Telegraf [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/)
Telegraf is an agent for collecting, processing, aggregating, and writing metrics.
Telegraf is an agent written in Go for collecting, processing, aggregating,
and writing metrics.
Design goals are to have a minimal memory footprint with a plugin system so
that developers in the community can easily add support for collecting
metrics.
that developers in the community can easily add support for collecting metrics
from local or remote services.
Telegraf is plugin-driven and has the concept of 4 distinct plugin types:
Telegraf is plugin-driven and has the concept of 4 distinct plugins:
1. [Input Plugins](#input-plugins) collect metrics from the system, services, or 3rd party APIs
2. [Processor Plugins](#processor-plugins) transform, decorate, and/or filter metrics
3. [Aggregator Plugins](#aggregator-plugins) create aggregate metrics (e.g. mean, min, max, quantiles, etc.)
4. [Output Plugins](#output-plugins) write metrics to various destinations
New plugins are designed to be easy to contribute; pull requests are welcomed
and we work to incorporate as many pull requests as possible.
For more information on Processor and Aggregator plugins please [read this](./docs/AGGREGATORS_AND_PROCESSORS.md).
## Try in Browser :rocket:
You can try Telegraf right in your browser in the [Telegraf playground](https://rootnroll.com/d/telegraf/).
New plugins are designed to be easy to contribute,
we'll eagerly accept pull
requests and will manage the set of plugins that Telegraf supports.
## Contributing
@ -26,19 +26,8 @@ There are many ways to contribute:
- Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new)
- [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation)
- [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls)
- Answer questions and discuss here on github and on the [Community Site](https://community.influxdata.com/)
- Answer questions on github and on the [Community Site](https://community.influxdata.com/)
- [Contribute plugins](CONTRIBUTING.md)
- [Contribute external plugins](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/execd/shim) *(experimental)*
## Minimum Requirements
Telegraf shares the same [minimum requirements][] as Go:
- Linux kernel version 2.6.23 or later
- Windows 7 or later
- FreeBSD 11.2 or later
- MacOS 10.11 El Capitan or later
[minimum requirements]: https://github.com/golang/go/wiki/MinimumRequirements#minimum-requirements
## Installation:
@ -51,29 +40,20 @@ Ansible role: https://github.com/rossmcdonald/telegraf
### From Source:
Telegraf requires Go version 1.13 or newer, the Makefile requires GNU make.
Telegraf requires golang version 1.9 or newer, the Makefile requires GNU make.
1. [Install Go](https://golang.org/doc/install) >=1.13 (1.14 recommended)
2. Clone the Telegraf repository:
```
cd ~/src
git clone https://github.com/influxdata/telegraf.git
```
3. Run `make` from the source directory
```
cd ~/src/telegraf
make
```
Dependencies are managed with [dep](https://github.com/golang/dep),
which is installed by the Makefile if you don't have it already.
### Changelog
View the [changelog](/CHANGELOG.md) for the latest updates and changes by
version.
1. [Install Go](https://golang.org/doc/install)
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
3. Run `go get -d github.com/influxdata/telegraf`
4. Run `cd $GOPATH/src/github.com/influxdata/telegraf`
5. Run `make`
### Nightly Builds
These builds are generated from the master branch:
- [telegraf-nightly_darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz)
- [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb)
- [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb)
- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm)
@ -103,171 +83,112 @@ These builds are generated from the master branch:
See usage with:
```
telegraf --help
./telegraf --help
```
#### Generate a telegraf config file:
```
telegraf config > telegraf.conf
./telegraf config > telegraf.conf
```
#### Generate config with only cpu input & influxdb output plugins defined:
```
telegraf --section-filter agent:inputs:outputs --input-filter cpu --output-filter influxdb config
./telegraf --input-filter cpu --output-filter influxdb config
```
#### Run a single telegraf collection, outputting metrics to stdout:
#### Run a single telegraf collection, outputing metrics to stdout:
```
telegraf --config telegraf.conf --test
./telegraf --config telegraf.conf --test
```
#### Run telegraf with all plugins defined in config file:
```
telegraf --config telegraf.conf
./telegraf --config telegraf.conf
```
#### Run telegraf, enabling the cpu & memory input, and influxdb output plugins:
```
telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
./telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
```
## Documentation
[Latest Release Documentation][release docs].
## Configuration
For documentation on the latest development code see the [documentation index][devel docs].
[release docs]: https://docs.influxdata.com/telegraf
[devel docs]: docs
See the [configuration guide](docs/CONFIGURATION.md) for a rundown of the more advanced
configuration options.
## Input Plugins
* [activemq](./plugins/inputs/activemq)
* [aerospike](./plugins/inputs/aerospike)
* [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq)
* [apache](./plugins/inputs/apache)
* [apcupsd](./plugins/inputs/apcupsd)
* [aurora](./plugins/inputs/aurora)
* [aws cloudwatch](./plugins/inputs/cloudwatch) (Amazon Cloudwatch)
* [azure_storage_queue](./plugins/inputs/azure_storage_queue)
* [aws cloudwatch](./plugins/inputs/cloudwatch)
* [bcache](./plugins/inputs/bcache)
* [beanstalkd](./plugins/inputs/beanstalkd)
* [bind](./plugins/inputs/bind)
* [bond](./plugins/inputs/bond)
* [burrow](./plugins/inputs/burrow)
* [cassandra](./plugins/inputs/cassandra) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
* [burrow](./plugins/inputs/burrow)
* [ceph](./plugins/inputs/ceph)
* [cgroup](./plugins/inputs/cgroup)
* [chrony](./plugins/inputs/chrony)
* [cisco_telemetry_gnmi](./plugins/inputs/cisco_telemetry_gnmi)
* [cisco_telemetry_mdt](./plugins/inputs/cisco_telemetry_mdt)
* [clickhouse](./plugins/inputs/clickhouse)
* [cloud_pubsub](./plugins/inputs/cloud_pubsub) Google Cloud Pub/Sub
* [cloud_pubsub_push](./plugins/inputs/cloud_pubsub_push) Google Cloud Pub/Sub push endpoint
* [conntrack](./plugins/inputs/conntrack)
* [consul](./plugins/inputs/consul)
* [conntrack](./plugins/inputs/conntrack)
* [couchbase](./plugins/inputs/couchbase)
* [couchdb](./plugins/inputs/couchdb)
* [cpu](./plugins/inputs/cpu)
* [DC/OS](./plugins/inputs/dcos)
* [diskio](./plugins/inputs/diskio)
* [disk](./plugins/inputs/disk)
* [disque](./plugins/inputs/disque)
* [dmcache](./plugins/inputs/dmcache)
* [dns query time](./plugins/inputs/dns_query)
* [docker](./plugins/inputs/docker)
* [docker_log](./plugins/inputs/docker_log)
* [dovecot](./plugins/inputs/dovecot)
* [aws ecs](./plugins/inputs/ecs) (Amazon Elastic Container Service, Fargate)
* [elasticsearch](./plugins/inputs/elasticsearch)
* [ethtool](./plugins/inputs/ethtool)
* [eventhub_consumer](./plugins/inputs/eventhub_consumer) (Azure Event Hubs & Azure IoT Hub)
* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
* [execd](./plugins/inputs/execd)
* [fail2ban](./plugins/inputs/fail2ban)
* [fibaro](./plugins/inputs/fibaro)
* [file](./plugins/inputs/file)
* [filestat](./plugins/inputs/filestat)
* [filecount](./plugins/inputs/filecount)
* [fireboard](/plugins/inputs/fireboard)
* [fluentd](./plugins/inputs/fluentd)
* [github](./plugins/inputs/github)
* [graylog](./plugins/inputs/graylog)
* [haproxy](./plugins/inputs/haproxy)
* [hddtemp](./plugins/inputs/hddtemp)
* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
* [http_listener](./plugins/inputs/influxdb_listener) (deprecated, renamed to [influxdb_listener](/plugins/inputs/influxdb_listener))
* [http_listener_v2](./plugins/inputs/http_listener_v2)
* [http](./plugins/inputs/http) (generic HTTP plugin, supports using input data formats)
* [http_response](./plugins/inputs/http_response)
* [icinga2](./plugins/inputs/icinga2)
* [infiniband](./plugins/inputs/infiniband)
* [influxdb](./plugins/inputs/influxdb)
* [influxdb_listener](./plugins/inputs/influxdb_listener)
* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
* [internal](./plugins/inputs/internal)
* [influxdb](./plugins/inputs/influxdb)
* [interrupts](./plugins/inputs/interrupts)
* [ipmi_sensor](./plugins/inputs/ipmi_sensor)
* [ipset](./plugins/inputs/ipset)
* [iptables](./plugins/inputs/iptables)
* [ipvs](./plugins/inputs/ipvs)
* [jenkins](./plugins/inputs/jenkins)
* [jolokia2](./plugins/inputs/jolokia2) (java, cassandra, kafka)
* [ipset](./plugins/inputs/ipset)
* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
* [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry)
* [kafka_consumer](./plugins/inputs/kafka_consumer)
* [jolokia2](./plugins/inputs/jolokia2) (java, cassandra, kafka)
* [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry)
* [kapacitor](./plugins/inputs/kapacitor)
* [aws kinesis](./plugins/inputs/kinesis_consumer) (Amazon Kinesis)
* [kernel](./plugins/inputs/kernel)
* [kernel_vmstat](./plugins/inputs/kernel_vmstat)
* [kibana](./plugins/inputs/kibana)
* [kubernetes](./plugins/inputs/kubernetes)
* [kube_inventory](./plugins/inputs/kube_inventory)
* [lanz](./plugins/inputs/lanz)
* [leofs](./plugins/inputs/leofs)
* [linux_sysctl_fs](./plugins/inputs/linux_sysctl_fs)
* [logparser](./plugins/inputs/logparser) (deprecated, use [tail](/plugins/inputs/tail))
* [logstash](./plugins/inputs/logstash)
* [lustre2](./plugins/inputs/lustre2)
* [mailchimp](./plugins/inputs/mailchimp)
* [marklogic](./plugins/inputs/marklogic)
* [mcrouter](./plugins/inputs/mcrouter)
* [memcached](./plugins/inputs/memcached)
* [mem](./plugins/inputs/mem)
* [mesos](./plugins/inputs/mesos)
* [minecraft](./plugins/inputs/minecraft)
* [modbus](./plugins/inputs/modbus)
* [mongodb](./plugins/inputs/mongodb)
* [monit](./plugins/inputs/monit)
* [mqtt_consumer](./plugins/inputs/mqtt_consumer)
* [multifile](./plugins/inputs/multifile)
* [mysql](./plugins/inputs/mysql)
* [nats_consumer](./plugins/inputs/nats_consumer)
* [nats](./plugins/inputs/nats)
* [neptune_apex](./plugins/inputs/neptune_apex)
* [net](./plugins/inputs/net)
* [net_response](./plugins/inputs/net_response)
* [netstat](./plugins/inputs/net)
* [nginx](./plugins/inputs/nginx)
* [nginx_plus_api](./plugins/inputs/nginx_plus_api)
* [nginx_plus](./plugins/inputs/nginx_plus)
* [nginx_upstream_check](./plugins/inputs/nginx_upstream_check)
* [nginx_vts](./plugins/inputs/nginx_vts)
* [nsq_consumer](./plugins/inputs/nsq_consumer)
* [nsq](./plugins/inputs/nsq)
* [nstat](./plugins/inputs/nstat)
* [ntpq](./plugins/inputs/ntpq)
* [nvidia_smi](./plugins/inputs/nvidia_smi)
* [openldap](./plugins/inputs/openldap)
* [openntpd](./plugins/inputs/openntpd)
* [opensmtpd](./plugins/inputs/opensmtpd)
* [openweathermap](./plugins/inputs/openweathermap)
* [pf](./plugins/inputs/pf)
* [pgbouncer](./plugins/inputs/pgbouncer)
* [phpfpm](./plugins/inputs/phpfpm)
* [phusion passenger](./plugins/inputs/passenger)
* [ping](./plugins/inputs/ping)
@ -275,8 +196,6 @@ For documentation on the latest development code see the [documentation index][d
* [postgresql_extensible](./plugins/inputs/postgresql_extensible)
* [postgresql](./plugins/inputs/postgresql)
* [powerdns](./plugins/inputs/powerdns)
* [powerdns_recursor](./plugins/inputs/powerdns_recursor)
* [processes](./plugins/inputs/processes)
* [procstat](./plugins/inputs/procstat)
* [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server))
* [puppetagent](./plugins/inputs/puppetagent)
@ -287,35 +206,49 @@ For documentation on the latest development code see the [documentation index][d
* [riak](./plugins/inputs/riak)
* [salesforce](./plugins/inputs/salesforce)
* [sensors](./plugins/inputs/sensors)
* [sflow](./plugins/inputs/sflow)
* [smart](./plugins/inputs/smart)
* [snmp_legacy](./plugins/inputs/snmp_legacy)
* [snmp](./plugins/inputs/snmp)
* [snmp_trap](./plugins/inputs/snmp_trap)
* [socket_listener](./plugins/inputs/socket_listener)
* [snmp_legacy](./plugins/inputs/snmp_legacy)
* [solr](./plugins/inputs/solr)
* [sql server](./plugins/inputs/sqlserver) (microsoft)
* [stackdriver](./plugins/inputs/stackdriver) (Google Cloud Monitoring)
* [statsd](./plugins/inputs/statsd)
* [suricata](./plugins/inputs/suricata)
* [swap](./plugins/inputs/swap)
* [synproxy](./plugins/inputs/synproxy)
* [syslog](./plugins/inputs/syslog)
* [sysstat](./plugins/inputs/sysstat)
* [systemd_units](./plugins/inputs/systemd_units)
* [system](./plugins/inputs/system)
* [tail](./plugins/inputs/tail)
* [temp](./plugins/inputs/temp)
* [tcp_listener](./plugins/inputs/socket_listener)
* [teamspeak](./plugins/inputs/teamspeak)
* [tengine](./plugins/inputs/tengine)
* [tomcat](./plugins/inputs/tomcat)
* [twemproxy](./plugins/inputs/twemproxy)
* [udp_listener](./plugins/inputs/socket_listener)
* [unbound](./plugins/inputs/unbound)
* [uwsgi](./plugins/inputs/uwsgi)
* [varnish](./plugins/inputs/varnish)
* [vsphere](./plugins/inputs/vsphere) VMware vSphere
* [zfs](./plugins/inputs/zfs)
* [zookeeper](./plugins/inputs/zookeeper)
* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters)
* [win_services](./plugins/inputs/win_services)
* [sysstat](./plugins/inputs/sysstat)
* [system](./plugins/inputs/system)
* cpu
* mem
* net
* netstat
* disk
* diskio
* swap
* processes
* kernel (/proc/stat)
* kernel (/proc/vmstat)
* linux_sysctl_fs (/proc/sys/fs)
Telegraf can also collect metrics via the following service plugins:
* [http_listener](./plugins/inputs/http_listener)
* [kafka_consumer](./plugins/inputs/kafka_consumer)
* [mqtt_consumer](./plugins/inputs/mqtt_consumer)
* [nats_consumer](./plugins/inputs/nats_consumer)
* [nsq_consumer](./plugins/inputs/nsq_consumer)
* [logparser](./plugins/inputs/logparser)
* [statsd](./plugins/inputs/statsd)
* [socket_listener](./plugins/inputs/socket_listener)
* [tail](./plugins/inputs/tail)
* [tcp_listener](./plugins/inputs/socket_listener)
* [udp_listener](./plugins/inputs/socket_listener)
* [webhooks](./plugins/inputs/webhooks)
* [filestack](./plugins/inputs/webhooks/filestack)
* [github](./plugins/inputs/webhooks/github)
@ -323,107 +256,61 @@ For documentation on the latest development code see the [documentation index][d
* [papertrail](./plugins/inputs/webhooks/papertrail)
* [particle](./plugins/inputs/webhooks/particle)
* [rollbar](./plugins/inputs/webhooks/rollbar)
* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters)
* [win_services](./plugins/inputs/win_services)
* [wireguard](./plugins/inputs/wireguard)
* [wireless](./plugins/inputs/wireless)
* [x509_cert](./plugins/inputs/x509_cert)
* [zfs](./plugins/inputs/zfs)
* [zipkin](./plugins/inputs/zipkin)
* [zookeeper](./plugins/inputs/zookeeper)
## Parsers
Telegraf is able to parse the following input data formats into metrics; these
formats may be used with input plugins supporting the `data_format` option:
- [InfluxDB Line Protocol](/plugins/parsers/influx)
- [Collectd](/plugins/parsers/collectd)
- [CSV](/plugins/parsers/csv)
- [Dropwizard](/plugins/parsers/dropwizard)
- [FormUrlencoded](/plugins/parser/form_urlencoded)
- [Graphite](/plugins/parsers/graphite)
- [Grok](/plugins/parsers/grok)
- [JSON](/plugins/parsers/json)
- [Logfmt](/plugins/parsers/logfmt)
- [Nagios](/plugins/parsers/nagios)
- [Value](/plugins/parsers/value), ie: 45 or "booyah"
- [Wavefront](/plugins/parsers/wavefront)
## Serializers
- [InfluxDB Line Protocol](/plugins/serializers/influx)
- [JSON](/plugins/serializers/json)
- [Graphite](/plugins/serializers/graphite)
- [ServiceNow](/plugins/serializers/nowmetric)
- [SplunkMetric](/plugins/serializers/splunkmetric)
- [Carbon2](/plugins/serializers/carbon2)
- [Wavefront](/plugins/serializers/wavefront)
* [InfluxDB Line Protocol](./docs/DATA_FORMATS_INPUT.md#influx)
* [JSON](./docs/DATA_FORMATS_INPUT.md#json)
* [Graphite](./docs/DATA_FORMATS_INPUT.md#graphite)
* [Value](./docs/DATA_FORMATS_INPUT.md#value)
* [Nagios](./docs/DATA_FORMATS_INPUT.md#nagios)
* [Collectd](./docs/DATA_FORMATS_INPUT.md#collectd)
* [Dropwizard](./docs/DATA_FORMATS_INPUT.md#dropwizard)
## Processor Plugins
* [clone](/plugins/processors/clone)
* [converter](/plugins/processors/converter)
* [date](/plugins/processors/date)
* [dedup](/plugins/processors/dedup)
* [defaults](/plugins/processors/defaults)
* [enum](/plugins/processors/enum)
* [filepath](/plugins/processors/filepath)
* [override](/plugins/processors/override)
* [parser](/plugins/processors/parser)
* [pivot](/plugins/processors/pivot)
* [printer](/plugins/processors/printer)
* [regex](/plugins/processors/regex)
* [rename](/plugins/processors/rename)
* [s2geo](/plugins/processors/s2geo)
* [strings](/plugins/processors/strings)
* [tag_limit](/plugins/processors/tag_limit)
* [template](/plugins/processors/template)
* [topk](/plugins/processors/topk)
* [unpivot](/plugins/processors/unpivot)
* [converter](./plugins/processors/converter)
* [override](./plugins/processors/override)
* [printer](./plugins/processors/printer)
* [regex](./plugins/processors/regex)
* [topk](./plugins/processors/topk)
## Aggregator Plugins
* [basicstats](./plugins/aggregators/basicstats)
* [final](./plugins/aggregators/final)
* [histogram](./plugins/aggregators/histogram)
* [merge](./plugins/aggregators/merge)
* [minmax](./plugins/aggregators/minmax)
* [histogram](./plugins/aggregators/histogram)
* [valuecounter](./plugins/aggregators/valuecounter)
## Output Plugins
* [influxdb](./plugins/outputs/influxdb) (InfluxDB 1.x)
* [influxdb_v2](./plugins/outputs/influxdb_v2) ([InfluxDB 2.x](https://github.com/influxdata/influxdb))
* [influxdb](./plugins/outputs/influxdb)
* [amon](./plugins/outputs/amon)
* [amqp](./plugins/outputs/amqp) (rabbitmq)
* [application_insights](./plugins/outputs/application_insights)
* [aws kinesis](./plugins/outputs/kinesis)
* [aws cloudwatch](./plugins/outputs/cloudwatch)
* [azure_monitor](./plugins/outputs/azure_monitor)
* [cloud_pubsub](./plugins/outputs/cloud_pubsub) Google Cloud Pub/Sub
* [cratedb](./plugins/outputs/cratedb)
* [datadog](./plugins/outputs/datadog)
* [discard](./plugins/outputs/discard)
* [elasticsearch](./plugins/outputs/elasticsearch)
* [exec](./plugins/outputs/exec)
* [file](./plugins/outputs/file)
* [graphite](./plugins/outputs/graphite)
* [graylog](./plugins/outputs/graylog)
* [health](./plugins/outputs/health)
* [http](./plugins/outputs/http)
* [instrumental](./plugins/outputs/instrumental)
* [kafka](./plugins/outputs/kafka)
* [librato](./plugins/outputs/librato)
* [mqtt](./plugins/outputs/mqtt)
* [nats](./plugins/outputs/nats)
* [newrelic](./plugins/outputs/newrelic)
* [nsq](./plugins/outputs/nsq)
* [opentsdb](./plugins/outputs/opentsdb)
* [prometheus](./plugins/outputs/prometheus_client)
* [riemann](./plugins/outputs/riemann)
* [riemann_legacy](./plugins/outputs/riemann_legacy)
* [socket_writer](./plugins/outputs/socket_writer)
* [stackdriver](./plugins/outputs/stackdriver) (Google Cloud Monitoring)
* [syslog](./plugins/outputs/syslog)
* [tcp](./plugins/outputs/socket_writer)
* [udp](./plugins/outputs/socket_writer)
* [warp10](./plugins/outputs/warp10)
* [wavefront](./plugins/outputs/wavefront)


@ -1,14 +1,16 @@
package telegraf
import (
"time"
)
import "time"
// Accumulator allows adding metrics to the processing flow.
// Accumulator is an interface for "accumulating" metrics from plugin(s).
// The metrics are sent down a channel shared between all plugins.
type Accumulator interface {
// AddFields adds a metric to the accumulator with the given measurement
// name, fields, and tags (and timestamp). If a timestamp is not provided,
// then the accumulator sets it to "now".
// Create a point with a value, decorating it with tags
// NOTE: tags is expected to be owned by the caller, don't mutate
// it after passing to Add.
AddFields(measurement string,
fields map[string]interface{},
tags map[string]string,
@ -38,48 +40,7 @@ type Accumulator interface {
tags map[string]string,
t ...time.Time)
// AddMetric adds a metric to the accumulator.
AddMetric(Metric)
SetPrecision(precision, interval time.Duration)
// SetPrecision sets the timestamp rounding precision. All metrics added
// to the accumulator will have their timestamp rounded to the
// nearest multiple of precision.
SetPrecision(precision time.Duration)
// Report an error.
AddError(err error)
// Upgrade to a TrackingAccumulator with space for maxTracked
// metrics/batches.
WithTracking(maxTracked int) TrackingAccumulator
}
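As a rough illustration of the interface above, the following is a minimal sketch of an input plugin's `Gather` method feeding an `Accumulator`; the plugin, measurement, and values are hypothetical.

```go
package example

import (
	"time"

	"github.com/influxdata/telegraf"
)

// ExamplePlugin is a hypothetical input used only to illustrate the
// Accumulator contract documented above.
type ExamplePlugin struct{}

func (p *ExamplePlugin) Gather(acc telegraf.Accumulator) error {
	fields := map[string]interface{}{"requests": 42, "latency_ms": 3.7}
	tags := map[string]string{"host": "example-host"}

	// No timestamp supplied: the accumulator stamps the metric with "now",
	// rounded to the configured precision.
	acc.AddFields("example_metric", fields, tags)

	// An explicit timestamp may also be passed as the optional argument.
	acc.AddGauge("example_gauge", map[string]interface{}{"value": 1.0}, tags, time.Now())
	return nil
}
```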
// TrackingID uniquely identifies a tracked metric group
type TrackingID uint64
// DeliveryInfo provides the results of a delivered metric group.
type DeliveryInfo interface {
// ID is the TrackingID
ID() TrackingID
// Delivered returns true if the metric was processed successfully.
Delivered() bool
}
// TrackingAccumulator is an Accumulator that provides a signal when the
// metric has been fully processed. Sending more metrics than the accumulator
// has been allocated for without reading status from the Accepted or Rejected
// channels is an error.
type TrackingAccumulator interface {
Accumulator
// Add the Metric and arrange for tracking feedback after processing.
AddTrackingMetric(m Metric) TrackingID
// Add a group of Metrics and arrange for a signal when the group has been
// processed.
AddTrackingMetricGroup(group []Metric) TrackingID
// Delivered returns a channel that will contain the tracking results.
Delivered() <-chan DeliveryInfo
}
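And a minimal sketch of the tracking flow defined above: upgrade an `Accumulator`, submit a group, then read the delivery signal. The helper name is hypothetical, and in a real plugin the read would normally happen asynchronously rather than blocking `Gather`.

```go
package example

import "github.com/influxdata/telegraf"

// deliverBatch is a hypothetical helper showing the TrackingAccumulator flow.
func deliverBatch(acc telegraf.Accumulator, batch []telegraf.Metric) bool {
	tacc := acc.WithTracking(1) // space for one tracked group
	id := tacc.AddTrackingMetricGroup(batch)

	// Blocks until the group has been fully processed downstream.
	info := <-tacc.Delivered()
	return info.ID() == id && info.Delivered()
}
```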


@ -1,27 +1,31 @@
package agent
import (
"log"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/selfstat"
)
var (
NErrors = selfstat.Register("agent", "gather_errors", map[string]string{})
)
type MetricMaker interface {
LogName() string
MakeMetric(metric telegraf.Metric) telegraf.Metric
Log() telegraf.Logger
}
type accumulator struct {
maker MetricMaker
metrics chan<- telegraf.Metric
precision time.Duration
Name() string
MakeMetric(
measurement string,
fields map[string]interface{},
tags map[string]string,
mType telegraf.ValueType,
t time.Time,
) telegraf.Metric
}
func NewAccumulator(
maker MetricMaker,
metrics chan<- telegraf.Metric,
metrics chan telegraf.Metric,
) telegraf.Accumulator {
acc := accumulator{
maker: maker,
@ -31,13 +35,23 @@ func NewAccumulator(
return &acc
}
type accumulator struct {
metrics chan telegraf.Metric
maker MetricMaker
precision time.Duration
}
func (ac *accumulator) AddFields(
measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time,
) {
ac.addFields(measurement, tags, fields, telegraf.Untyped, t...)
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Untyped, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
func (ac *accumulator) AddGauge(
@ -46,7 +60,9 @@ func (ac *accumulator) AddGauge(
tags map[string]string,
t ...time.Time,
) {
ac.addFields(measurement, tags, fields, telegraf.Gauge, t...)
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Gauge, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
func (ac *accumulator) AddCounter(
@ -55,7 +71,9 @@ func (ac *accumulator) AddCounter(
tags map[string]string,
t ...time.Time,
) {
ac.addFields(measurement, tags, fields, telegraf.Counter, t...)
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Counter, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
func (ac *accumulator) AddSummary(
@ -64,7 +82,9 @@ func (ac *accumulator) AddSummary(
tags map[string]string,
t ...time.Time,
) {
ac.addFields(measurement, tags, fields, telegraf.Summary, t...)
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Summary, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
func (ac *accumulator) AddHistogram(
@ -73,28 +93,7 @@ func (ac *accumulator) AddHistogram(
tags map[string]string,
t ...time.Time,
) {
ac.addFields(measurement, tags, fields, telegraf.Histogram, t...)
}
func (ac *accumulator) AddMetric(m telegraf.Metric) {
m.SetTime(m.Time().Round(ac.precision))
if m := ac.maker.MakeMetric(m); m != nil {
ac.metrics <- m
}
}
func (ac *accumulator) addFields(
measurement string,
tags map[string]string,
fields map[string]interface{},
tp telegraf.ValueType,
t ...time.Time,
) {
m, err := metric.New(measurement, tags, fields, ac.getTime(t), tp)
if err != nil {
return
}
if m := ac.maker.MakeMetric(m); m != nil {
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Histogram, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
@ -105,14 +104,33 @@ func (ac *accumulator) AddError(err error) {
if err == nil {
return
}
ac.maker.Log().Errorf("Error in plugin: %v", err)
NErrors.Incr(1)
//TODO suppress/throttle consecutive duplicate errors?
log.Printf("E! Error in plugin [%s]: %s", ac.maker.Name(), err)
}
func (ac *accumulator) SetPrecision(precision time.Duration) {
ac.precision = precision
// SetPrecision takes two time.Duration objects. If the first is non-zero,
// it sets that as the precision. Otherwise, it takes the second argument
// as the order of time that the metrics should be rounded to, with the
// maximum being 1s.
func (ac *accumulator) SetPrecision(precision, interval time.Duration) {
if precision > 0 {
ac.precision = precision
return
}
switch {
case interval >= time.Second:
ac.precision = time.Second
case interval >= time.Millisecond:
ac.precision = time.Millisecond
case interval >= time.Microsecond:
ac.precision = time.Microsecond
default:
ac.precision = time.Nanosecond
}
}
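A standalone sketch of the rounding behavior described in the comment above, reusing the timestamp from the precision test later in this diff; the values are purely illustrative.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	ts := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)

	// interval >= time.Second selects one-second precision, so the
	// sub-second component is dropped.
	fmt.Println(ts.Round(time.Second)) // 2006-02-10 12:00:00 +0000 UTC

	// interval >= time.Microsecond selects one-microsecond precision:
	// 82912748ns rounds to 82913000ns.
	fmt.Println(ts.Round(time.Microsecond)) // 2006-02-10 12:00:00.082913 +0000 UTC
}
```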
func (ac *accumulator) getTime(t []time.Time) time.Time {
func (ac accumulator) getTime(t []time.Time) time.Time {
var timestamp time.Time
if len(t) > 0 {
timestamp = t[0]
@ -121,43 +139,3 @@ func (ac *accumulator) getTime(t []time.Time) time.Time {
}
return timestamp.Round(ac.precision)
}
func (ac *accumulator) WithTracking(maxTracked int) telegraf.TrackingAccumulator {
return &trackingAccumulator{
Accumulator: ac,
delivered: make(chan telegraf.DeliveryInfo, maxTracked),
}
}
type trackingAccumulator struct {
telegraf.Accumulator
delivered chan telegraf.DeliveryInfo
}
func (a *trackingAccumulator) AddTrackingMetric(m telegraf.Metric) telegraf.TrackingID {
dm, id := metric.WithTracking(m, a.onDelivery)
a.AddMetric(dm)
return id
}
func (a *trackingAccumulator) AddTrackingMetricGroup(group []telegraf.Metric) telegraf.TrackingID {
db, id := metric.WithGroupTracking(group, a.onDelivery)
for _, m := range db {
a.AddMetric(m)
}
return id
}
func (a *trackingAccumulator) Delivered() <-chan telegraf.DeliveryInfo {
return a.delivered
}
func (a *trackingAccumulator) onDelivery(info telegraf.DeliveryInfo) {
select {
case a.delivered <- info:
default:
// This is a programming error in the input. More items were sent for
// tracking than space requested.
panic("channel is full")
}
}


@ -9,7 +9,8 @@ import (
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/models"
"github.com/influxdata/telegraf/metric"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -60,6 +61,7 @@ func TestAccAddError(t *testing.T) {
a.AddError(fmt.Errorf("baz"))
errs := bytes.Split(errBuf.Bytes(), []byte{'\n'})
assert.EqualValues(t, int64(3), NErrors.Get())
require.Len(t, errs, 4) // 4 because of trailing newline
assert.Contains(t, string(errs[0]), "TestPlugin")
assert.Contains(t, string(errs[0]), "foo")
@ -74,6 +76,7 @@ func TestSetPrecision(t *testing.T) {
name string
unset bool
precision time.Duration
interval time.Duration
timestamp time.Time
expected time.Time
}{
@ -85,13 +88,13 @@ func TestSetPrecision(t *testing.T) {
},
{
name: "second interval",
precision: time.Second,
interval: time.Second,
timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
expected: time.Date(2006, time.February, 10, 12, 0, 0, 0, time.UTC),
},
{
name: "microsecond interval",
precision: time.Microsecond,
interval: time.Microsecond,
timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
expected: time.Date(2006, time.February, 10, 12, 0, 0, 82913000, time.UTC),
},
@ -108,7 +111,7 @@ func TestSetPrecision(t *testing.T) {
a := NewAccumulator(&TestMetricMaker{}, metrics)
if !tt.unset {
a.SetPrecision(tt.precision)
a.SetPrecision(tt.precision, tt.interval)
}
a.AddFields("acctest",
@ -125,36 +128,32 @@ func TestSetPrecision(t *testing.T) {
}
}
func TestAddTrackingMetricGroupEmpty(t *testing.T) {
ch := make(chan telegraf.Metric, 10)
metrics := []telegraf.Metric{}
acc := NewAccumulator(&TestMetricMaker{}, ch).WithTracking(1)
id := acc.AddTrackingMetricGroup(metrics)
select {
case tracking := <-acc.Delivered():
require.Equal(t, tracking.ID(), id)
default:
t.Fatal("empty group should be delivered immediately")
}
}
type TestMetricMaker struct {
}
func (tm *TestMetricMaker) Name() string {
return "TestPlugin"
}
func (tm *TestMetricMaker) LogName() string {
return tm.Name()
}
func (tm *TestMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric {
return metric
}
func (tm *TestMetricMaker) Log() telegraf.Logger {
return models.NewLogger("TestPlugin", "test", "")
func (tm *TestMetricMaker) MakeMetric(
measurement string,
fields map[string]interface{},
tags map[string]string,
mType telegraf.ValueType,
t time.Time,
) telegraf.Metric {
switch mType {
case telegraf.Untyped:
if m, err := metric.New(measurement, tags, fields, t); err == nil {
return m
}
case telegraf.Counter:
if m, err := metric.New(measurement, tags, fields, t, telegraf.Counter); err == nil {
return m
}
case telegraf.Gauge:
if m, err := metric.New(measurement, tags, fields, t, telegraf.Gauge); err == nil {
return m
}
}
return nil
}

File diff suppressed because it is too large


@ -1,19 +0,0 @@
// +build !windows
package agent
import (
"os"
"os/signal"
"syscall"
)
const flushSignal = syscall.SIGUSR1
func watchForFlushSignal(flushRequested chan os.Signal) {
signal.Notify(flushRequested, flushSignal)
}
func stopListeningForFlushSignal(flushRequested chan os.Signal) {
defer signal.Stop(flushRequested)
}


@ -2,13 +2,15 @@ package agent
import (
"testing"
"time"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal/config"
// needing to load the plugins
_ "github.com/influxdata/telegraf/plugins/inputs/all"
// needing to load the outputs
_ "github.com/influxdata/telegraf/plugins/outputs/all"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAgent_OmitHostname(t *testing.T) {
@ -22,35 +24,35 @@ func TestAgent_OmitHostname(t *testing.T) {
func TestAgent_LoadPlugin(t *testing.T) {
c := config.NewConfig()
c.InputFilters = []string{"mysql"}
err := c.LoadConfig("../config/testdata/telegraf-agent.toml")
err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ := NewAgent(c)
assert.Equal(t, 1, len(a.Config.Inputs))
c = config.NewConfig()
c.InputFilters = []string{"foo"}
err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 0, len(a.Config.Inputs))
c = config.NewConfig()
c.InputFilters = []string{"mysql", "foo"}
err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 1, len(a.Config.Inputs))
c = config.NewConfig()
c.InputFilters = []string{"mysql", "redis"}
err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 2, len(a.Config.Inputs))
c = config.NewConfig()
c.InputFilters = []string{"mysql", "foo", "redis", "bar"}
err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 2, len(a.Config.Inputs))
@ -59,42 +61,42 @@ func TestAgent_LoadPlugin(t *testing.T) {
func TestAgent_LoadOutput(t *testing.T) {
c := config.NewConfig()
c.OutputFilters = []string{"influxdb"}
err := c.LoadConfig("../config/testdata/telegraf-agent.toml")
err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ := NewAgent(c)
assert.Equal(t, 2, len(a.Config.Outputs))
c = config.NewConfig()
c.OutputFilters = []string{"kafka"}
err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 1, len(a.Config.Outputs))
c = config.NewConfig()
c.OutputFilters = []string{}
err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 3, len(a.Config.Outputs))
c = config.NewConfig()
c.OutputFilters = []string{"foo"}
err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 0, len(a.Config.Outputs))
c = config.NewConfig()
c.OutputFilters = []string{"influxdb", "foo"}
err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 2, len(a.Config.Outputs))
c = config.NewConfig()
c.OutputFilters = []string{"influxdb", "kafka"}
err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
assert.Equal(t, 3, len(c.Outputs))
a, _ = NewAgent(c)
@ -102,67 +104,8 @@ func TestAgent_LoadOutput(t *testing.T) {
c = config.NewConfig()
c.OutputFilters = []string{"influxdb", "foo", "kafka", "bar"}
err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 3, len(a.Config.Outputs))
}
func TestWindow(t *testing.T) {
parse := func(s string) time.Time {
tm, err := time.Parse(time.RFC3339, s)
if err != nil {
panic(err)
}
return tm
}
tests := []struct {
name string
start time.Time
roundInterval bool
period time.Duration
since time.Time
until time.Time
}{
{
name: "round with exact alignment",
start: parse("2018-03-27T00:00:00Z"),
roundInterval: true,
period: 30 * time.Second,
since: parse("2018-03-27T00:00:00Z"),
until: parse("2018-03-27T00:00:30Z"),
},
{
name: "round with alignment needed",
start: parse("2018-03-27T00:00:05Z"),
roundInterval: true,
period: 30 * time.Second,
since: parse("2018-03-27T00:00:00Z"),
until: parse("2018-03-27T00:00:30Z"),
},
{
name: "no round with exact alignment",
start: parse("2018-03-27T00:00:00Z"),
roundInterval: false,
period: 30 * time.Second,
since: parse("2018-03-27T00:00:00Z"),
until: parse("2018-03-27T00:00:30Z"),
},
{
name: "no found with alignment needed",
start: parse("2018-03-27T00:00:05Z"),
roundInterval: false,
period: 30 * time.Second,
since: parse("2018-03-27T00:00:05Z"),
until: parse("2018-03-27T00:00:35Z"),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
since, until := updateWindow(tt.start, tt.roundInterval, tt.period)
require.Equal(t, tt.since, since, "since")
require.Equal(t, tt.until, until, "until")
})
}
}
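The table above spells out the expected window semantics: a window of length `period` ending at or after `start`, optionally rounded to an interval boundary. A minimal standalone sketch that satisfies the four cases (an illustration only, not the agent's actual `updateWindow`):

```go
package main

import (
	"fmt"
	"time"
)

// updateWindow illustrates the behaviour exercised by TestWindow above:
// the returned window is [since, until) of length period; with rounding
// enabled, until is aligned to the next period boundary.
func updateWindow(start time.Time, roundInterval bool, period time.Duration) (time.Time, time.Time) {
	until := start.Add(period)
	if roundInterval {
		until = start.Truncate(period).Add(period)
	}
	return until.Add(-period), until
}

func main() {
	start, _ := time.Parse(time.RFC3339, "2018-03-27T00:00:05Z")
	since, until := updateWindow(start, true, 30*time.Second)
	// Matches the "round with alignment needed" case above.
	fmt.Println(since.Format(time.RFC3339), until.Format(time.RFC3339))
}
```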

View File

@ -1,13 +0,0 @@
// +build windows
package agent
import "os"
func watchForFlushSignal(flushRequested chan os.Signal) {
// not supported
}
func stopListeningForFlushSignal(flushRequested chan os.Signal) {
// not supported
}

View File

@ -1,268 +0,0 @@
package agent
import (
"context"
"sync"
"time"
"github.com/benbjohnson/clock"
"github.com/influxdata/telegraf/internal"
)
type empty struct{}
type Ticker interface {
Elapsed() <-chan time.Time
Stop()
}
// AlignedTicker delivers ticks at aligned times plus an optional jitter. Each
// tick is realigned to avoid drift and handle changes to the system clock.
//
// The ticks may have a jitter duration applied to them as a random offset to
// the interval. However, the overall pace is that of the interval, so on
// average you will have one collection each interval.
//
// The first tick is emitted at the next alignment.
//
// Ticks are dropped for slow consumers.
//
// The implementation currently does not recalculate the alignment until the
// next tick and has no maximum sleep, so when using large intervals the
// alignment is not corrected until the next tick.
type AlignedTicker struct {
interval time.Duration
jitter time.Duration
ch chan time.Time
cancel context.CancelFunc
wg sync.WaitGroup
}
func NewAlignedTicker(now time.Time, interval, jitter time.Duration) *AlignedTicker {
return newAlignedTicker(now, interval, jitter, clock.New())
}
func newAlignedTicker(now time.Time, interval, jitter time.Duration, clock clock.Clock) *AlignedTicker {
ctx, cancel := context.WithCancel(context.Background())
t := &AlignedTicker{
interval: interval,
jitter: jitter,
ch: make(chan time.Time, 1),
cancel: cancel,
}
d := t.next(now)
timer := clock.Timer(d)
t.wg.Add(1)
go func() {
defer t.wg.Done()
t.run(ctx, timer)
}()
return t
}
func (t *AlignedTicker) next(now time.Time) time.Duration {
next := internal.AlignTime(now, t.interval)
d := next.Sub(now)
if d == 0 {
d = t.interval
}
d += internal.RandomDuration(t.jitter)
return d
}
func (t *AlignedTicker) run(ctx context.Context, timer *clock.Timer) {
for {
select {
case <-ctx.Done():
timer.Stop()
return
case now := <-timer.C:
select {
case t.ch <- now:
default:
}
d := t.next(now)
timer.Reset(d)
}
}
}
func (t *AlignedTicker) Elapsed() <-chan time.Time {
return t.ch
}
func (t *AlignedTicker) Stop() {
t.cancel()
t.wg.Wait()
}
// UnalignedTicker delivers ticks at regular but unaligned intervals. No
// effort is made to avoid drift.
//
// The ticks may have a jitter duration applied to them as a random offset to
// the interval. However, the overall pace is that of the interval, so on
// average you will have one collection each interval.
//
// The first tick is emitted immediately.
//
// Ticks are dropped for slow consumers.
type UnalignedTicker struct {
interval time.Duration
jitter time.Duration
ch chan time.Time
cancel context.CancelFunc
wg sync.WaitGroup
}
func NewUnalignedTicker(interval, jitter time.Duration) *UnalignedTicker {
return newUnalignedTicker(interval, jitter, clock.New())
}
func newUnalignedTicker(interval, jitter time.Duration, clock clock.Clock) *UnalignedTicker {
ctx, cancel := context.WithCancel(context.Background())
t := &UnalignedTicker{
interval: interval,
jitter: jitter,
ch: make(chan time.Time, 1),
cancel: cancel,
}
ticker := clock.Ticker(t.interval)
t.ch <- clock.Now()
t.wg.Add(1)
go func() {
defer t.wg.Done()
t.run(ctx, ticker, clock)
}()
return t
}
func sleep(ctx context.Context, duration time.Duration, clock clock.Clock) error {
if duration == 0 {
return nil
}
t := clock.Timer(duration)
select {
case <-t.C:
return nil
case <-ctx.Done():
t.Stop()
return ctx.Err()
}
}
func (t *UnalignedTicker) run(ctx context.Context, ticker *clock.Ticker, clock clock.Clock) {
for {
select {
case <-ctx.Done():
ticker.Stop()
return
case <-ticker.C:
jitter := internal.RandomDuration(t.jitter)
err := sleep(ctx, jitter, clock)
if err != nil {
ticker.Stop()
return
}
select {
case t.ch <- clock.Now():
default:
}
}
}
}
func (t *UnalignedTicker) InjectTick() {
t.ch <- time.Now()
}
func (t *UnalignedTicker) Elapsed() <-chan time.Time {
return t.ch
}
func (t *UnalignedTicker) Stop() {
t.cancel()
t.wg.Wait()
}
// RollingTicker delivers ticks at regular but unaligned intervals.
//
// Because the next interval is scheduled based on the interval + jitter, you
// are guaranteed at least interval seconds without missing a tick and ticks
// will be evenly scheduled over time.
//
// On average you will have one collection each interval + (jitter/2).
//
// The first tick is emitted after interval+jitter seconds.
//
// Ticks are dropped for slow consumers.
type RollingTicker struct {
interval time.Duration
jitter time.Duration
ch chan time.Time
cancel context.CancelFunc
wg sync.WaitGroup
}
func NewRollingTicker(interval, jitter time.Duration) *RollingTicker {
return newRollingTicker(interval, jitter, clock.New())
}
func newRollingTicker(interval, jitter time.Duration, clock clock.Clock) *RollingTicker {
ctx, cancel := context.WithCancel(context.Background())
t := &RollingTicker{
interval: interval,
jitter: jitter,
ch: make(chan time.Time, 1),
cancel: cancel,
}
d := t.next()
timer := clock.Timer(d)
t.wg.Add(1)
go func() {
defer t.wg.Done()
t.run(ctx, timer)
}()
return t
}
func (t *RollingTicker) next() time.Duration {
return t.interval + internal.RandomDuration(t.jitter)
}
func (t *RollingTicker) run(ctx context.Context, timer *clock.Timer) {
for {
select {
case <-ctx.Done():
timer.Stop()
return
case now := <-timer.C:
select {
case t.ch <- now:
default:
}
d := t.next()
timer.Reset(d)
}
}
}
func (t *RollingTicker) Elapsed() <-chan time.Time {
return t.ch
}
func (t *RollingTicker) Stop() {
t.cancel()
t.wg.Wait()
}
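As the comments above describe, the aligned ticker sleeps until the next interval boundary and then adds a random jitter. A small standalone sketch of that next-tick computation, assuming `internal.AlignTime` rounds up to the next interval boundary and `internal.RandomDuration` returns a random offset in `[0, jitter)`:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextAlignedTick mirrors AlignedTicker.next above: wait until the next
// interval boundary (never a zero wait), then add a random jitter offset.
func nextAlignedTick(now time.Time, interval, jitter time.Duration) time.Duration {
	next := now.Truncate(interval).Add(interval) // assumed AlignTime behaviour
	d := next.Sub(now)
	if d == 0 {
		d = interval
	}
	if jitter > 0 {
		d += time.Duration(rand.Int63n(int64(jitter)))
	}
	return d
}

func main() {
	now := time.Date(2018, 3, 27, 0, 0, 5, 0, time.UTC)
	fmt.Println(nextAlignedTick(now, 10*time.Second, 0)) // 5s: fire at :10
}
```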

View File

@ -1,251 +0,0 @@
package agent
import (
"fmt"
"strings"
"testing"
"time"
"github.com/benbjohnson/clock"
"github.com/stretchr/testify/require"
)
var format = "2006-01-02T15:04:05.999Z07:00"
func TestAlignedTicker(t *testing.T) {
interval := 10 * time.Second
jitter := 0 * time.Second
clock := clock.NewMock()
since := clock.Now()
until := since.Add(60 * time.Second)
ticker := newAlignedTicker(since, interval, jitter, clock)
expected := []time.Time{
time.Unix(10, 0).UTC(),
time.Unix(20, 0).UTC(),
time.Unix(30, 0).UTC(),
time.Unix(40, 0).UTC(),
time.Unix(50, 0).UTC(),
time.Unix(60, 0).UTC(),
}
actual := []time.Time{}
for !clock.Now().After(until) {
select {
case tm := <-ticker.Elapsed():
actual = append(actual, tm.UTC())
default:
}
clock.Add(10 * time.Second)
}
require.Equal(t, expected, actual)
}
func TestAlignedTickerJitter(t *testing.T) {
interval := 10 * time.Second
jitter := 5 * time.Second
clock := clock.NewMock()
since := clock.Now()
until := since.Add(60 * time.Second)
ticker := newAlignedTicker(since, interval, jitter, clock)
last := since
for !clock.Now().After(until) {
select {
case tm := <-ticker.Elapsed():
require.True(t, tm.Sub(last) <= 15*time.Second)
require.True(t, tm.Sub(last) >= 5*time.Second)
last = last.Add(interval)
default:
}
clock.Add(5 * time.Second)
}
}
func TestAlignedTickerMissedTick(t *testing.T) {
interval := 10 * time.Second
jitter := 0 * time.Second
clock := clock.NewMock()
since := clock.Now()
ticker := newAlignedTicker(since, interval, jitter, clock)
clock.Add(25 * time.Second)
tm := <-ticker.Elapsed()
require.Equal(t, time.Unix(10, 0).UTC(), tm.UTC())
clock.Add(5 * time.Second)
tm = <-ticker.Elapsed()
require.Equal(t, time.Unix(30, 0).UTC(), tm.UTC())
}
func TestUnalignedTicker(t *testing.T) {
interval := 10 * time.Second
jitter := 0 * time.Second
clock := clock.NewMock()
clock.Add(1 * time.Second)
since := clock.Now()
until := since.Add(60 * time.Second)
ticker := newUnalignedTicker(interval, jitter, clock)
expected := []time.Time{
time.Unix(1, 0).UTC(),
time.Unix(11, 0).UTC(),
time.Unix(21, 0).UTC(),
time.Unix(31, 0).UTC(),
time.Unix(41, 0).UTC(),
time.Unix(51, 0).UTC(),
time.Unix(61, 0).UTC(),
}
actual := []time.Time{}
for !clock.Now().After(until) {
select {
case tm := <-ticker.Elapsed():
actual = append(actual, tm.UTC())
default:
}
clock.Add(10 * time.Second)
}
require.Equal(t, expected, actual)
}
func TestRollingTicker(t *testing.T) {
interval := 10 * time.Second
jitter := 0 * time.Second
clock := clock.NewMock()
clock.Add(1 * time.Second)
since := clock.Now()
until := since.Add(60 * time.Second)
ticker := newUnalignedTicker(interval, jitter, clock)
expected := []time.Time{
time.Unix(1, 0).UTC(),
time.Unix(11, 0).UTC(),
time.Unix(21, 0).UTC(),
time.Unix(31, 0).UTC(),
time.Unix(41, 0).UTC(),
time.Unix(51, 0).UTC(),
time.Unix(61, 0).UTC(),
}
actual := []time.Time{}
for !clock.Now().After(until) {
select {
case tm := <-ticker.Elapsed():
actual = append(actual, tm.UTC())
default:
}
clock.Add(10 * time.Second)
}
require.Equal(t, expected, actual)
}
// Simulates running the Ticker for an hour and displays stats about the
// operation.
func TestAlignedTickerDistribution(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
interval := 10 * time.Second
jitter := 5 * time.Second
clock := clock.NewMock()
since := clock.Now()
ticker := newAlignedTicker(since, interval, jitter, clock)
dist := simulatedDist(ticker, clock)
printDist(dist)
require.True(t, 350 < dist.Count)
require.True(t, 9 < dist.Mean() && dist.Mean() < 11)
}
// Simulates running the Ticker for an hour and displays stats about the
// operation.
func TestUnalignedTickerDistribution(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
interval := 10 * time.Second
jitter := 5 * time.Second
clock := clock.NewMock()
ticker := newUnalignedTicker(interval, jitter, clock)
dist := simulatedDist(ticker, clock)
printDist(dist)
require.True(t, 350 < dist.Count)
require.True(t, 9 < dist.Mean() && dist.Mean() < 11)
}
// Simulates running the Ticker for an hour and displays stats about the
// operation.
func TestRollingTickerDistribution(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
interval := 10 * time.Second
jitter := 5 * time.Second
clock := clock.NewMock()
ticker := newRollingTicker(interval, jitter, clock)
dist := simulatedDist(ticker, clock)
printDist(dist)
require.True(t, 275 < dist.Count)
require.True(t, 12 < dist.Mean() && 13 > dist.Mean())
}
type Distribution struct {
Buckets [60]int
Count int
Waittime float64
}
func (d *Distribution) Mean() float64 {
return d.Waittime / float64(d.Count)
}
func printDist(dist Distribution) {
for i, count := range dist.Buckets {
fmt.Printf("%2d %s\n", i, strings.Repeat("x", count))
}
fmt.Printf("Average interval: %f\n", dist.Mean())
fmt.Printf("Count: %d\n", dist.Count)
}
func simulatedDist(ticker Ticker, clock *clock.Mock) Distribution {
since := clock.Now()
until := since.Add(1 * time.Hour)
var dist Distribution
last := clock.Now()
for !clock.Now().After(until) {
select {
case tm := <-ticker.Elapsed():
dist.Buckets[tm.Second()] += 1
dist.Count++
dist.Waittime += tm.Sub(last).Seconds()
last = tm
default:
clock.Add(1 * time.Second)
}
}
return dist
}

View File

@ -5,7 +5,11 @@ package telegraf
// Add, Push, and Reset can not be called concurrently, so locking is not
// required when implementing an Aggregator plugin.
type Aggregator interface {
PluginDescriber
// SampleConfig returns the default configuration of the Input.
SampleConfig() string
// Description returns a one-sentence description on the Input.
Description() string
// Add the metric to the aggregator.
Add(in Metric)

View File

@ -1,35 +1,34 @@
version: "{build}"
image: Visual Studio 2019
cache:
- C:\gopath\pkg\mod -> go.sum
- C:\ProgramData\chocolatey\bin -> appveyor.yml
- C:\ProgramData\chocolatey\lib -> appveyor.yml
- C:\Cache
clone_folder: C:\gopath\src\github.com\influxdata\telegraf
environment:
GOPATH: C:\gopath
stack: go 1.14
platform: x64
install:
- choco install make
- cd "%GOPATH%\src\github.com\influxdata\telegraf"
- git config --system core.longpaths true
- IF NOT EXIST "C:\Cache" mkdir C:\Cache
- IF NOT EXIST "C:\Cache\go1.10.1.msi" curl -o "C:\Cache\go1.10.1.msi" https://storage.googleapis.com/golang/go1.10.1.windows-amd64.msi
- IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip
- IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip
- IF EXIST "C:\Go" rmdir /S /Q C:\Go
- msiexec.exe /i "C:\Cache\go1.10.1.msi" /quiet
- 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y
- 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
- go version
- go env
- git config --system core.longpaths true
build_script:
- make deps
- make telegraf
- cmd: C:\GnuWin32\bin\make deps
- cmd: C:\GnuWin32\bin\make telegraf
test_script:
- make check
- make test-windows
- cmd: C:\GnuWin32\bin\make test-windows
artifacts:
- path: telegraf.exe

View File

@ -1,8 +1,6 @@
package main
import (
"context"
"errors"
"flag"
"fmt"
"log"
@ -10,15 +8,13 @@ import (
_ "net/http/pprof" // Comment this line to disable pprof endpoint.
"os"
"os/signal"
"sort"
"runtime"
"strings"
"syscall"
"time"
"github.com/influxdata/telegraf/agent"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/goplugin"
"github.com/influxdata/telegraf/internal/config"
"github.com/influxdata/telegraf/logger"
_ "github.com/influxdata/telegraf/plugins/aggregators/all"
"github.com/influxdata/telegraf/plugins/inputs"
@ -26,26 +22,23 @@ import (
"github.com/influxdata/telegraf/plugins/outputs"
_ "github.com/influxdata/telegraf/plugins/outputs/all"
_ "github.com/influxdata/telegraf/plugins/processors/all"
"github.com/kardianos/service"
)
// If you update these, update usage.go and usage_windows.go
var fDebug = flag.Bool("debug", false,
"turn on debug logging")
var pprofAddr = flag.String("pprof-addr", "",
"pprof address to listen on, not activate pprof if empty")
var fQuiet = flag.Bool("quiet", false,
"run in quiet mode")
var fTest = flag.Bool("test", false, "enable test mode: gather metrics, print them out, and exit. Note: Test mode only runs inputs, not processors, aggregators, or outputs")
var fTestWait = flag.Int("test-wait", 0, "wait up to this many seconds for service inputs to complete in test mode")
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
var fConfig = flag.String("config", "", "configuration file to load")
var fConfigDirectory = flag.String("config-directory", "",
"directory containing additional *.conf files")
var fVersion = flag.Bool("version", false, "display the version and exit")
var fVersion = flag.Bool("version", false, "display the version")
var fSampleConfig = flag.Bool("sample-config", false,
"print out full sample configuration")
var fPidfile = flag.String("pidfile", "", "file to write our pid to")
var fSectionFilters = flag.String("section-filter", "",
"filter the sections to print, separator is ':'. Valid values are 'agent', 'global_tags', 'outputs', 'processors', 'aggregators' and 'inputs'")
var fInputFilters = flag.String("input-filter", "",
"filter the inputs to enable, separator is :")
var fInputList = flag.Bool("input-list", false,
@ -61,23 +54,30 @@ var fProcessorFilters = flag.String("processor-filter", "",
var fUsage = flag.String("usage", "",
"print usage for a plugin, ie, 'telegraf --usage mysql'")
var fService = flag.String("service", "",
"operate on the service (windows only)")
var fServiceName = flag.String("service-name", "telegraf", "service name (windows only)")
var fServiceDisplayName = flag.String("service-display-name", "Telegraf Data Collector Service", "service display name (windows only)")
"operate on the service")
var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)")
var fPlugins = flag.String("plugin-directory", "",
"path to directory containing external plugins")
var fRunOnce = flag.Bool("once", false, "run one gather and exit")
var (
version string
commit string
branch string
nextVersion = "1.8.0"
version string
commit string
branch string
)
func init() {
// If commit or branch are not set, make that clear.
if commit == "" {
commit = "unknown"
}
if branch == "" {
branch = "unknown"
}
}
var stop chan struct{}
func reloadLoop(
stop chan struct{},
inputFilters []string,
outputFilters []string,
aggregatorFilters []string,
@ -88,151 +88,148 @@ func reloadLoop(
for <-reload {
reload <- false
ctx, cancel := context.WithCancel(context.Background())
// If no other options are specified, load the config file and run.
c := config.NewConfig()
c.OutputFilters = outputFilters
c.InputFilters = inputFilters
err := c.LoadConfig(*fConfig)
if err != nil {
log.Fatal("E! " + err.Error())
}
signals := make(chan os.Signal, 1)
signal.Notify(signals, os.Interrupt, syscall.SIGHUP,
syscall.SIGTERM, syscall.SIGINT)
if *fConfigDirectory != "" {
err = c.LoadDirectory(*fConfigDirectory)
if err != nil {
log.Fatal("E! " + err.Error())
}
}
if !*fTest && len(c.Outputs) == 0 {
log.Fatalf("E! Error: no outputs found, did you provide a valid config file?")
}
if len(c.Inputs) == 0 {
log.Fatalf("E! Error: no inputs found, did you provide a valid config file?")
}
if int64(c.Agent.Interval.Duration) <= 0 {
log.Fatalf("E! Agent interval must be positive, found %s",
c.Agent.Interval.Duration)
}
if int64(c.Agent.FlushInterval.Duration) <= 0 {
log.Fatalf("E! Agent flush_interval must be positive; found %s",
c.Agent.Interval.Duration)
}
ag, err := agent.NewAgent(c)
if err != nil {
log.Fatal("E! " + err.Error())
}
// Setup logging
logger.SetupLogging(
ag.Config.Agent.Debug || *fDebug,
ag.Config.Agent.Quiet || *fQuiet,
ag.Config.Agent.Logfile,
)
if *fTest {
err = ag.Test()
if err != nil {
log.Fatal("E! " + err.Error())
}
os.Exit(0)
}
err = ag.Connect()
if err != nil {
log.Fatal("E! " + err.Error())
}
shutdown := make(chan struct{})
signals := make(chan os.Signal)
signal.Notify(signals, os.Interrupt, syscall.SIGHUP, syscall.SIGTERM)
go func() {
select {
case sig := <-signals:
if sig == os.Interrupt || sig == syscall.SIGTERM {
close(shutdown)
}
if sig == syscall.SIGHUP {
log.Printf("I! Reloading Telegraf config")
log.Printf("I! Reloading Telegraf config\n")
<-reload
reload <- true
close(shutdown)
}
cancel()
case <-stop:
cancel()
close(shutdown)
}
}()
err := runAgent(ctx, inputFilters, outputFilters)
if err != nil && err != context.Canceled {
log.Fatalf("E! [telegraf] Error running agent: %v", err)
log.Printf("I! Starting Telegraf %s\n", displayVersion())
log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
log.Printf("I! Loaded aggregators: %s", strings.Join(c.AggregatorNames(), " "))
log.Printf("I! Loaded processors: %s", strings.Join(c.ProcessorNames(), " "))
log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
log.Printf("I! Tags enabled: %s", c.ListTags())
if *fPidfile != "" {
f, err := os.OpenFile(*fPidfile, os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
log.Printf("E! Unable to create pidfile: %s", err)
} else {
fmt.Fprintf(f, "%d\n", os.Getpid())
f.Close()
defer func() {
err := os.Remove(*fPidfile)
if err != nil {
log.Printf("E! Unable to remove pidfile: %s", err)
}
}()
}
}
ag.Run(shutdown)
}
}
func runAgent(ctx context.Context,
inputFilters []string,
outputFilters []string,
) error {
log.Printf("I! Starting Telegraf %s", version)
// If no other options are specified, load the config file and run.
c := config.NewConfig()
c.OutputFilters = outputFilters
c.InputFilters = inputFilters
err := c.LoadConfig(*fConfig)
if err != nil {
return err
}
if *fConfigDirectory != "" {
err = c.LoadDirectory(*fConfigDirectory)
if err != nil {
return err
}
}
if !*fTest && len(c.Outputs) == 0 {
return errors.New("Error: no outputs found, did you provide a valid config file?")
}
if *fPlugins == "" && len(c.Inputs) == 0 {
return errors.New("Error: no inputs found, did you provide a valid config file?")
}
if int64(c.Agent.Interval.Duration) <= 0 {
return fmt.Errorf("Agent interval must be positive, found %s",
c.Agent.Interval.Duration)
}
if int64(c.Agent.FlushInterval.Duration) <= 0 {
return fmt.Errorf("Agent flush_interval must be positive; found %s",
c.Agent.Interval.Duration)
}
ag, err := agent.NewAgent(c)
if err != nil {
return err
}
// Setup logging as configured.
logConfig := logger.LogConfig{
Debug: ag.Config.Agent.Debug || *fDebug,
Quiet: ag.Config.Agent.Quiet || *fQuiet,
LogTarget: ag.Config.Agent.LogTarget,
Logfile: ag.Config.Agent.Logfile,
RotationInterval: ag.Config.Agent.LogfileRotationInterval,
RotationMaxSize: ag.Config.Agent.LogfileRotationMaxSize,
RotationMaxArchives: ag.Config.Agent.LogfileRotationMaxArchives,
}
logger.SetupLogging(logConfig)
if *fRunOnce {
wait := time.Duration(*fTestWait) * time.Second
return ag.Once(ctx, wait)
}
if *fTest || *fTestWait != 0 {
wait := time.Duration(*fTestWait) * time.Second
return ag.Test(ctx, wait)
}
log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
log.Printf("I! Loaded aggregators: %s", strings.Join(c.AggregatorNames(), " "))
log.Printf("I! Loaded processors: %s", strings.Join(c.ProcessorNames(), " "))
log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
log.Printf("I! Tags enabled: %s", c.ListTags())
if *fPidfile != "" {
f, err := os.OpenFile(*fPidfile, os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
log.Printf("E! Unable to create pidfile: %s", err)
} else {
fmt.Fprintf(f, "%d\n", os.Getpid())
f.Close()
defer func() {
err := os.Remove(*fPidfile)
if err != nil {
log.Printf("E! Unable to remove pidfile: %s", err)
}
}()
}
}
return ag.Run(ctx)
}
func usageExit(rc int) {
fmt.Println(internal.Usage)
os.Exit(rc)
}
func formatFullVersion() string {
var parts = []string{"Telegraf"}
type program struct {
inputFilters []string
outputFilters []string
aggregatorFilters []string
processorFilters []string
}
if version != "" {
parts = append(parts, version)
} else {
parts = append(parts, "unknown")
func (p *program) Start(s service.Service) error {
go p.run()
return nil
}
func (p *program) run() {
stop = make(chan struct{})
reloadLoop(
stop,
p.inputFilters,
p.outputFilters,
p.aggregatorFilters,
p.processorFilters,
)
}
func (p *program) Stop(s service.Service) error {
close(stop)
return nil
}
func displayVersion() string {
if version == "" {
return fmt.Sprintf("v%s~%s", nextVersion, commit)
}
if branch != "" || commit != "" {
if branch == "" {
branch = "unknown"
}
if commit == "" {
commit = "unknown"
}
git := fmt.Sprintf("(git: %s %s)", branch, commit)
parts = append(parts, git)
}
return strings.Join(parts, " ")
return "v" + version
}
func main() {
@ -240,10 +237,7 @@ func main() {
flag.Parse()
args := flag.Args()
sectionFilters, inputFilters, outputFilters := []string{}, []string{}, []string{}
if *fSectionFilters != "" {
sectionFilters = strings.Split(":"+strings.TrimSpace(*fSectionFilters)+":", ":")
}
inputFilters, outputFilters := []string{}, []string{}
if *fInputFilters != "" {
inputFilters = strings.Split(":"+strings.TrimSpace(*fInputFilters)+":", ":")
}
@ -259,16 +253,6 @@ func main() {
processorFilters = strings.Split(":"+strings.TrimSpace(*fProcessorFilters)+":", ":")
}
logger.SetupLogging(logger.LogConfig{})
// Load external plugins, if requested.
if *fPlugins != "" {
log.Printf("I! Loading external plugins from: %s", *fPlugins)
if err := goplugin.LoadExternalPlugins(*fPlugins); err != nil {
log.Fatal("E! " + err.Error())
}
}
if *pprofAddr != "" {
go func() {
pprofHostPort := *pprofAddr
@ -289,11 +273,10 @@ func main() {
if len(args) > 0 {
switch args[0] {
case "version":
fmt.Println(formatFullVersion())
fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit)
return
case "config":
config.PrintSampleConfig(
sectionFilters,
inputFilters,
outputFilters,
aggregatorFilters,
@ -306,33 +289,22 @@ func main() {
// switch for flags which just do something and exit immediately
switch {
case *fOutputList:
fmt.Println("Available Output Plugins: ")
names := make([]string, 0, len(outputs.Outputs))
for k := range outputs.Outputs {
names = append(names, k)
}
sort.Strings(names)
for _, k := range names {
fmt.Println("Available Output Plugins:")
for k, _ := range outputs.Outputs {
fmt.Printf(" %s\n", k)
}
return
case *fInputList:
fmt.Println("Available Input Plugins:")
names := make([]string, 0, len(inputs.Inputs))
for k := range inputs.Inputs {
names = append(names, k)
}
sort.Strings(names)
for _, k := range names {
for k, _ := range inputs.Inputs {
fmt.Printf(" %s\n", k)
}
return
case *fVersion:
fmt.Println(formatFullVersion())
fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit)
return
case *fSampleConfig:
config.PrintSampleConfig(
sectionFilters,
inputFilters,
outputFilters,
aggregatorFilters,
@ -348,20 +320,53 @@ func main() {
return
}
shortVersion := version
if shortVersion == "" {
shortVersion = "unknown"
}
if runtime.GOOS == "windows" && !(*fRunAsConsole) {
svcConfig := &service.Config{
Name: "telegraf",
DisplayName: "Telegraf Data Collector Service",
Description: "Collects data using a series of plugins and publishes it to" +
"another series of plugins.",
Arguments: []string{"--config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
}
// Configure version
if err := internal.SetVersion(shortVersion); err != nil {
log.Println("Telegraf version already configured to: " + internal.Version())
prg := &program{
inputFilters: inputFilters,
outputFilters: outputFilters,
aggregatorFilters: aggregatorFilters,
processorFilters: processorFilters,
}
s, err := service.New(prg, svcConfig)
if err != nil {
log.Fatal("E! " + err.Error())
}
// Handle the --service flag here to prevent any issues with tooling that
// may not have an interactive session, e.g. installing from Ansible.
if *fService != "" {
if *fConfig != "" {
(*svcConfig).Arguments = []string{"--config", *fConfig}
}
if *fConfigDirectory != "" {
(*svcConfig).Arguments = append((*svcConfig).Arguments, "--config-directory", *fConfigDirectory)
}
err := service.Control(s, *fService)
if err != nil {
log.Fatal("E! " + err.Error())
}
os.Exit(0)
} else {
err = s.Run()
if err != nil {
log.Println("E! " + err.Error())
}
}
} else {
stop = make(chan struct{})
reloadLoop(
stop,
inputFilters,
outputFilters,
aggregatorFilters,
processorFilters,
)
}
run(
inputFilters,
outputFilters,
aggregatorFilters,
processorFilters,
)
}

View File

@ -1,13 +0,0 @@
// +build !windows
package main
func run(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) {
stop = make(chan struct{})
reloadLoop(
inputFilters,
outputFilters,
aggregatorFilters,
processorFilters,
)
}

View File

@ -1,124 +0,0 @@
// +build windows
package main
import (
"log"
"os"
"runtime"
"github.com/influxdata/telegraf/logger"
"github.com/kardianos/service"
)
func run(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) {
if runtime.GOOS == "windows" && windowsRunAsService() {
runAsWindowsService(
inputFilters,
outputFilters,
aggregatorFilters,
processorFilters,
)
} else {
stop = make(chan struct{})
reloadLoop(
inputFilters,
outputFilters,
aggregatorFilters,
processorFilters,
)
}
}
type program struct {
inputFilters []string
outputFilters []string
aggregatorFilters []string
processorFilters []string
}
func (p *program) Start(s service.Service) error {
go p.run()
return nil
}
func (p *program) run() {
stop = make(chan struct{})
reloadLoop(
p.inputFilters,
p.outputFilters,
p.aggregatorFilters,
p.processorFilters,
)
}
func (p *program) Stop(s service.Service) error {
close(stop)
return nil
}
func runAsWindowsService(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) {
programFiles := os.Getenv("ProgramFiles")
if programFiles == "" { // Should never happen
programFiles = "C:\\Program Files"
}
svcConfig := &service.Config{
Name: *fServiceName,
DisplayName: *fServiceDisplayName,
Description: "Collects data using a series of plugins and publishes it to " +
"another series of plugins.",
Arguments: []string{"--config", programFiles + "\\Telegraf\\telegraf.conf"},
}
prg := &program{
inputFilters: inputFilters,
outputFilters: outputFilters,
aggregatorFilters: aggregatorFilters,
processorFilters: processorFilters,
}
s, err := service.New(prg, svcConfig)
if err != nil {
log.Fatal("E! " + err.Error())
}
// Handle the --service flag here to prevent any issues with tooling that
// may not have an interactive session, e.g. installing from Ansible.
if *fService != "" {
if *fConfig != "" {
svcConfig.Arguments = []string{"--config", *fConfig}
}
if *fConfigDirectory != "" {
svcConfig.Arguments = append(svcConfig.Arguments, "--config-directory", *fConfigDirectory)
}
// Pass the service name on the service command line so the custom name is kept after relaunching as a service
svcConfig.Arguments = append(svcConfig.Arguments, "--service-name", *fServiceName)
err := service.Control(s, *fService)
if err != nil {
log.Fatal("E! " + err.Error())
}
os.Exit(0)
} else {
winlogger, err := s.Logger(nil)
if err == nil {
// When in service mode, register the eventlog target and set up default logging to the eventlog
logger.RegisterEventLogger(winlogger)
logger.SetupLogging(logger.LogConfig{LogTarget: logger.LogTargetEventlog})
}
err = s.Run()
if err != nil {
log.Println("E! " + err.Error())
}
}
}
// Return true if Telegraf should create a Windows service.
func windowsRunAsService() bool {
if *fService != "" {
return true
}
if *fRunAsConsole {
return false
}
return !service.Interactive()
}

View File

@ -1,7 +0,0 @@
[[outputs.http]]
headers = { Authorization = "Token $TOKEN",Content-Type = "application/json" }
taginclude = ["org_id"]
[[outputs.http]]
headers = { Authorization = "Token $TOKEN",Content-Type = "application/json" }
taginclude = ["org_id"]

View File

@ -1,2 +0,0 @@
[[inputs.http_listener_v2]]
not_a_field = true

View File

@ -1,4 +0,0 @@
[[outputs.http]]
[outputs.http.headers]
Content-Type = "application/json"
taginclude = ["org_id"]

View File

@ -1,5 +0,0 @@
[[outputs.http]]
scopes = [
# comment
"test" # comment
]

View File

@ -1,9 +0,0 @@
[[inputs.http_listener_v2]]
write_timeout = "1s"
max_body_size = "1MiB"
tls_cert = """
/path/to/my/cert
"""
tls_key = '''
/path/to/my/key
'''

View File

@ -1,2 +0,0 @@
[[inputs.http_listener_v2]]
port = "80"

View File

@ -1,2 +0,0 @@
[[inputs.http_listener_v2]]
methods = "POST"

View File

@ -1,88 +0,0 @@
package config
import (
"bytes"
"strconv"
"time"
"github.com/alecthomas/units"
)
// Duration is a time.Duration
type Duration time.Duration
// Size is an int64
type Size int64
// Number is a float
type Number float64
// UnmarshalTOML parses the duration from the TOML config file
func (d Duration) UnmarshalTOML(b []byte) error {
var err error
b = bytes.Trim(b, `'`)
// see if we can directly convert it
dur, err := time.ParseDuration(string(b))
if err == nil {
d = Duration(dur)
return nil
}
// Parse string duration, ie, "1s"
if uq, err := strconv.Unquote(string(b)); err == nil && len(uq) > 0 {
dur, err := time.ParseDuration(uq)
if err == nil {
d = Duration(dur)
return nil
}
}
// First try parsing as integer seconds
sI, err := strconv.ParseInt(string(b), 10, 64)
if err == nil {
dur := time.Second * time.Duration(sI)
d = Duration(dur)
return nil
}
// Second try parsing as float seconds
sF, err := strconv.ParseFloat(string(b), 64)
if err == nil {
dur := time.Second * time.Duration(sF)
d = Duration(dur)
return nil
}
return nil
}
func (s Size) UnmarshalTOML(b []byte) error {
var err error
b = bytes.Trim(b, `'`)
val, err := strconv.ParseInt(string(b), 10, 64)
if err == nil {
s = Size(val)
return nil
}
uq, err := strconv.Unquote(string(b))
if err != nil {
return err
}
val, err = units.ParseStrictBytes(uq)
if err != nil {
return err
}
s = Size(val)
return nil
}
func (n Number) UnmarshalTOML(b []byte) error {
value, err := strconv.ParseFloat(string(b), 64)
if err != nil {
return err
}
n = Number(value)
return nil
}
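One thing worth noting in the file above, at least as rendered here, is that the `UnmarshalTOML` methods use value receivers, so the assignments to `d`, `s`, and `n` never reach the caller; pointer receivers would be required to retain the parsed values. The parsing fallbacks themselves can be sketched as a standalone helper (illustrative only):

```go
package main

import (
	"bytes"
	"fmt"
	"strconv"
	"time"
)

// parseDuration mirrors the fallbacks in Duration.UnmarshalTOML above:
// a bare or quoted Go duration string ("1m30s"), then integer seconds,
// then float seconds (truncated, as in the original).
func parseDuration(b []byte) (time.Duration, error) {
	b = bytes.Trim(b, `'`)
	if d, err := time.ParseDuration(string(b)); err == nil {
		return d, nil
	}
	if uq, err := strconv.Unquote(string(b)); err == nil && len(uq) > 0 {
		if d, err := time.ParseDuration(uq); err == nil {
			return d, nil
		}
	}
	if i, err := strconv.ParseInt(string(b), 10, 64); err == nil {
		return time.Duration(i) * time.Second, nil
	}
	if f, err := strconv.ParseFloat(string(b), 64); err == nil {
		return time.Duration(f) * time.Second, nil
	}
	return 0, fmt.Errorf("invalid duration %q", string(b))
}

func main() {
	for _, in := range []string{`"1m30s"`, `10`, `1.5`} {
		d, _ := parseDuration([]byte(in))
		fmt.Println(in, "=>", d)
	}
}
```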

View File

@ -17,20 +17,19 @@ services:
- KAFKA_ADVERTISED_HOST_NAME=localhost
- KAFKA_ADVERTISED_PORT=9092
- KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
- KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1
- KAFKA_CREATE_TOPICS="test:1:1"
- JAVA_OPTS="-Xms256m -Xmx256m"
ports:
- "9092:9092"
depends_on:
- zookeeper
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0
image: elasticsearch:5
environment:
- "ES_JAVA_OPTS=-Xms256m -Xmx256m"
- discovery.type=single-node
- xpack.security.enabled=false
- JAVA_OPTS="-Xms256m -Xmx256m"
ports:
- "9200:9200"
- "9300:9300"
mysql:
image: mysql
environment:
@ -39,19 +38,10 @@ services:
- "3306:3306"
memcached:
image: memcached
ports:
ports:
- "11211:11211"
pgbouncer:
image: mbentley/ubuntu-pgbouncer
environment:
- PG_ENV_POSTGRESQL_USER=pgbouncer
- PG_ENV_POSTGRESQL_PASS=pgbouncer
ports:
- "6432:6432"
postgres:
image: postgres:alpine
environment:
- POSTGRES_HOST_AUTH_METHOD=trust
ports:
- "5432:5432"
rabbitmq:
@ -93,10 +83,11 @@ services:
ports:
- "4200:4200"
- "4230:4230"
- "6543:5432"
- "5432:5432"
command:
- crate
- -Cnetwork.host=0.0.0.0
- -Ctransport.host=localhost
- -Clicense.enterprise=false
environment:
- CRATE_HEAP_SIZE=128m

View File

@ -1,132 +0,0 @@
### Aggregator Plugins
This section is for developers who want to create a new aggregator plugin.
### Aggregator Plugin Guidelines
* An aggregator must conform to the [telegraf.Aggregator][] interface.
* Aggregators should call `aggregators.Add` in their `init` function to
register themselves. See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
plugin can be configured. This is included in `telegraf config`. Please
consult the [SampleConfig][] page for the latest style guidelines.
* The `Description` function should say in one line what this aggregator does.
* The Aggregator plugin will need to keep caches of metrics that have passed
through it. This should be done using the builtin `HashID()` function of
each metric.
* When the `Reset()` function is called, all caches should be cleared.
* Follow the recommended [CodeStyle][].
### Aggregator Plugin Example
```go
package min
// min.go
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
type Min struct {
// caches for metric fields, names, and tags
fieldCache map[uint64]map[string]float64
nameCache map[uint64]string
tagCache map[uint64]map[string]string
}
func NewMin() telegraf.Aggregator {
m := &Min{}
m.Reset()
return m
}
var sampleConfig = `
## period is the flush & clear interval of the aggregator.
period = "30s"
## If true drop_original will drop the original metrics and
## only send aggregates.
drop_original = false
`
func (m *Min) Init() error {
return nil
}
func (m *Min) SampleConfig() string {
return sampleConfig
}
func (m *Min) Description() string {
return "Keep the aggregate min of each metric passing through."
}
func (m *Min) Add(in telegraf.Metric) {
id := in.HashID()
if _, ok := m.nameCache[id]; !ok {
// hit an uncached metric, create caches for first time:
m.nameCache[id] = in.Name()
m.tagCache[id] = in.Tags()
m.fieldCache[id] = make(map[string]float64)
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
m.fieldCache[id][k] = fv
}
}
} else {
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
if _, ok := m.fieldCache[id][k]; !ok {
// hit an uncached field of a cached metric
m.fieldCache[id][k] = fv
continue
}
if fv < m.fieldCache[id][k] {
// set new minimum
m.fieldCache[id][k] = fv
}
}
}
}
}
func (m *Min) Push(acc telegraf.Accumulator) {
for id, _ := range m.nameCache {
fields := map[string]interface{}{}
for k, v := range m.fieldCache[id] {
fields[k+"_min"] = v
}
acc.AddFields(m.nameCache[id], fields, m.tagCache[id])
}
}
func (m *Min) Reset() {
m.fieldCache = make(map[uint64]map[string]float64)
m.nameCache = make(map[uint64]string)
m.tagCache = make(map[uint64]map[string]string)
}
func convert(in interface{}) (float64, bool) {
switch v := in.(type) {
case float64:
return v, true
case int64:
return float64(v), true
default:
return 0, false
}
}
func init() {
aggregators.Add("min", func() telegraf.Aggregator {
return NewMin()
})
}
```
[telegraf.Aggregator]: https://godoc.org/github.com/influxdata/telegraf#Aggregator
[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig
[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle

View File

@ -44,15 +44,13 @@ to control which metrics are passed through a processor or aggregator. If a
metric is filtered out the metric bypasses the plugin and is passed downstream
to the next plugin.
### Processor
Processor plugins process metrics as they pass through and immediately emit
**Processor** plugins process metrics as they pass through and immediately emit
results based on the values they process. For example, this could be printing
all metrics or adding a tag to all metrics that pass through.
### Aggregator
Aggregator plugins, on the other hand, are a bit more complicated. Aggregators
**Aggregator** plugins, on the other hand, are a bit more complicated. Aggregators
are typically for emitting new _aggregate_ metrics, such as a running mean,
minimum, maximum, or standard deviation. For this reason, all _aggregator_
minimum, maximum, quantiles, or standard deviation. For this reason, all _aggregator_
plugins are configured with a `period`. The `period` is the size of the window
of metrics that each _aggregate_ represents. In other words, the emitted
_aggregate_ metric will be the aggregated value of the past `period` seconds.
@ -60,8 +58,7 @@ Since many users will only care about their aggregates and not every single metr
gathered, there is also a `drop_original` argument, which tells Telegraf to only
emit the aggregates and not the original metrics.
Since aggregates are created for each measurement, field, and unique tag combination
the plugin receives, you can make use of `taginclude` to group
aggregates by specific tags only.
**Note:** Aggregator plugins only aggregate metrics within their periods (`now() - period`). Data with a timestamp earlier than `now() - period` cannot be included.
**NOTE** Since aggregators only aggregate metrics within their period,
historical data is not supported. In other words, if your metric timestamp is more
than `now() - period` in the past, it will not be aggregated. If this is a feature
that you need, please comment on this [github issue](https://github.com/influxdata/telegraf/issues/1992)
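A tiny illustration of the window rule described in the notes above: only metrics whose timestamps fall inside `(now - period, now]` are aggregated. The helper below is hypothetical and only demonstrates the rule; it is not Telegraf's internal implementation.

```go
package main

import (
	"fmt"
	"time"
)

// withinPeriod reports whether a metric timestamp falls inside the current
// aggregation window (now-period, now], per the note above.
func withinPeriod(metricTime, now time.Time, period time.Duration) bool {
	return metricTime.After(now.Add(-period)) && !metricTime.After(now)
}

func main() {
	now := time.Now()
	fmt.Println(withinPeriod(now.Add(-10*time.Second), now, 30*time.Second)) // true: aggregated
	fmt.Println(withinPeriod(now.Add(-2*time.Minute), now, 30*time.Second))  // false: too old
}
```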

View File

@ -1,25 +1,33 @@
# Configuration
# Telegraf Configuration
Telegraf's configuration file is written using [TOML][] and is composed of
three sections: [global tags][], [agent][] settings, and [plugins][].
You can see the latest config file with all available plugins here:
[telegraf.conf](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf)
View the default [telegraf.conf][] config file with all available plugins.
## Generating a Configuration File
### Generating a Configuration File
A default Telegraf config file can be auto-generated by telegraf:
A default config file can be generated by telegraf:
```sh
```
telegraf config > telegraf.conf
```
To generate a file with specific inputs and outputs, you can use the
--input-filter and --output-filter flags:
```sh
```
telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config
```
### Configuration Loading
## Environment Variables
Environment variables can be used anywhere in the config file; simply prepend
them with $. For strings the variable must be within quotes (ie, "$STR_VAR");
for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR).
When using the `.deb` or `.rpm` packages, you can define environment variables
in the `/etc/default/telegraf` file.
## Configuration file locations
The location of the configuration file can be set via the `--config` command
line flag.
@ -32,431 +40,180 @@ On most systems, the default locations are `/etc/telegraf/telegraf.conf` for
the main configuration file and `/etc/telegraf/telegraf.d` for the directory of
configuration files.
### Environment Variables
# Global Tags
Environment variables can be used anywhere in the config file; simply surround
them with `${}`. Replacement occurs before file parsing. For strings
the variable must be within quotes, e.g., `"${STR_VAR}"`; for numbers and booleans
they should be unquoted, e.g., `${INT_VAR}`, `${BOOL_VAR}`.
Global tags can be specified in the `[global_tags]` section of the config file
in key="value" format. All metrics being gathered on this host will be tagged
with the tags specified here.
When using the `.deb` or `.rpm` packages, you can define environment variables
in the `/etc/default/telegraf` file.
## Agent Configuration
**Example**:
Telegraf has a few options you can configure under the `[agent]` section of the
config.
`/etc/default/telegraf`:
```
USER="alice"
INFLUX_URL="http://localhost:8086"
INFLUX_SKIP_DATABASE_CREATION="true"
INFLUX_PASSWORD="monkey123"
```
* **interval**: Default data collection interval for all inputs
* **round_interval**: Rounds collection interval to 'interval'
ie, if interval="10s" then always collect on :00, :10, :20, etc.
* **metric_batch_size**: Telegraf will send metrics to output in batch of at
most metric_batch_size metrics.
* **metric_buffer_limit**: Telegraf will cache metric_buffer_limit metrics
for each output, and will flush this buffer on a successful write.
This should be a multiple of metric_batch_size and must not be less
than 2 times metric_batch_size.
* **collection_jitter**: Collection jitter is used to jitter
the collection by a random amount.
Each plugin will sleep for a random time within jitter before collecting.
This can be used to avoid many plugins querying things like sysfs at the
same time, which can have a measurable effect on the system.
* **flush_interval**: Default data flushing interval for all outputs.
You should not set this below interval. The maximum flush_interval will be flush_interval + flush_jitter.
* **flush_jitter**: Jitter the flush interval by a random amount.
This is primarily to avoid
large write spikes for users running a large number of telegraf instances.
ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s.
* **precision**:
By default or when set to "0s", precision will be set to the same
timestamp order as the collection interval, with the maximum being 1s.
Precision will NOT be used for service inputs. It is up to each individual
service input to set the timestamp at the appropriate precision.
Valid time units are "ns", "us" (or "µs"), "ms", "s".
* **logfile**: Specify the log file name. The empty string means to log to stderr.
* **debug**: Run telegraf in debug mode.
* **quiet**: Run telegraf in quiet mode (error messages only).
* **hostname**: Override default hostname, if empty use os.Hostname().
* **omit_hostname**: If true, do not set the "host" tag in the telegraf agent.
## Input Configuration
The following config parameters are available for all inputs:
* **interval**: How often to gather this metric. Normal plugins use a single
global interval, but if one particular input should be run less or more often,
you can configure that here.
* **name_override**: Override the base name of the measurement.
(Default is the name of the input).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are emitted from the input plugin.
## Output Configuration
The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are emitted from the output plugin.
## Aggregator Configuration
The following config parameters are available for all aggregators:
* **period**: The period on which to flush & clear each aggregator. All metrics
that are sent with timestamps outside of this period will be ignored by the
aggregator.
* **delay**: The delay before each aggregator is flushed. This controls
how long aggregators wait to receive metrics from input plugins,
in the case that aggregators are flushing and inputs are gathering on the
same interval.
* **drop_original**: If true, the original metric will be dropped by the
aggregator and will not get sent to the output plugins.
* **name_override**: Override the base name of the measurement.
(Default is the name of the input).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are handled by the aggregator. Excluded metrics are passed
downstream to the next aggregator.
## Processor Configuration
The following config parameters are available for all processors:
* **order**: This is the order in which the processor(s) get executed. If this
is not specified then processor execution order will be random.
The [measurement filtering](#measurement-filtering) parameters can be used
to limit what metrics are handled by the processor. Excluded metrics are
passed downstream to the next processor.
#### Measurement Filtering
Filters can be configured per input, output, processor, or aggregator;
see below for examples.
* **namepass**:
An array of glob pattern strings. Only points whose measurement name matches
a pattern in this list are emitted.
* **namedrop**:
The inverse of `namepass`. If a match is found the point is discarded. This
is tested on points after they have passed the `namepass` test.
* **fieldpass**:
An array of glob pattern strings. Only fields whose field key matches a
pattern in this list are emitted.
* **fielddrop**:
The inverse of `fieldpass`. Fields with a field key matching one of the
patterns will be discarded from the point. This is tested on points after
they have passed the `fieldpass` test.
* **tagpass**:
A table mapping tag keys to arrays of glob pattern strings. Only points
that contain a tag key in the table and a tag value matching one of its
patterns is emitted.
* **tagdrop**:
The inverse of `tagpass`. If a match is found the point is discarded. This
is tested on points after they have passed the `tagpass` test.
* **taginclude**:
An array of glob pattern strings. Only tags with a tag key matching one of
the patterns are emitted. In contrast to `tagpass`, which will pass an entire
point based on its tag, `taginclude` removes all non-matching tags from the
point. This filter can be used on both inputs & outputs, but it is
_recommended_ to be used on inputs, as it is more efficient to filter out tags
at the ingestion point.
* **tagexclude**:
The inverse of `taginclude`. Tags with a tag key matching one of the patterns
will be discarded from the point.
**NOTE** Due to the way TOML is parsed, `tagpass` and `tagdrop` parameters
must be defined at the _end_ of the plugin definition, otherwise subsequent
plugin config options will be interpreted as part of the tagpass/tagdrop
tables.
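For a feel of how the pass/drop parameters behave, namepass/namedrop filtering can be sketched with standard-library glob matching. The helper below is hypothetical and uses `path.Match`-style patterns for illustration; it is not Telegraf's filter implementation, which may support richer glob syntax.

```go
package main

import (
	"fmt"
	"path"
)

// passName emits a metric name only if it matches some namepass pattern
// (when namepass is set) and matches no namedrop pattern.
func passName(name string, namepass, namedrop []string) bool {
	if len(namepass) > 0 {
		matched := false
		for _, p := range namepass {
			if ok, _ := path.Match(p, name); ok {
				matched = true
				break
			}
		}
		if !matched {
			return false
		}
	}
	for _, p := range namedrop {
		if ok, _ := path.Match(p, name); ok {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(passName("cpu_total", []string{"cpu*"}, nil))                    // true
	fmt.Println(passName("cpu_time_user", []string{"cpu*"}, []string{"*time*"})) // false
}
```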
#### Input Configuration Examples
This is a full working config that will output CPU data to an InfluxDB instance
at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output
measurements at a 10s interval and will collect per-cpu data, dropping any
fields which begin with `time_`.
`/etc/telegraf.conf`:
```toml
[global_tags]
user = "${USER}"
dc = "denver-1"
[[inputs.mem]]
[[outputs.influxdb]]
urls = ["${INFLUX_URL}"]
skip_database_creation = ${INFLUX_SKIP_DATABASE_CREATION}
password = "${INFLUX_PASSWORD}"
```
The above files will produce the following effective configuration file to be
parsed:
```toml
[global_tags]
user = "alice"
[[outputs.influxdb]]
urls = "http://localhost:8086"
skip_database_creation = true
password = "monkey123"
```
### Intervals
Intervals are durations of time and can be specified for supporting settings by
combining an integer value and time unit as a string value. Valid time units are
`ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.
```toml
[agent]
interval = "10s"
```
### Global Tags
Global tags can be specified in the `[global_tags]` table in key="value"
format. All metrics that are gathered will be tagged with the tags specified.
```toml
[global_tags]
dc = "us-east-1"
```
### Agent
The agent table configures Telegraf and the defaults used across all plugins.
- **interval**: Default data collection [interval][] for all inputs.
- **round_interval**: Rounds collection interval to [interval][]
ie, if interval="10s" then always collect on :00, :10, :20, etc.
- **metric_batch_size**:
Telegraf will send metrics to outputs in batches of at most
metric_batch_size metrics.
This controls the size of writes that Telegraf sends to output plugins.
- **metric_buffer_limit**:
Maximum number of unwritten metrics per output. Increasing this value
allows for longer periods of output downtime without dropping metrics at the
cost of higher maximum memory usage.
- **collection_jitter**:
Collection jitter is used to jitter the collection by a random [interval][].
Each plugin will sleep for a random time within jitter before collecting.
This can be used to avoid many plugins querying things like sysfs at the
same time, which can have a measurable effect on the system.
- **flush_interval**:
Default flushing [interval][] for all outputs. Maximum flush_interval will be
flush_interval + flush_jitter.
- **flush_jitter**:
Default flush jitter for all outputs. This jitters the flush [interval][]
by a random amount. This is primarily to avoid large write spikes for users
running a large number of telegraf instances. ie, a jitter of 5s and interval
10s means flushes will happen every 10-15s.
- **precision**:
Collected metrics are rounded to the precision specified as an [interval][].
Precision will NOT be used for service inputs. It is up to each individual
service input to set the timestamp at the appropriate precision.
- **debug**:
Log at debug level.
- **quiet**:
Log only error level messages.
- **logtarget**:
Log target controls the destination for logs and can be one of "file",
"stderr" or, on Windows, "eventlog". When set to "file", the output file is
determined by the "logfile" setting.
- **logfile**:
Name of the file to be logged to when using the "file" logtarget. If set to
the empty string then logs are written to stderr.
- **logfile_rotation_interval**:
The logfile will be rotated after the time interval specified. When set to
0 no time based rotation is performed.
- **logfile_rotation_max_size**:
The logfile will be rotated when it becomes larger than the specified size.
When set to 0 no size based rotation is performed.
- **logfile_rotation_max_archives**:
Maximum number of rotated archives to keep; any older logs are deleted. If
set to -1, no archives are removed.
- **hostname**:
Override default hostname, if empty use os.Hostname()
- **omit_hostname**:
If set to true, do not set the "host" tag in the telegraf agent.
### Plugins
Telegraf plugins are divided into 4 types: [inputs][], [outputs][],
[processors][], and [aggregators][].
Unlike the `global_tags` and `agent` tables, any plugin can be defined
multiple times and each instance will run independently. This allows you to
have plugins defined with differing configurations as needed within a single
Telegraf process.
Each plugin has a unique set of configuration options, reference the
sample configuration for details. Additionally, several options are available
on any plugin depending on its type.
### Input Plugins
Input plugins gather and create metrics. They support both polling and event
driven operation.
Parameters that can be used with any input plugin:
- **alias**: Name an instance of a plugin.
- **interval**: How often to gather this metric. Normal plugins use a single
global interval, but if one particular input should be run less or more
often, you can configure that here.
- **name_override**: Override the base name of the measurement. (Default is
the name of the input).
- **name_prefix**: Specifies a prefix to attach to the measurement name.
- **name_suffix**: Specifies a suffix to attach to the measurement name.
- **tags**: A map of tags to apply to a specific input's measurements.
The [metric filtering][] parameters can be used to limit what metrics are
emitted from the input plugin.
#### Examples
Use the name_suffix parameter to emit measurements with the name `cpu_total`:
```toml
[[inputs.cpu]]
name_suffix = "_total"
percpu = false
totalcpu = true
```
Use the name_override parameter to emit measurements with the name `foobar`:
```toml
[[inputs.cpu]]
name_override = "foobar"
percpu = false
totalcpu = true
```
Emit measurements with two additional tags: `tag1=foo` and `tag2=bar`
> **NOTE**: With TOML, order matters. Parameters belong to the last defined
> table header; place the `[inputs.cpu.tags]` table at the _end_ of the plugin
> definition.
```toml
[[inputs.cpu]]
percpu = false
totalcpu = true
[inputs.cpu.tags]
tag1 = "foo"
tag2 = "bar"
```
Utilize `name_override`, `name_prefix`, or `name_suffix` config options to
avoid measurement collisions when defining multiple plugins:
```toml
[[inputs.cpu]]
percpu = false
totalcpu = true
# OUTPUTS
[[outputs.influxdb]]
url = "http://192.168.59.103:8086" # required.
database = "telegraf" # required.
# INPUTS
[[inputs.cpu]]
percpu = true
totalcpu = false
name_override = "percpu_usage"
fielddrop = ["cpu_time*"]
# filter all fields beginning with 'time_'
fielddrop = ["time_*"]
```
### Output Plugins
#### Input Config: tagpass and tagdrop
Output plugins write metrics to a location. Outputs commonly write to
databases, network services, and messaging systems.
**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
the plugin definition, otherwise subsequent plugin config options will be
interpreted as part of the tagpass/tagdrop map.
Parameters that can be used with any output plugin:
- **alias**: Name an instance of a plugin.
- **flush_interval**: The maximum time between flushes. Use this setting to
override the agent `flush_interval` on a per plugin basis.
- **flush_jitter**: The amount of time to jitter the flush interval. Use this
setting to override the agent `flush_jitter` on a per plugin basis.
- **metric_batch_size**: The maximum number of metrics to send at once. Use
this setting to override the agent `metric_batch_size` on a per plugin basis.
- **metric_buffer_limit**: The maximum number of unsent metrics to buffer.
Use this setting to override the agent `metric_buffer_limit` on a per plugin
basis.
- **name_override**: Override the original name of the measurement.
- **name_prefix**: Specifies a prefix to attach to the measurement name.
- **name_suffix**: Specifies a suffix to attach to the measurement name.
The [metric filtering][] parameters can be used to limit what metrics are
emitted from the output plugin.
#### Examples
Override flush parameters for a single output:
```toml
[agent]
flush_interval = "10s"
flush_jitter = "5s"
metric_batch_size = 1000
[[outputs.influxdb]]
urls = [ "http://example.org:8086" ]
database = "telegraf"
[[outputs.file]]
files = [ "stdout" ]
flush_interval = "1s"
flush_jitter = "1s"
metric_batch_size = 10
```
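`alias` together with the per-plugin batch and buffer settings can likewise
distinguish two outputs of the same type; a minimal sketch, with placeholder
URLs and illustrative sizes:
```toml
[[outputs.influxdb]]
  alias = "primary"
  urls = [ "http://primary.example.org:8086" ]   # placeholder
  database = "telegraf"

[[outputs.influxdb]]
  alias = "archive"
  urls = [ "http://archive.example.org:8086" ]   # placeholder
  database = "telegraf"
  metric_batch_size = 5000        # larger batches for the slower endpoint
  metric_buffer_limit = 100000    # buffer more unsent metrics
  flush_interval = "30s"
```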
### Processor Plugins
Processor plugins perform processing tasks on metrics and are commonly used to
rename or apply transformations to metrics. Processors are applied after the
input plugins and before any aggregator plugins.
Parameters that can be used with any processor plugin:
- **alias**: Name an instance of a plugin.
- **order**: The order in which the processor(s) are executed. If this is not
specified then processor execution order will be random.
The [metric filtering][] parameters can be used to limit what metrics are
handled by the processor. Excluded metrics are passed downstream to the next
processor.
#### Examples
If the order in which processors are applied matters, you must set `order` on
all involved processors:
```toml
[[processors.rename]]
order = 1
[[processors.rename.replace]]
tag = "path"
dest = "resource"
[[processors.strings]]
order = 2
[[processors.strings.trim_prefix]]
tag = "resource"
prefix = "/api/"
```
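Since processors also accept the [metric filtering][] parameters, a processor
can be limited to a subset of metrics; a minimal sketch that applies the rename
only to metrics named `disk` (all other metrics skip the processor and continue
downstream):
```toml
[[processors.rename]]
  namepass = ["disk"]             # only handle metrics named "disk"
  order = 1
  [[processors.rename.replace]]
    tag = "path"
    dest = "resource"
```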
### Aggregator Plugins
Aggregator plugins produce new metrics after examining metrics over a time
period; as the name suggests, they are commonly used to produce aggregates
such as mean/max/min metrics. Aggregators operate on metrics after any
processors have been applied.
Parameters that can be used with any aggregator plugin:
- **alias**: Name an instance of a plugin.
- **period**: The period on which to flush & clear each aggregator. All
metrics that are sent with timestamps outside of this period will be ignored
by the aggregator.
- **delay**: The delay before each aggregator is flushed. Use this to control
how long aggregators wait for metrics from input plugins when the aggregators
flush and the inputs gather on the same interval.
- **grace**: The duration for which metrics are still aggregated by the
plugin even though they arrive outside of the aggregation period. This is
useful when the agent is expected to receive late metrics and it is
acceptable to roll them up into the next aggregation period.
- **drop_original**: If true, the original metric will be dropped by the
aggregator and will not get sent to the output plugins.
- **name_override**: Override the base name of the measurement. (Default is
the name of the input).
- **name_prefix**: Specifies a prefix to attach to the measurement name.
- **name_suffix**: Specifies a suffix to attach to the measurement name.
- **tags**: A map of tags to apply to a specific input's measurements.
The [metric filtering][] parameters can be used to limit what metrics are
handled by the aggregator. Excluded metrics are passed downstream to the next
aggregator.
#### Examples
Collect and emit the min/max of the system load1 metric every 30s, dropping
the originals.
```toml
[[inputs.system]]
fieldpass = ["load1"] # collects system load1 metric.
[[aggregators.minmax]]
period = "30s" # send & clear the aggregate every 30s.
drop_original = true # drop the original metrics.
[[outputs.file]]
files = ["stdout"]
```
Collect and emit the min/max of the swap metrics every 30s, dropping the
originals. The aggregator will not be applied to the system load metrics due
to the `namepass` parameter.
```toml
[[inputs.swap]]
[[inputs.system]]
fieldpass = ["load1"] # collects system load1 metric.
[[aggregators.minmax]]
period = "30s" # send & clear the aggregate every 30s.
drop_original = true # drop the original metrics.
namepass = ["swap"] # only "pass" swap metrics through the aggregator.
[[outputs.file]]
files = ["stdout"]
```
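The `delay` and `grace` options can be added when inputs and aggregators run on
the same interval or when late metrics are expected; a minimal sketch extending
the example above (the specific durations are illustrative):
```toml
[[aggregators.minmax]]
  period = "30s"          # send & clear the aggregate every 30s.
  delay = "100ms"         # wait briefly for inputs gathering on the same interval.
  grace = "10s"           # still accept metrics arriving up to 10s late.
  drop_original = true    # drop the original metrics.
```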
<a id="measurement-filtering"></a>
### Metric Filtering
Metric filtering can be configured per plugin on any input, output, processor,
and aggregator plugin. Filters fall under two categories: Selectors and
Modifiers.
#### Selectors
Selector filters include or exclude entire metrics. When a metric is excluded
from an Input or an Output plugin, the metric is dropped. If a metric is
excluded from a Processor or Aggregator plugin, it skips the plugin and is
sent onwards to the next stage of processing.
- **namepass**:
An array of glob pattern strings. Only metrics whose measurement name matches
a pattern in this list are emitted.
- **namedrop**:
The inverse of `namepass`. If a match is found the metric is discarded. This
is tested on metrics after they have passed the `namepass` test.
- **tagpass**:
A table mapping tag keys to arrays of glob pattern strings. Only metrics
that contain a tag key in the table and a tag value matching one of its
patterns are emitted.
- **tagdrop**:
The inverse of `tagpass`. If a match is found the metric is discarded. This
is tested on metrics after they have passed the `tagpass` test.

> **NOTE**: `tagpass` and `tagdrop` parameters must be defined at the _end_ of
> the plugin definition, otherwise subsequent plugin config options will be
> interpreted as part of the tagpass/tagdrop map.
#### Modifiers
Modifier filters remove tags and fields from a metric. If all fields are
removed the metric is removed.
- **fieldpass**:
An array of glob pattern strings. Only fields whose field key matches a
pattern in this list are emitted.
- **fielddrop**:
The inverse of `fieldpass`. Fields with a field key matching one of the
patterns will be discarded from the metric. This is tested on metrics after
they have passed the `fieldpass` test.
- **taginclude**:
An array of glob pattern strings. Only tags with a tag key matching one of
the patterns are emitted. In contrast to `tagpass`, which will pass an entire
metric based on its tag, `taginclude` removes all non matching tags from the
metric. Any tag can be filtered including global tags and the agent `host`
tag.
- **tagexclude**:
The inverse of `taginclude`. Tags with a tag key matching one of the patterns
will be discarded from the metric. Any tag can be filtered including global
tags and the agent `host` tag.
#### Filtering Examples
##### Using tagpass and tagdrop:
```toml
[[inputs.cpu]]
percpu = true
totalcpu = false
fielddrop = ["cpu_time"]
# Don't collect CPU data for cpu6 & cpu7
[inputs.cpu.tagdrop]
cpu = [ "cpu6", "cpu7" ]

[[inputs.disk]]
[inputs.disk.tagpass]
# tagpass conditions are OR, not AND.
# If the (filesystem is ext4 or xfs) OR (the path is /opt or /home)
# then the metric passes
fstype = [ "ext4", "xfs" ]
# Globs can also be used on the tag values
path = [ "/opt", "/home*" ]
[[inputs.win_perf_counters]]
[[inputs.win_perf_counters.object]]
ObjectName = "Network Interface"
Instances = ["*"]
Counters = [
"Bytes Received/sec",
"Bytes Sent/sec"
]
Measurement = "win_net"
# Don't send metrics where the Windows interface name (instance) begins with isatap or Local
[inputs.win_perf_counters.tagdrop]
instance = ["isatap*", "Local*"]
```
##### Using fieldpass and fielddrop:
```toml
# Drop all metrics for guest & steal CPU usage
[[inputs.cpu]]
percpu = false
totalcpu = true
fielddrop = ["usage_guest", "usage_steal"]

# Only store inode related metrics for disks
[[inputs.disk]]
fieldpass = ["inodes*"]
```
##### Using namepass and namedrop:
```toml
# Drop all metrics about containers for kubelet
[[inputs.prometheus]]
urls = ["http://kube-node-1:4194/metrics"]
namedrop = ["container_*"]

# Only store rest client related metrics for kubelet
[[inputs.prometheus]]
urls = ["http://kube-node-1:4194/metrics"]
namepass = ["rest_client_*"]
```
##### Using taginclude and tagexclude:
```toml
# Only include the "cpu" tag in the measurements for the cpu plugin.
[[inputs.cpu]]
percpu = true
totalcpu = true
taginclude = ["cpu"]

# Exclude the "fstype" tag from the measurements for the disk plugin.
[[inputs.disk]]
tagexclude = ["fstype"]
```
##### Metrics can be routed to different outputs using the metric name and tags:
```toml
[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf"
# Drop all measurements that start with "aerospike"
namedrop = ["aerospike*"]

[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf-aerospike-data"
# Only accept aerospike data:
namepass = ["aerospike*"]

[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf-cpu0-data"
# Only store measurements where the tag "cpu" matches the value "cpu0"
[outputs.influxdb.tagpass]
cpu = ["cpu0"]
```
##### Routing metrics to different outputs based on the input.
Metrics are tagged with `influxdb_database` in the input, which is then used to
select the output. The tag is removed in the outputs before writing.
```toml
[[outputs.influxdb]]
urls = ["http://influxdb.example.com"]
database = "db_default"
[outputs.influxdb.tagdrop]
influxdb_database = ["*"]
[[outputs.influxdb]]
urls = ["http://influxdb.example.com"]
database = "db_other"
tagexclude = ["influxdb_database"]
[outputs.influxdb.tagpass]
influxdb_database = ["other"]
[[inputs.disk]]
[inputs.disk.tags]
influxdb_database = "other"
```
### Transport Layer Security (TLS)
Reference the detailed [TLS][] documentation.
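As a quick orientation before reading the full [TLS][] document, here is a
minimal sketch of the common client-side options; the certificate paths are
placeholders and the exact options supported vary by plugin:
```toml
[[outputs.influxdb]]
  urls = ["https://influxdb.example.com:8086"]
  ## Optional TLS configuration (paths are placeholders)
  tls_ca = "/etc/telegraf/ca.pem"
  tls_cert = "/etc/telegraf/cert.pem"
  tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false
```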
[TOML]: https://github.com/toml-lang/toml#toml
[global tags]: #global-tags
[interval]: #intervals
[agent]: #agent
[plugins]: #plugins
[inputs]: #input-plugins
[outputs]: #output-plugins
[processors]: #processor-plugins
[aggregators]: #aggregator-plugins
[metric filtering]: #metric-filtering
[telegraf.conf]: /etc/telegraf.conf
[TLS]: /docs/TLS.md
#### Processor Configuration Examples:
Print only the metrics with `cpu` as the measurement name; all metrics are
passed on to the output:
```toml
[[processors.printer]]
namepass = "cpu"
[[outputs.file]]
files = ["/tmp/metrics.out"]
```

# Telegraf Input Data Formats
Telegraf is able to parse the following input data formats into metrics:
1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx)
1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#json)
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite)
1. [Value](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#value), ie: 45 or "booyah"
1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only)
1. [Collectd](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#collectd)
1. [Dropwizard](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#dropwizard)
1. [Grok](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#grok)
Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
are a combination of four basic parts:
1. Measurement Name
1. Tags
1. Fields
1. Timestamp
These four parts are easily defined when using InfluxDB line-protocol as a
data format. But there are other data formats that users may want to use which
require more advanced configuration to create usable Telegraf metrics.
Plugins such as `exec` and `kafka_consumer` parse textual data. Up until now,
these plugins were statically configured to parse just a single data format:
`exec` supported only JSON, and `kafka_consumer` only supported data in
InfluxDB line-protocol.
But now we are normalizing the parsing of various data formats across all
plugins that can support it. You will be able to identify a plugin that supports
different data formats by the presence of a `data_format` config option, for
example, in the exec plugin:
```toml
[[inputs.exec]]
## Commands array
commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]

## measurement name suffix (for separating different commands)
name_suffix = "_mycollector"

## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "json"
## Additional configuration options go here
```
Each data_format has an additional set of configuration options available, which
I'll go over below.
# Influx:
There are no additional configuration options for InfluxDB line-protocol. The
metrics are parsed directly into Telegraf metrics.
#### Influx Configuration:
```toml
[[inputs.exec]]
## Commands array
commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
## measurement name suffix (for separating different commands)
name_suffix = "_mycollector"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
```
# JSON:
The JSON data format flattens JSON into metric _fields_.
NOTE: Only numerical values are converted to fields, and they are converted
into a float. Strings are ignored unless specified as a tag_key (see below).
So for example, this JSON:
```json
{
"a": 5,
"b": {
"c": 6
},
"ignored": "I'm a string"
}
```
Would get translated into _fields_ of a measurement:
```
myjsonmetric a=5,b_c=6
```
The measurement name is usually the name of the plugin,
but can be overridden using the `name_override` config option.
#### JSON Configuration:
The JSON data format supports specifying "tag keys". If specified, keys
will be searched for in the root-level of the JSON blob. If the key(s) exist,
they will be applied as tags to the Telegraf metrics.
For example, if you had this configuration:
```toml
[[inputs.exec]]
## Commands array
commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
## measurement name suffix (for separating different commands)
name_suffix = "_mycollector"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "json"
## List of tag names to extract from top-level of JSON server response
tag_keys = [
"my_tag_1",
"my_tag_2"
]
```
with this JSON output from a command:
```json
{
"a": 5,
"b": {
"c": 6
},
"my_tag_1": "foo"
}
```
Your Telegraf metrics would get tagged with "my_tag_1"
```
exec_mycollector,my_tag_1=foo a=5,b_c=6
```
If the JSON data is an array, then each element of the array is parsed with the configured settings.
Each resulting metric will be output with the same timestamp.
For example, if the following configuration:
```toml
[[inputs.exec]]
## Commands array
commands = ["/usr/bin/mycollector --foo=bar"]
## measurement name suffix (for separating different commands)
name_suffix = "_mycollector"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "json"
## List of tag names to extract from top-level of JSON server response
tag_keys = [
"my_tag_1",
"my_tag_2"
]
```
with this JSON output from a command:
```json
[
{
"a": 5,
"b": {
"c": 6
},
"my_tag_1": "foo",
"my_tag_2": "baz"
},
{
"a": 7,
"b": {
"c": 8
},
"my_tag_1": "bar",
"my_tag_2": "baz"
}
]
```
Your Telegraf metrics would get tagged with "my_tag_1" and "my_tag_2"
```
exec_mycollector,my_tag_1=foo,my_tag_2=baz a=5,b_c=6
exec_mycollector,my_tag_1=bar,my_tag_2=baz a=7,b_c=8
```
# Value:
The "value" data format translates single values into Telegraf metrics. This
is done by assigning a measurement name and setting a single field ("value")
as the parsed metric.
#### Value Configuration:
You **must** tell Telegraf what type of metric to collect by using the
`data_type` configuration option. Available options are:
1. integer
2. float or long
3. string
4. boolean
**Note:** It is also recommended that you set `name_override` to a measurement
name that makes sense for your metric, otherwise it will just be set to the
name of the plugin.
```toml
[[inputs.exec]]
## Commands array
commands = ["cat /proc/sys/kernel/random/entropy_avail"]
## override the default metric name of "exec"
name_override = "entropy_available"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "value"
data_type = "integer" # required
```
# Graphite:
The Graphite data format translates graphite _dot_ buckets directly into
telegraf measurement names, with a single value field, and without any tags.
By default, the separator is left as ".", but this can be changed using the
"separator" argument. For more advanced options,
Telegraf supports specifying "templates" to translate
graphite buckets into Telegraf metrics.
Templates are of the form:
```
"host.mytag.mytag.measurement.measurement.field*"
```
Where the following keywords exist:
1. `measurement`: specifies that this section of the graphite bucket corresponds
to the measurement name. This can be specified multiple times.
2. `field`: specifies that this section of the graphite bucket corresponds
to the field name. This can be specified multiple times.
3. `measurement*`: specifies that all remaining elements of the graphite bucket
correspond to the measurement name.
4. `field*`: specifies that all remaining elements of the graphite bucket
correspond to the field name.
Any part of the template that is not a keyword is treated as a tag key. This
can also be specified multiple times.
NOTE: `field*` cannot be used in conjunction with `measurement*`!
#### Measurement & Tag Templates:
The most basic template is to specify a single transformation to apply to all
incoming metrics. So the following template:
```toml
templates = [
"region.region.measurement*"
]
```
would result in the following Graphite -> Telegraf transformation.
```
us.west.cpu.load 100
=> cpu.load,region=us.west value=100
```
Multiple templates can also be specified, but these should be differentiated
using _filters_ (see below for more details)
```toml
templates = [
"*.*.* region.region.measurement", # <- all 3-part measurements will match this one.
"*.*.*.* region.region.host.measurement", # <- all 4-part measurements will match this one.
]
```
#### Field Templates:
The field keyword tells Telegraf to give the metric that field name.
So the following template:
```toml
separator = "_"
templates = [
"measurement.measurement.field.field.region"
]
```
would result in the following Graphite -> Telegraf transformation.
```
cpu.usage.idle.percent.eu-east 100
=> cpu_usage,region=eu-east idle_percent=100
```
The field key can also be derived from all remaining elements of the graphite
bucket by specifying `field*`:
```toml
separator = "_"
templates = [
"measurement.measurement.region.field*"
]
```
which would result in the following Graphite -> Telegraf transformation.
```
cpu.usage.eu-east.idle.percentage 100
=> cpu_usage,region=eu-east idle_percentage=100
```
#### Filter Templates:
Users can also filter the template(s) to use based on the name of the bucket,
using glob matching, like so:
```toml
templates = [
"cpu.* measurement.measurement.region",
"mem.* measurement.measurement.host"
]
```
which would result in the following transformation:
```
cpu.load.eu-east 100
=> cpu_load,region=eu-east value=100
mem.cached.localhost 256
=> mem_cached,host=localhost value=256
```
#### Adding Tags:
Additional tags that don't exist on the received metric can be added by
specifying them after the pattern. Tags have the same format as in the line
protocol, and multiple tags are separated by commas.
```toml
templates = [
"measurement.measurement.field.region datacenter=1a"
]
```
would result in the following Graphite -> Telegraf transformation.
```
cpu.usage.idle.eu-east 100
=> cpu_usage,region=eu-east,datacenter=1a idle=100
```
There are many more options available;
[more details can be found here](https://github.com/influxdata/influxdb/tree/master/services/graphite#templates).
#### Graphite Configuration:
```toml
[[inputs.exec]]
## Commands array
commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
## measurement name suffix (for separating different commands)
name_suffix = "_mycollector"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "graphite"
## This string will be used to join the matched values.
separator = "_"
## Each template line requires a template pattern. It can have an optional
## filter before the template and separated by spaces. It can also have optional extra
## tags following the template. Multiple tags should be separated by commas and no spaces
## similar to the line protocol format. There can be only one default template.
## Templates support below format:
## 1. filter + template
## 2. filter + template + extra tag(s)
## 3. filter + template with field key
## 4. default template
templates = [
"*.app env.service.resource.measurement",
"stats.* .host.measurement* region=eu-east,agent=sensu",
"stats2.* .host.measurement.field",
"measurement*"
]
```
# Nagios:
There are no additional configuration options for Nagios line-protocol. The
metrics are parsed directly into Telegraf metrics.
Note: The Nagios input data format is only supported by the `exec` input plugin.
#### Nagios Configuration:
```toml
[[inputs.exec]]
## Commands array
commands = ["/usr/lib/nagios/plugins/check_load -w 5,6,7 -c 7,8,9"]
## measurement name suffix (for separating different commands)
name_suffix = "_mycollector"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "nagios"
```
# Collectd:
The collectd format parses the collectd binary network protocol. Tags are
created for host, instance, type, and type instance. All collectd values are
added as float64 fields.
For more information about the binary network protocol see
[here](https://collectd.org/wiki/index.php/Binary_protocol).
You can control the cryptographic settings with parser options. Create an
authentication file and set `collectd_auth_file` to the path of the file, then
set the desired security level in `collectd_security_level`.
Additional information including client setup can be found
[here](https://collectd.org/wiki/index.php/Networking_introduction#Cryptographic_setup).
You can also change the path to the typesdb or add additional typesdb using
`collectd_typesdb`.
#### Collectd Configuration:
```toml
[[inputs.socket_listener]]
service_address = "udp://127.0.0.1:25826"
name_prefix = "collectd_"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "collectd"
## Authentication file for cryptographic security levels
collectd_auth_file = "/etc/collectd/auth_file"
## One of none (default), sign, or encrypt
collectd_security_level = "encrypt"
## Path to TypesDB specifications
collectd_typesdb = ["/usr/share/collectd/types.db"]
```
# Dropwizard:
The dropwizard format can parse the JSON representation of a single dropwizard metric registry. By default, tags are parsed from metric names as if they were actual influxdb line protocol keys (`measurement<,tag_set>`) which can be overriden by defining custom [measurement & tag templates](./DATA_FORMATS_INPUT.md#measurement--tag-templates). All field value types are supported, `string`, `number` and `boolean`.
A typical JSON of a dropwizard metric registry:
```json
{
"version": "3.0.0",
"counters" : {
"measurement,tag1=green" : {
"count" : 1
}
},
"meters" : {
"measurement" : {
"count" : 1,
"m15_rate" : 1.0,
"m1_rate" : 1.0,
"m5_rate" : 1.0,
"mean_rate" : 1.0,
"units" : "events/second"
}
},
"gauges" : {
"measurement" : {
"value" : 1
}
},
"histograms" : {
"measurement" : {
"count" : 1,
"max" : 1.0,
"mean" : 1.0,
"min" : 1.0,
"p50" : 1.0,
"p75" : 1.0,
"p95" : 1.0,
"p98" : 1.0,
"p99" : 1.0,
"p999" : 1.0,
"stddev" : 1.0
}
},
"timers" : {
"measurement" : {
"count" : 1,
"max" : 1.0,
"mean" : 1.0,
"min" : 1.0,
"p50" : 1.0,
"p75" : 1.0,
"p95" : 1.0,
"p98" : 1.0,
"p99" : 1.0,
"p999" : 1.0,
"stddev" : 1.0,
"m15_rate" : 1.0,
"m1_rate" : 1.0,
"m5_rate" : 1.0,
"mean_rate" : 1.0,
"duration_units" : "seconds",
"rate_units" : "calls/second"
}
}
}
```
Would get translated into 4 different measurements:
```
measurement,metric_type=counter,tag1=green count=1
measurement,metric_type=meter count=1,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0
measurement,metric_type=gauge value=1
measurement,metric_type=histogram count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0
measurement,metric_type=timer count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0,stddev=1.0,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0
```
You may also parse a dropwizard registry from any JSON document that contains
a dropwizard registry in some inner field. For example, to parse the following
JSON document:
```json
{
"time" : "2017-02-22T14:33:03.662+02:00",
"tags" : {
"tag1" : "green",
"tag2" : "yellow"
},
"metrics" : {
"counters" : {
"measurement" : {
"count" : 1
}
},
"meters" : {},
"gauges" : {},
"histograms" : {},
"timers" : {}
}
}
```
and translate it into:
```
measurement,metric_type=counter,tag1=green,tag2=yellow count=1 1487766783662000000
```
you simply need to use the following additional configuration properties:
```toml
dropwizard_metric_registry_path = "metrics"
dropwizard_time_path = "time"
dropwizard_time_format = "2006-01-02T15:04:05Z07:00"
dropwizard_tags_path = "tags"
## tag paths per tag are supported too, eg.
#[inputs.yourinput.dropwizard_tag_paths]
# tag1 = "tags.tag1"
# tag2 = "tags.tag2"
```
For more information about the dropwizard json format see
[here](http://metrics.dropwizard.io/3.1.0/manual/json/).
#### Dropwizard Configuration:
```toml
[[inputs.exec]]
## Commands array
commands = ["curl http://localhost:8080/sys/metrics"]
timeout = "5s"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "dropwizard"
## Used by the templating engine to join matched values when cardinality is > 1
separator = "_"
## Each template line requires a template pattern. It can have an optional
## filter before the template and separated by spaces. It can also have optional extra
## tags following the template. Multiple tags should be separated by commas and no spaces
## similar to the line protocol format. There can be only one default template.
## Templates support below format:
## 1. filter + template
## 2. filter + template + extra tag(s)
## 3. filter + template with field key
## 4. default template
## By providing an empty template array, templating is disabled and measurements are parsed as influxdb line protocol keys (measurement<,tag_set>)
templates = []
## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax)
## to locate the metric registry within the JSON document
# dropwizard_metric_registry_path = "metrics"
## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax)
## to locate the default time of the measurements within the JSON document
# dropwizard_time_path = "time"
# dropwizard_time_format = "2006-01-02T15:04:05Z07:00"
## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax)
## to locate the tags map within the JSON document
# dropwizard_tags_path = "tags"
## You may even use tag paths per tag
# [inputs.exec.dropwizard_tag_paths]
# tag1 = "tags.tag1"
# tag2 = "tags.tag2"
```
# Grok:
Parse logstash-style "grok" patterns:
```toml
[[inputs.reader]]
## This is a list of patterns to check the given log file(s) for.
## Note that adding patterns here increases processing time. The most
## efficient configuration is to have one pattern per logparser.
## Other common built-in patterns are:
## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
patterns = ["%{COMBINED_LOG_FORMAT}"]
## Name of the output measurement.
name_override = "apache_access_log"
## Full path(s) to custom pattern files.
custom_pattern_files = []
## Custom patterns can also be defined here. Put one pattern per line.
custom_patterns = '''
'''
## Timezone allows you to provide an override for timestamps that
## don't already include an offset
## e.g. 04/06/2016 12:41:45 data one two 5.43µs
##
## Default: "" which renders UTC
## Options are as follows:
## 1. Local -- interpret based on machine localtime
## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
## 3. UTC -- or blank/unspecified, will return timestamp in UTC
timezone = "Canada/Eastern"
```
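As an illustration of the `custom_patterns` option above, here is a minimal
sketch that defines a pattern for a hypothetical application log; the log
layout, pattern name, and field names are invented for the example, and the
`:tag`, `:float`, and `:ts-"..."` modifiers set the tag/field types and the
timestamp layout:
```toml
[[inputs.reader]]
  name_override = "app_log"
  patterns = ["%{APP_LOG}"]
  ## One custom pattern per line (hypothetical log format)
  custom_patterns = '''
APP_LOG %{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02T15:04:05Z07:00"} %{WORD:level:tag} took %{NUMBER:duration:float}ms
  '''
```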

# Telegraf Output Data Formats

In addition to output specific data formats, Telegraf supports a set of
standard data formats that may be selected from when configuring many output
plugins.
1. [InfluxDB Line Protocol](#influx)
1. [JSON](#json)
1. [Graphite](#graphite)
You will be able to identify the plugins with support by the presence of a
`data_format` config option, for example, in the `file` output plugin:
```toml
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]

## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
```
## Influx
The `influx` data format outputs metrics using
[InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/latest/write_protocols/line_protocol_tutorial/).
This is the recommended format unless another format is required for
interoperability.
### Influx Configuration
```toml
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
## Maximum line length in bytes. Useful only for debugging.
# influx_max_line_bytes = 0
## When true, fields will be output in ascending lexical order. Enabling
## this option will result in decreased performance and is only recommended
## when you need predictable ordering while debugging.
# influx_sort_fields = false
## When true, Telegraf will output unsigned integers as unsigned values,
## i.e.: `42u`. You will need a version of InfluxDB supporting unsigned
## integer values. Enabling this option will result in field type errors if
## existing data has been written.
# influx_uint_support = false
```
## Graphite
The Graphite data format is translated from Telegraf Metrics using either the
template pattern or tag support method. You can select between the two
methods using the [`graphite_tag_support`](#graphite-tag-support) option. When set, the tag support
method is used, otherwise the [`template` pattern](#template-pattern) is used.
#### Template Pattern
The `template` option describes how Telegraf translates metrics into _dot_
buckets. The default template is:
```
template = "host.tags.measurement.field"
```
In the above template, we have four parts:
1. _host_ is a tag key. This can be any tag key that is in the Telegraf
metric(s). If the key doesn't exist, it will be ignored. If it does exist, the
tag value will be filled in.
1. _tags_ is a special keyword that outputs all remaining tag values, separated
by dots and in alphabetical order (by tag key). These will be filled after all
tag keys are filled.
1. _measurement_ is a special keyword that outputs the measurement name.
1. _field_ is a special keyword that outputs the field name.
**Example Conversion**:
```
cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758
=>
tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
```
Fields with string values will be skipped. Boolean fields will be converted
to 1 (true) or 0 (false).
#### Graphite Tag Support
When the `graphite_tag_support` option is enabled, the template pattern is not
used. Instead, tags are encoded using
[Graphite tag support](http://graphite.readthedocs.io/en/latest/tags.html)
added in Graphite 1.1. The `metric_path` is a combination of the optional
`prefix` option, measurement name, and field name.
The tag `name` is reserved by Graphite; any conflicting tags will be encoded as `_name`.
**Example Conversion**:
```
cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758
=>
cpu.usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320690
cpu.usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320690
```
### Graphite Configuration
```toml
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "graphite"
## Prefix added to each graphite bucket
prefix = "telegraf"
## Graphite template pattern
template = "host.tags.measurement.field"
## Support Graphite tags, recommended to enable when using Graphite 1.1 or later.
# graphite_tag_support = false
```
## JSON
The JSON data format output for a single metric is in the form:
```json
{
"fields": {
"field_1": 30,
"field_2": 4,
"field_N": 59,
"n_images": 660
},
"name": "docker",
"tags": {
"host": "raynor"
},
"timestamp": 1458229140
}
```
When an output plugin needs to emit multiple metrics at one time, it may use
the batch format. The use of the batch format is determined by the plugin;
reference the documentation for the specific plugin.
```json
{
"metrics": [
{
"fields": {
"field_1": 30,
"field_2": 4,
"field_N": 59,
"n_images": 660
},
"name": "docker",
"tags": {
"host": "raynor"
},
"timestamp": 1458229140
},
{
"fields": {
"field_1": 30,
"field_2": 4,
"field_N": 59,
"n_images": 660
},
"name": "docker",
"tags": {
"host": "raynor"
},
"timestamp": 1458229140
}
]
}
```
### JSON Configuration
```toml
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "json"
## The resolution to use for the metric timestamp. Must be a duration string
## such as "1ns", "1us", "1ms", "10ms", "1s". Durations are truncated to
## the power of 10 less than the specified units.
json_timestamp_units = "1s"
```

### Q: How can I monitor the Docker Engine Host from within a container?

You will need to setup several volume mounts as well as some environment
variables:

```
docker run --name telegraf
-v /etc:/hostfs/etc:ro
-v /proc:/hostfs/proc:ro
-v /sys:/hostfs/sys:ro
-v /var:/hostfs/var:ro
-v /run:/hostfs/run:ro
-v /var/run/utmp:/var/run/utmp:ro
-e HOST_ETC=/hostfs/etc
-e HOST_PROC=/hostfs/proc
-e HOST_SYS=/hostfs/sys
-e HOST_VAR=/hostfs/var
-e HOST_RUN=/hostfs/run
-e HOST_MOUNT_PREFIX=/hostfs
telegraf
```
If running as a service add the environment variable to `/etc/default/telegraf`:

```
GODEBUG=netdns=cgo
```
### Q: How can I manage series cardinality?
High [series cardinality][], when not properly managed, can cause high load on
your database. Telegraf attempts to avoid creating series with high
cardinality, but some monitoring workloads, such as tracking containers, are
inherently high cardinality. These workloads can still be monitored, but care
must be taken to manage cardinality growth.
You can use the following techniques to avoid cardinality issues:
- Use [metric filtering][] options to exclude unneeded measurements and tags
(see the example after this list).
- Write to a database with an appropriate [retention policy][].
- Limit series cardinality in your database using the
[max-series-per-database][] and [max-values-per-tag][] settings.
- Consider using the [Time Series Index][tsi].
- Monitor your databases using the [show cardinality][] commands.
- Consult the [InfluxDB documentation][influx docs] for the most up-to-date techniques.
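For example, metric filtering can drop noisy tags and unneeded fields before
they reach the database; a minimal sketch (the plugin and the patterns below
are illustrative placeholders):
```toml
# Keep only the tags that are needed and drop unneeded fields before they
# are written.
[[inputs.cpu]]
  percpu = true
  totalcpu = true
  taginclude = ["host", "cpu"]
  fielddrop = ["time_*"]
```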
[series cardinality]: https://docs.influxdata.com/influxdb/v1.7/concepts/glossary/#series-cardinality
[metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering
[retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/
[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000
[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000
[tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/
[show cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality
[influx docs]: https://docs.influxdata.com/influxdb/latest/
### Q: When will the next version be released?
The latest release date estimate can be viewed on the

### Input Plugins
This section is for developers who want to create new collection inputs.
Telegraf is entirely plugin driven. This interface allows for operators to
pick and choose what is gathered and makes it easy for developers
to create new ways of generating metrics.
Plugin authorship is kept as simple as possible to encourage people to develop
and submit new inputs.
### Input Plugin Guidelines
- A plugin must conform to the [telegraf.Input][] interface.
- Input Plugins should call `inputs.Add` in their `init` function to register
themselves. See below for a quick example.
- Input Plugins must be added to the
`github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
- The `SampleConfig` function should return valid toml that describes how the
plugin can be configured. This is included in `telegraf config`. Please
consult the [SampleConfig][] page for the latest style
guidelines.
- The `Description` function should say in one line what this plugin does.
- Follow the recommended [CodeStyle][].
Let's say you've written a plugin that emits metrics about processes on the
current host.
### Input Plugin Example
```go
package simple
// simple.go
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
type Simple struct {
Ok bool `toml:"ok"`
}
func (s *Simple) Description() string {
return "a demo plugin"
}
func (s *Simple) SampleConfig() string {
return `
## Indicate if everything is fine
ok = true
`
}
func (s *Simple) Init() error {
return nil
}
func (s *Simple) Gather(acc telegraf.Accumulator) error {
if s.Ok {
acc.AddFields("state", map[string]interface{}{"value": "pretty good"}, nil)
} else {
acc.AddFields("state", map[string]interface{}{"value": "not great"}, nil)
}
return nil
}
func init() {
inputs.Add("simple", func() telegraf.Input { return &Simple{} })
}
```
### Development
* Run `make static` followed by `make plugin-[pluginName]` to spin up a docker
dev environment using docker-compose.
* ***[Optional]*** When developing a plugin, add a `dev` directory with a
`docker-compose.yml` and `telegraf.conf` as well as any other supporting
files, where sensible.
### Typed Metrics
In addition to the `AddFields` function, the accumulator also supports
functions to add typed metrics: `AddGauge`, `AddCounter`, etc. Metric types
are ignored by the InfluxDB output, but can be used for other outputs, such as
[prometheus][prom metric types].
### Data Formats
Some input plugins, such as the [exec][] plugin, can accept any supported
[input data formats][].
In order to enable this, you must specify a `SetParser(parser parsers.Parser)`
function on the plugin object (see the exec plugin for an example), as well as
defining `parser` as a field of the object.
You can then utilize the parser internally in your plugin, parsing data as you
see fit. Telegraf's configuration layer will take care of instantiating and
creating the `Parser` object.
Add the following to the `SampleConfig()`:
```toml
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
```
### Service Input Plugins
This section is for developers who want to create new "service" collection
inputs. A service plugin differs from a regular plugin in that it operates a
background service while Telegraf is running. One example would be the
`statsd` plugin, which operates a statsd server.
Service Input Plugins are substantially more complicated than a regular
plugin, as they will require threads and locks to verify data integrity.
Service Input Plugins should be avoided unless there is no way to create their
behavior with a regular plugin.
To create a Service Input implement the [telegraf.ServiceInput][] interface.
### Metric Tracking
Metric Tracking provides a system to be notified when metrics have been
successfully written to their outputs or otherwise discarded. This allows
inputs to be created that function as reliable queue consumers.
To get started with metric tracking begin by calling `WithTracking` on the
[telegraf.Accumulator][]. Add metrics using the `AddTrackingMetricGroup`
function on the returned [telegraf.TrackingAccumulator][] and store the
`TrackingID`. The `Delivered()` channel will return a type with information
about the final delivery status of the metric group.
Check the [amqp_consumer][] for an example implementation.
[exec]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec
[amqp_consumer]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/amqp_consumer
[prom metric types]: https://prometheus.io/docs/concepts/metric_types/
[input data formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig
[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle
[telegraf.Input]: https://godoc.org/github.com/influxdata/telegraf#Input
[telegraf.ServiceInput]: https://godoc.org/github.com/influxdata/telegraf#ServiceInput
[telegraf.Accumulator]: https://godoc.org/github.com/influxdata/telegraf#Accumulator
[telegraf.TrackingAccumulator]: https://godoc.org/github.com/influxdata/telegraf#TrackingAccumulator

When distributed in a binary form, Telegraf may contain portions of the
following works:
- cloud.google.com/go [Apache License 2.0](https://github.com/googleapis/google-cloud-go/blob/master/LICENSE)
- code.cloudfoundry.org/clock [Apache License 2.0](https://github.com/cloudfoundry/clock/blob/master/LICENSE)
- collectd.org [MIT License](https://git.octo.it/?p=collectd.git;a=blob;f=COPYING;hb=HEAD)
- github.com/Azure/azure-amqp-common-go [MIT License](https://github.com/Azure/azure-amqp-common-go/blob/master/LICENSE)
- github.com/Azure/azure-event-hubs-go [MIT License](https://github.com/Azure/azure-event-hubs-go/blob/master/LICENSE)
- github.com/Azure/azure-pipeline-go [MIT License](https://github.com/Azure/azure-pipeline-go/blob/master/LICENSE)
- github.com/Azure/azure-sdk-for-go [Apache License 2.0](https://github.com/Azure/azure-sdk-for-go/blob/master/LICENSE)
- github.com/Azure/azure-storage-queue-go [MIT License](https://github.com/Azure/azure-storage-queue-go/blob/master/LICENSE)
- github.com/Azure/go-amqp [MIT License](https://github.com/Azure/go-amqp/blob/master/LICENSE)
- github.com/Azure/go-autorest [Apache License 2.0](https://github.com/Azure/go-autorest/blob/master/LICENSE)
- github.com/Mellanox/rdmamap [Apache License 2.0](https://github.com/Mellanox/rdmamap/blob/master/LICENSE)
- github.com/Microsoft/ApplicationInsights-Go [MIT License](https://github.com/Microsoft/ApplicationInsights-Go/blob/master/LICENSE)
- github.com/Microsoft/go-winio [MIT License](https://github.com/Microsoft/go-winio/blob/master/LICENSE)
- github.com/Shopify/sarama [MIT License](https://github.com/Shopify/sarama/blob/master/LICENSE)
- github.com/StackExchange/wmi [MIT License](https://github.com/StackExchange/wmi/blob/master/LICENSE)
- github.com/aerospike/aerospike-client-go [Apache License 2.0](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE)
- github.com/alecthomas/units [MIT License](https://github.com/alecthomas/units/blob/master/COPYING)
- github.com/amir/raidman [The Unlicense](https://github.com/amir/raidman/blob/master/UNLICENSE)
- github.com/apache/thrift [Apache License 2.0](https://github.com/apache/thrift/blob/master/LICENSE)
- github.com/aristanetworks/glog [Apache License 2.0](https://github.com/aristanetworks/glog/blob/master/LICENSE)
- github.com/aristanetworks/goarista [Apache License 2.0](https://github.com/aristanetworks/goarista/blob/master/COPYING)
- github.com/aws/aws-sdk-go [Apache License 2.0](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
- github.com/benbjohnson/clock [MIT License](https://github.com/benbjohnson/clock/blob/master/LICENSE)
- github.com/beorn7/perks [MIT License](https://github.com/beorn7/perks/blob/master/LICENSE)
- github.com/caio/go-tdigest [MIT License](https://github.com/caio/go-tdigest/blob/master/LICENSE)
- github.com/cenkalti/backoff [MIT License](https://github.com/cenkalti/backoff/blob/master/LICENSE)
- github.com/cespare/xxhash [MIT License](https://github.com/cespare/xxhash/blob/master/LICENSE.txt)
- github.com/cisco-ie/nx-telemetry-proto [Apache License 2.0](https://github.com/cisco-ie/nx-telemetry-proto/blob/master/LICENSE)
- github.com/couchbase/go-couchbase [MIT License](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
- github.com/couchbase/gomemcached [MIT License](https://github.com/couchbase/gomemcached/blob/master/LICENSE)
- github.com/couchbase/goutils [COUCHBASE INC. COMMUNITY EDITION LICENSE](https://github.com/couchbase/goutils/blob/master/LICENSE.md)
- github.com/davecgh/go-spew [ISC License](https://github.com/davecgh/go-spew/blob/master/LICENSE)
- github.com/denisenkom/go-mssqldb [BSD 3-Clause "New" or "Revised" License](https://github.com/denisenkom/go-mssqldb/blob/master/LICENSE.txt)
- github.com/devigned/tab [MIT License](https://github.com/devigned/tab/blob/master/LICENSE)
- github.com/dgrijalva/jwt-go [MIT License](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE)
- github.com/dimchansky/utfbom [Apache License 2.0](https://github.com/dimchansky/utfbom/blob/master/LICENSE)
- github.com/docker/distribution [Apache License 2.0](https://github.com/docker/distribution/blob/master/LICENSE)
- github.com/docker/docker [Apache License 2.0](https://github.com/docker/docker/blob/master/LICENSE)
- github.com/docker/go-connections [Apache License 2.0](https://github.com/docker/go-connections/blob/master/LICENSE)
- github.com/docker/go-units [Apache License 2.0](https://github.com/docker/go-units/blob/master/LICENSE)
- github.com/docker/libnetwork [Apache License 2.0](https://github.com/docker/libnetwork/blob/master/LICENSE)
- github.com/eapache/go-resiliency [MIT License](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
- github.com/eapache/go-xerial-snappy [MIT License](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE)
- github.com/eapache/queue [MIT License](https://github.com/eapache/queue/blob/master/LICENSE)
- github.com/eclipse/paho.mqtt.golang [Eclipse Public License - v 1.0](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE)
- github.com/ericchiang/k8s [Apache License 2.0](https://github.com/ericchiang/k8s/blob/master/LICENSE)
- github.com/ghodss/yaml [MIT License](https://github.com/ghodss/yaml/blob/master/LICENSE)
- github.com/glinton/ping [MIT License](https://github.com/glinton/ping/blob/master/LICENSE)
- github.com/go-logfmt/logfmt [MIT License](https://github.com/go-logfmt/logfmt/blob/master/LICENSE)
- github.com/go-ole/go-ole [MIT License](https://github.com/go-ole/go-ole/blob/master/LICENSE)
- github.com/go-redis/redis [BSD 2-Clause "Simplified" License](https://github.com/go-redis/redis/blob/master/LICENSE)
- github.com/go-sql-driver/mysql [Mozilla Public License 2.0](https://github.com/go-sql-driver/mysql/blob/master/LICENSE)
- github.com/goburrow/modbus [BSD 3-Clause "New" or "Revised" License](https://github.com/goburrow/modbus/blob/master/LICENSE)
- github.com/goburrow/serial [MIT License](https://github.com/goburrow/serial/LICENSE)
- github.com/gobwas/glob [MIT License](https://github.com/gobwas/glob/blob/master/LICENSE)
- github.com/gofrs/uuid [MIT License](https://github.com/gofrs/uuid/blob/master/LICENSE)
- github.com/gogo/protobuf [BSD 3-Clause Clear License](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/golang/geo [Apache License 2.0](https://github.com/golang/geo/blob/master/LICENSE)
- github.com/golang/groupcache [Apache License 2.0](https://github.com/golang/groupcache/blob/master/LICENSE)
- github.com/golang/mock [Apache License 2.0](https://github.com/golang/mock/blob/master/LICENSE)
- github.com/golang/protobuf [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/google/go-cmp [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-cmp/blob/master/LICENSE)
- github.com/google/go-github [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-github/blob/master/LICENSE)
- github.com/google/go-querystring [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-querystring/blob/master/LICENSE)
- github.com/googleapis/gax-go [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/gax-go/blob/master/LICENSE)
- github.com/gorilla/mux [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/mux/blob/master/LICENSE)
- github.com/hailocab/go-hostpool [MIT License](https://github.com/hailocab/go-hostpool/blob/master/LICENSE)
- github.com/harlow/kinesis-consumer [MIT License](https://github.com/harlow/kinesis-consumer/blob/master/MIT-LICENSE)
- github.com/hashicorp/consul [Mozilla Public License 2.0](https://github.com/hashicorp/consul/blob/master/LICENSE)
- github.com/hashicorp/go-cleanhttp [Mozilla Public License 2.0](https://github.com/hashicorp/go-cleanhttp/blob/master/LICENSE)
- github.com/hashicorp/go-rootcerts [Mozilla Public License 2.0](https://github.com/hashicorp/go-rootcerts/blob/master/LICENSE)
- github.com/hashicorp/go-uuid [Mozilla Public License 2.0](https://github.com/hashicorp/go-uuid/LICENSE)
- github.com/hashicorp/serf [Mozilla Public License 2.0](https://github.com/hashicorp/serf/blob/master/LICENSE)
- github.com/influxdata/go-syslog [MIT License](https://github.com/influxdata/go-syslog/blob/develop/LICENSE)
- github.com/influxdata/tail [MIT License](https://github.com/influxdata/tail/blob/master/LICENSE.txt)
- github.com/influxdata/toml [MIT License](https://github.com/influxdata/toml/blob/master/LICENSE)
- github.com/influxdata/wlog [MIT License](https://github.com/influxdata/wlog/blob/master/LICENSE)
- github.com/jackc/pgx [MIT License](https://github.com/jackc/pgx/blob/master/LICENSE)
- github.com/jcmturner/gofork [BSD 3-Clause "New" or "Revised" License](https://github.com/jcmturner/gofork/blob/master/LICENSE)
- github.com/jmespath/go-jmespath [Apache License 2.0](https://github.com/jmespath/go-jmespath/blob/master/LICENSE)
- github.com/jpillora/backoff [MIT License](https://github.com/jpillora/backoff/blob/master/LICENSE)
- github.com/kardianos/service [zlib License](https://github.com/kardianos/service/blob/master/LICENSE)
- github.com/karrick/godirwalk [BSD 2-Clause "Simplified" License](https://github.com/karrick/godirwalk/blob/master/LICENSE)
- github.com/kballard/go-shellquote [MIT License](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
- github.com/klauspost/compress [BSD 3-Clause Clear License](https://github.com/klauspost/compress/blob/master/LICENSE)
- github.com/konsorten/go-windows-terminal-sequences [MIT License](https://github.com/konsorten/go-windows-terminal-sequences/blob/master/LICENSE)
- github.com/kubernetes/apimachinery [Apache License 2.0](https://github.com/kubernetes/apimachinery/blob/master/LICENSE)
- github.com/leodido/ragel-machinery [MIT License](https://github.com/leodido/ragel-machinery/blob/develop/LICENSE)
- github.com/mailru/easyjson [MIT License](https://github.com/mailru/easyjson/blob/master/LICENSE)
- github.com/matttproud/golang_protobuf_extensions [Apache License 2.0](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/mdlayher/apcupsd [MIT License](https://github.com/mdlayher/apcupsd/blob/master/LICENSE.md)
- github.com/mdlayher/genetlink [MIT License](https://github.com/mdlayher/genetlink/blob/master/LICENSE.md)
- github.com/mdlayher/netlink [MIT License](https://github.com/mdlayher/netlink/blob/master/LICENSE.md)
- github.com/miekg/dns [BSD 3-Clause Clear License](https://github.com/miekg/dns/blob/master/LICENSE)
- github.com/mitchellh/go-homedir [MIT License](https://github.com/mitchellh/go-homedir/blob/master/LICENSE)
- github.com/mitchellh/mapstructure [MIT License](https://github.com/mitchellh/mapstructure/blob/master/LICENSE)
- github.com/multiplay/go-ts3 [BSD 2-Clause "Simplified" License](https://github.com/multiplay/go-ts3/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT License](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
- github.com/nats-io/jwt [Apache License 2.0](https://github.com/nats-io/jwt/blob/master/LICENSE)
- github.com/nats-io/nats-server [Apache License 2.0](https://github.com/nats-io/nats-server/blob/master/LICENSE)
- github.com/nats-io/nats.go [Apache License 2.0](https://github.com/nats-io/nats.go/blob/master/LICENSE)
- github.com/nats-io/nkeys [Apache License 2.0](https://github.com/nats-io/nkeys/blob/master/LICENSE)
- github.com/nats-io/nuid [Apache License 2.0](https://github.com/nats-io/nuid/blob/master/LICENSE)
- github.com/newrelic/newrelic-telemetry-sdk-go [Apache License 2.0](https://github.com/newrelic/newrelic-telemetry-sdk-go/blob/master/LICENSE.md)
- github.com/nsqio/go-nsq [MIT License](https://github.com/nsqio/go-nsq/blob/master/LICENSE)
- github.com/openconfig/gnmi [Apache License 2.0](https://github.com/openconfig/gnmi/blob/master/LICENSE)
- github.com/opencontainers/go-digest [Apache License 2.0](https://github.com/opencontainers/go-digest/blob/master/LICENSE)
- github.com/opencontainers/image-spec [Apache License 2.0](https://github.com/opencontainers/image-spec/blob/master/LICENSE)
- github.com/openzipkin/zipkin-go-opentracing [MIT License](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE)
- github.com/pierrec/lz4 [BSD 3-Clause "New" or "Revised" License](https://github.com/pierrec/lz4/blob/master/LICENSE)
- github.com/pkg/errors [BSD 2-Clause "Simplified" License](https://github.com/pkg/errors/blob/master/LICENSE)
- github.com/pmezard/go-difflib [BSD 3-Clause Clear License](https://github.com/pmezard/go-difflib/blob/master/LICENSE)
- github.com/prometheus/client_golang [Apache License 2.0](https://github.com/prometheus/client_golang/blob/master/LICENSE)
- github.com/prometheus/client_model [Apache License 2.0](https://github.com/prometheus/client_model/blob/master/LICENSE)
- github.com/prometheus/common [Apache License 2.0](https://github.com/prometheus/common/blob/master/LICENSE)
- github.com/prometheus/procfs [Apache License 2.0](https://github.com/prometheus/procfs/blob/master/LICENSE)
- github.com/rcrowley/go-metrics [MIT License](https://github.com/rcrowley/go-metrics/blob/master/LICENSE)
- github.com/safchain/ethtool [Apache License 2.0](https://github.com/safchain/ethtool/blob/master/LICENSE)
- github.com/samuel/go-zookeeper [BSD 3-Clause Clear License](https://github.com/samuel/go-zookeeper/blob/master/LICENSE)
- github.com/shirou/gopsutil [BSD 3-Clause Clear License](https://github.com/shirou/gopsutil/blob/master/LICENSE)
- github.com/sirupsen/logrus [MIT License](https://github.com/sirupsen/logrus/blob/master/LICENSE)
- github.com/soniah/gosnmp [BSD 2-Clause "Simplified" License](https://github.com/soniah/gosnmp/blob/master/LICENSE)
- github.com/streadway/amqp [BSD 2-Clause "Simplified" License](https://github.com/streadway/amqp/blob/master/LICENSE)
- github.com/stretchr/objx [MIT License](https://github.com/stretchr/objx/blob/master/LICENSE)
- github.com/stretchr/testify [custom -- permissive](https://github.com/stretchr/testify/blob/master/LICENSE)
- github.com/tidwall/gjson [MIT License](https://github.com/tidwall/gjson/blob/master/LICENSE)
- github.com/tidwall/match [MIT License](https://github.com/tidwall/match/blob/master/LICENSE)
- github.com/tidwall/pretty [MIT License](https://github.com/tidwall/pretty/blob/master/LICENSE)
- github.com/vishvananda/netlink [Apache License 2.0](https://github.com/vishvananda/netlink/blob/master/LICENSE)
- github.com/vishvananda/netns [Apache License 2.0](https://github.com/vishvananda/netns/blob/master/LICENSE)
- github.com/vjeantet/grok [Apache License 2.0](https://github.com/vjeantet/grok/blob/master/LICENSE)
- github.com/vmware/govmomi [Apache License 2.0](https://github.com/vmware/govmomi/blob/master/LICENSE.txt)
- github.com/wavefronthq/wavefront-sdk-go [Apache License 2.0](https://github.com/wavefrontHQ/wavefront-sdk-go/blob/master/LICENSE)
- github.com/wvanbergen/kafka [MIT License](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
- github.com/wvanbergen/kazoo-go [MIT License](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
- github.com/yuin/gopher-lua [MIT License](https://github.com/yuin/gopher-lua/blob/master/LICENSE)
- go.opencensus.io [Apache License 2.0](https://github.com/census-instrumentation/opencensus-go/blob/master/LICENSE)
- golang.org/x/crypto [BSD 3-Clause Clear License](https://github.com/golang/crypto/blob/master/LICENSE)
- golang.org/x/net [BSD 3-Clause Clear License](https://github.com/golang/net/blob/master/LICENSE)
- golang.org/x/oauth2 [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/oauth2/blob/master/LICENSE)
- golang.org/x/sync [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/sync/blob/master/LICENSE)
- golang.org/x/sys [BSD 3-Clause Clear License](https://github.com/golang/sys/blob/master/LICENSE)
- golang.org/x/text [BSD 3-Clause Clear License](https://github.com/golang/text/blob/master/LICENSE)
- golang.org/x/time [BSD 3-Clause Clear License](https://github.com/golang/time/blob/master/LICENSE)
- golang.zx2c4.com/wireguard [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md)
- golang.zx2c4.com/wireguard/wgctrl [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md)
- google.golang.org/api [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/google-api-go-client/blob/master/LICENSE)
- google.golang.org/genproto [Apache License 2.0](https://github.com/google/go-genproto/blob/master/LICENSE)
- google.golang.org/grpc [Apache License 2.0](https://github.com/grpc/grpc-go/blob/master/LICENSE)
- gopkg.in/asn1-ber.v1 [MIT License](https://github.com/go-asn1-ber/asn1-ber/blob/v1.3/LICENSE)
- gopkg.in/fatih/pool.v2 [MIT License](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
- gopkg.in/fsnotify.v1 [BSD 3-Clause "New" or "Revised" License](https://github.com/fsnotify/fsnotify/blob/v1.4.7/LICENSE)
- gopkg.in/gorethink/gorethink.v3 [Apache License 2.0](https://github.com/rethinkdb/rethinkdb-go/blob/v3.0.5/LICENSE)
- gopkg.in/inf.v0 [BSD 3-Clause "New" or "Revised" License](https://github.com/go-inf/inf/blob/v0.9.1/LICENSE)
- gopkg.in/jcmturner/aescts.v1 [Apache License 2.0](https://github.com/jcmturner/aescts/blob/v1.0.1/LICENSE)
- gopkg.in/jcmturner/dnsutils.v1 [Apache License 2.0](https://github.com/jcmturner/dnsutils/blob/v1.0.1/LICENSE)
- gopkg.in/jcmturner/gokrb5.v7 [Apache License 2.0](https://github.com/jcmturner/gokrb5/tree/v7.5.0/LICENSE)
- gopkg.in/jcmturner/rpc.v1 [Apache License 2.0](https://github.com/jcmturner/rpc/blob/v1.1.0/LICENSE)
- gopkg.in/ldap.v3 [MIT License](https://github.com/go-ldap/ldap/blob/v3.1.7/LICENSE)
- gopkg.in/mgo.v2 [BSD 2-Clause "Simplified" License](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- gopkg.in/olivere/elastic.v5 [MIT License](https://github.com/olivere/elastic/blob/v5.0.76/LICENSE)
- gopkg.in/tomb.v1 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v1/LICENSE)
- gopkg.in/yaml.v2 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v2.2.2/LICENSE)
## telegraf used and modified code from these projects
- github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/LICENSE)
- code.cloudfoundry.org/clock [APACHE](https://github.com/cloudfoundry/clock/blob/master/LICENSE)
- collectd.org [MIT](https://github.com/collectd/go-collectd/blob/master/LICENSE)
- github.com/aerospike/aerospike-client-go [APACHE](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE)
- github.com/amir/raidman [PUBLIC DOMAIN](https://github.com/amir/raidman/blob/master/UNLICENSE)
- github.com/armon/go-metrics [MIT](https://github.com/armon/go-metrics/blob/master/LICENSE)
- github.com/aws/aws-sdk-go [APACHE](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
- github.com/beorn7/perks [MIT](https://github.com/beorn7/perks/blob/master/LICENSE)
- github.com/boltdb/bolt [MIT](https://github.com/boltdb/bolt/blob/master/LICENSE)
- github.com/bsm/sarama-cluster [MIT](https://github.com/bsm/sarama-cluster/blob/master/LICENSE)
- github.com/cenkalti/backoff [MIT](https://github.com/cenkalti/backoff/blob/master/LICENSE)
- github.com/chuckpreslar/rcon [MIT](https://github.com/chuckpreslar/rcon#license)
- github.com/couchbase/go-couchbase [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
- github.com/couchbase/gomemcached [MIT](https://github.com/couchbase/gomemcached/blob/master/LICENSE)
- github.com/couchbase/goutils [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
- github.com/dancannon/gorethink [APACHE](https://github.com/dancannon/gorethink/blob/master/LICENSE)
- github.com/davecgh/go-spew [ISC](https://github.com/davecgh/go-spew/blob/master/LICENSE)
- github.com/docker/docker [APACHE](https://github.com/docker/docker/blob/master/LICENSE)
- github.com/docker/cli [APACHE](https://github.com/docker/cli/blob/master/LICENSE)
- github.com/eapache/go-resiliency [MIT](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
- github.com/eapache/go-xerial-snappy [MIT](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE)
- github.com/eapache/queue [MIT](https://github.com/eapache/queue/blob/master/LICENSE)
- github.com/eclipse/paho.mqtt.golang [ECLIPSE](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE)
- github.com/fsnotify/fsnotify [BSD](https://github.com/fsnotify/fsnotify/blob/master/LICENSE)
- github.com/fsouza/go-dockerclient [BSD](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE)
- github.com/gobwas/glob [MIT](https://github.com/gobwas/glob/blob/master/LICENSE)
- github.com/google/go-cmp [BSD](https://github.com/google/go-cmp/blob/master/LICENSE)
- github.com/gogo/protobuf [BSD](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/golang/protobuf [BSD](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/go-logfmt/logfmt [MIT](https://github.com/go-logfmt/logfmt/blob/master/LICENSE)
- github.com/gorilla/mux [BSD](https://github.com/gorilla/mux/blob/master/LICENSE)
- github.com/go-ini/ini [APACHE](https://github.com/go-ini/ini/blob/master/LICENSE)
- github.com/go-ole/go-ole [MIT](http://mattn.mit-license.org/2013)
- github.com/go-sql-driver/mysql [MPL](https://github.com/go-sql-driver/mysql/blob/master/LICENSE)
- github.com/hailocab/go-hostpool [MIT](https://github.com/hailocab/go-hostpool/blob/master/LICENSE)
- github.com/hashicorp/consul [MPL](https://github.com/hashicorp/consul/blob/master/LICENSE)
- github.com/hashicorp/go-msgpack [BSD](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
- github.com/hashicorp/raft-boltdb [MPL](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
- github.com/hashicorp/raft [MPL](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/influxdata/tail [MIT](https://github.com/influxdata/tail/blob/master/LICENSE.txt)
- github.com/influxdata/toml [MIT](https://github.com/influxdata/toml/blob/master/LICENSE)
- github.com/influxdata/go-syslog [MIT](https://github.com/influxdata/go-syslog/blob/develop/LICENSE)
- github.com/influxdata/wlog [MIT](https://github.com/influxdata/wlog/blob/master/LICENSE)
- github.com/jackc/pgx [MIT](https://github.com/jackc/pgx/blob/master/LICENSE)
- github.com/jmespath/go-jmespath [APACHE](https://github.com/jmespath/go-jmespath/blob/master/LICENSE)
- github.com/kardianos/osext [BSD](https://github.com/kardianos/osext/blob/master/LICENSE)
- github.com/kardianos/service [ZLIB](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib)
- github.com/kballard/go-shellquote [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
- github.com/lib/pq [MIT](https://github.com/lib/pq/blob/master/LICENSE.md)
- github.com/matttproud/golang_protobuf_extensions [APACHE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/Microsoft/ApplicationInsights-Go [APACHE](https://github.com/Microsoft/ApplicationInsights-Go/blob/master/LICENSE)
- github.com/Microsoft/go-winio [MIT](https://github.com/Microsoft/go-winio/blob/master/LICENSE)
- github.com/miekg/dns [BSD](https://github.com/miekg/dns/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
- github.com/naoina/toml [MIT](https://github.com/naoina/toml/blob/master/LICENSE)
- github.com/nats-io/gnatsd [MIT](https://github.com/nats-io/gnatsd/blob/master/LICENSE)
- github.com/nats-io/go-nats [MIT](https://github.com/nats-io/go-nats/blob/master/LICENSE)
- github.com/nats-io/nats [MIT](https://github.com/nats-io/nats/blob/master/LICENSE)
- github.com/nats-io/nuid [MIT](https://github.com/nats-io/nuid/blob/master/LICENSE)
- github.com/nsqio/go-nsq [MIT](https://github.com/nsqio/go-nsq/blob/master/LICENSE)
- github.com/opentracing-contrib/go-observer [APACHE](https://github.com/opentracing-contrib/go-observer/blob/master/LICENSE)
- github.com/opentracing/opentracing-go [MIT](https://github.com/opentracing/opentracing-go/blob/master/LICENSE)
- github.com/openzipkin/zipkin-go-opentracing [MIT](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE)
- github.com/pierrec/lz4 [BSD](https://github.com/pierrec/lz4/blob/master/LICENSE)
- github.com/pierrec/xxHash [BSD](https://github.com/pierrec/xxHash/blob/master/LICENSE)
- github.com/pkg/errors [BSD](https://github.com/pkg/errors/blob/master/LICENSE)
- github.com/pmezard/go-difflib [BSD](https://github.com/pmezard/go-difflib/blob/master/LICENSE)
- github.com/prometheus/client_golang [APACHE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
- github.com/prometheus/client_model [APACHE](https://github.com/prometheus/client_model/blob/master/LICENSE)
- github.com/prometheus/common [APACHE](https://github.com/prometheus/common/blob/master/LICENSE)
- github.com/prometheus/procfs [APACHE](https://github.com/prometheus/procfs/blob/master/LICENSE)
- github.com/rcrowley/go-metrics [BSD](https://github.com/rcrowley/go-metrics/blob/master/LICENSE)
- github.com/samuel/go-zookeeper [BSD](https://github.com/samuel/go-zookeeper/blob/master/LICENSE)
- github.com/satori/go.uuid [MIT](https://github.com/satori/go.uuid/blob/master/LICENSE)
- github.com/shirou/gopsutil [BSD](https://github.com/shirou/gopsutil/blob/master/LICENSE)
- github.com/shirou/w32 [BSD](https://github.com/shirou/w32/blob/master/LICENSE)
- github.com/Shopify/sarama [MIT](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE)
- github.com/Sirupsen/logrus [MIT](https://github.com/Sirupsen/logrus/blob/master/LICENSE)
- github.com/StackExchange/wmi [MIT](https://github.com/StackExchange/wmi/blob/master/LICENSE)
- github.com/stretchr/objx [MIT](https://github.com/stretchr/objx/blob/master/LICENSE.md)
- github.com/soniah/gosnmp [BSD](https://github.com/soniah/gosnmp/blob/master/LICENSE)
- github.com/streadway/amqp [BSD](https://github.com/streadway/amqp/blob/master/LICENSE)
- github.com/stretchr/testify [MIT](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
- github.com/tidwall/gjson [MIT](https://github.com/tidwall/gjson/blob/master/LICENSE)
- github.com/tidwall/match [MIT](https://github.com/tidwall/match/blob/master/LICENSE)
- github.com/mitchellh/mapstructure [MIT](https://github.com/mitchellh/mapstructure/blob/master/LICENSE)
- github.com/multiplay/go-ts3 [BSD](https://github.com/multiplay/go-ts3/blob/master/LICENSE)
- github.com/vjeantet/grok [APACHE](https://github.com/vjeantet/grok/blob/master/LICENSE)
- github.com/wvanbergen/kafka [MIT](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
- github.com/wvanbergen/kazoo-go [MIT](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
- github.com/yuin/gopher-lua [MIT](https://github.com/yuin/gopher-lua/blob/master/LICENSE)
- github.com/zensqlmonitor/go-mssqldb [BSD](https://github.com/zensqlmonitor/go-mssqldb/blob/master/LICENSE.txt)
- golang.org/x/crypto [BSD](https://github.com/golang/crypto/blob/master/LICENSE)
- golang.org/x/net [BSD](https://go.googlesource.com/net/+/master/LICENSE)
- golang.org/x/text [BSD](https://go.googlesource.com/text/+/master/LICENSE)
- golang.org/x/sys [BSD](https://go.googlesource.com/sys/+/master/LICENSE)
- google.golang.org/grpc [APACHE](https://github.com/google/grpc-go/blob/master/LICENSE)
- google.golang.org/genproto [APACHE](https://github.com/google/go-genproto/blob/master/LICENSE)
- gopkg.in/asn1-ber.v1 [MIT](https://github.com/go-asn1-ber/asn1-ber/blob/v1.2/LICENSE)
- gopkg.in/dancannon/gorethink.v1 [APACHE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/fatih/pool.v2 [MIT](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
- gopkg.in/ldap.v2 [MIT](https://github.com/go-ldap/ldap/blob/v2.5.0/LICENSE)
- gopkg.in/mgo.v2 [BSD](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- gopkg.in/olivere/elastic.v5 [MIT](https://github.com/olivere/elastic/blob/v5.0.38/LICENSE)
- gopkg.in/tomb.v1 [BSD](https://github.com/go-tomb/tomb/blob/v1/LICENSE)
- gopkg.in/yaml.v2 [APACHE](https://github.com/go-yaml/yaml/blob/v2/LICENSE)


@ -1,22 +0,0 @@
# Metrics
Telegraf metrics are the internal representation used to model data during
processing. Metrics are closely based on InfluxDB's data model and contain
four main components:
- **Measurement Name**: Description and namespace for the metric.
- **Tags**: Key/Value string pairs, usually used to identify the
metric.
- **Fields**: Key/Value pairs that are typed and usually contain the
metric data.
- **Timestamp**: Date and time associated with the fields.
This metric type exists only in memory and must be converted to a concrete
representation in order to be transmitted or viewed. To achieve this we
provide several [output data formats][] sometimes referred to as
*serializers*. Our default serializer converts to [InfluxDB Line
Protocol][line protocol] which provides a high performance and one-to-one
direct mapping from Telegraf metrics.
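For example, a metric with all four components maps onto a single line of line protocol as `measurement,tag_set field_set timestamp` (the values below are illustrative):
```
weather,location=us-midwest,season=summer temperature=82 1465839830100400200
```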
[output data formats]: /docs/DATA_FORMATS_OUTPUT.md
[line protocol]: /plugins/serializers/influx


@ -1,114 +0,0 @@
### Output Plugins
This section is for developers who want to create a new output sink. Outputs
are created in a similar manner as collection plugins, and their interface has
similar constructs.
### Output Plugin Guidelines
- An output must conform to the [telegraf.Output][] interface.
- Outputs should call `outputs.Add` in their `init` function to register
themselves. See below for a quick example.
- To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/outputs/all/all.go` file (see the
import snippet after this list).
- The `SampleConfig` function should return valid toml that describes how the
plugin can be configured. This is included in `telegraf config`. Please
consult the [SampleConfig][] page for the latest style guidelines.
- The `Description` function should say in one line what this output does.
- Follow the recommended [CodeStyle][].
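Registration in `all.go` is done with a blank import so that the plugin's `init` function runs; a minimal sketch for the `simpleoutput` example below might look like this:
```go
// plugins/outputs/all/all.go (excerpt, illustrative)
package all

import (
    // Blank import for side effects: init() calls outputs.Add("simpleoutput", ...).
    _ "github.com/influxdata/telegraf/plugins/outputs/simpleoutput"
)
```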
### Output Plugin Example
```go
package simpleoutput

// simpleoutput.go

import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/outputs"
)

type Simple struct {
    Ok bool `toml:"ok"`
}

func (s *Simple) Description() string {
    return "a demo output"
}

func (s *Simple) SampleConfig() string {
    return `
ok = true
`
}

func (s *Simple) Init() error {
    return nil
}

func (s *Simple) Connect() error {
    // Make a connection to the URL here
    return nil
}

func (s *Simple) Close() error {
    // Close connection to the URL here
    return nil
}

func (s *Simple) Write(metrics []telegraf.Metric) error {
    for _, metric := range metrics {
        // write `metric` to the output sink here
        _ = metric
    }
    return nil
}

func init() {
    outputs.Add("simpleoutput", func() telegraf.Output { return &Simple{} })
}
```
## Data Formats
Some output plugins, such as the [file][] plugin, can write in any supported
[output data formats][].
In order to enable this, you must specify a
`SetSerializer(serializer serializers.Serializer)`
function on the plugin object (see the file plugin for an example), as well as
defining `serializer` as a field of the object.
You can then utilize the serializer internally in your plugin, serializing data
before it's written. Telegraf's configuration layer will take care of
instantiating and creating the `Serializer` object.
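A rough sketch of what this looks like, extending the `Simple` example above (illustrative only, not the actual file plugin code):
```go
package simpleoutput

import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/serializers"
)

type Simple struct {
    serializer serializers.Serializer
}

// SetSerializer is called by Telegraf's configuration layer with the
// serializer selected by the plugin's data_format option.
func (s *Simple) SetSerializer(serializer serializers.Serializer) {
    s.serializer = serializer
}

func (s *Simple) Write(metrics []telegraf.Metric) error {
    for _, m := range metrics {
        octets, err := s.serializer.Serialize(m)
        if err != nil {
            return err
        }
        // write `octets` to the output sink here
        _ = octets
    }
    return nil
}
```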
You should also add the following to your `SampleConfig()`:
```toml
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
```
## Flushing Metrics to Outputs
Metrics are flushed to outputs when any of the following events happen:
- `flush_interval + rand(flush_jitter)` has elapsed since start or the last flush interval
- At least `metric_batch_size` count of metrics are waiting in the buffer
- The telegraf process has received a SIGUSR1 signal
Note that if the flush takes longer than the `agent.interval` to write the metrics
to the output, you'll see a message saying the output `did not complete within its
flush interval`. This may mean your output is not keeping up with the flow of metrics,
and you may want to look into enabling compression, reducing the size of your metrics,
or investigate other reasons why the writes might be taking longer than expected.
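For reference, the first two triggers map onto the following `[agent]` settings (example values only, not recommendations):
```toml
[agent]
  ## Flush at least every 10s, plus up to 5s of random jitter.
  flush_interval = "10s"
  flush_jitter = "5s"
  ## Also flush as soon as 1000 metrics are waiting in the buffer.
  metric_batch_size = 1000
```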
[file]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/file
[output data formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig
[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle
[telegraf.Output]: https://godoc.org/github.com/influxdata/telegraf#Output


@ -1,69 +0,0 @@
### Processor Plugins
This section is for developers who want to create a new processor plugin.
### Processor Plugin Guidelines
* A processor must conform to the [telegraf.Processor][] interface.
* Processors should call `processors.Add` in their `init` function to register
themselves. See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/processors/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
processor can be configured. This is included in the output of `telegraf
config`. Please consult the [SampleConfig][] page for the latest style
guidelines.
* The `Description` function should say in one line what this processor does.
* Follow the recommended [CodeStyle][].
### Processor Plugin Example
```go
package printer

// printer.go

import (
    "fmt"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/processors"
)

type Printer struct {
}

var sampleConfig = `
`

func (p *Printer) SampleConfig() string {
    return sampleConfig
}

func (p *Printer) Description() string {
    return "Print all metrics that pass through this filter."
}

func (p *Printer) Init() error {
    return nil
}

func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric {
    for _, metric := range in {
        fmt.Println(metric.String())
    }
    return in
}

func init() {
    processors.Add("printer", func() telegraf.Processor {
        return &Printer{}
    })
}
```
[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig
[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle
[telegraf.Processor]: https://godoc.org/github.com/influxdata/telegraf#Processor


@ -1,21 +0,0 @@
# Telegraf
- Concepts
- [Metrics][metrics]
- [Input Data Formats][parsers]
- [Output Data Formats][serializers]
- [Aggregators & Processors][aggproc]
- Administration
- [Configuration][conf]
- [Profiling][profiling]
- [Windows Service][winsvc]
- [FAQ][faq]
[conf]: /docs/CONFIGURATION.md
[metrics]: /docs/METRICS.md
[parsers]: /docs/DATA_FORMATS_INPUT.md
[serializers]: /docs/DATA_FORMATS_OUTPUT.md
[aggproc]: /docs/AGGREGATORS_AND_PROCESSORS.md
[profiling]: /docs/PROFILING.md
[winsvc]: /docs/WINDOWS_SERVICE.md
[faq]: /docs/FAQ.md


@ -1,135 +0,0 @@
# Template Patterns
Template patterns are a mini language that describes how a dot delimited
string should be mapped to and from [metrics][].
A template has the form:
```
"host.mytag.mytag.measurement.measurement.field*"
```
Where the following keywords can be set:
1. `measurement`: specifies that this section of the graphite bucket corresponds
to the measurement name. This can be specified multiple times.
2. `field`: specifies that this section of the graphite bucket corresponds
to the field name. This can be specified multiple times.
3. `measurement*`: specifies that all remaining elements of the graphite bucket
correspond to the measurement name.
4. `field*`: specifies that all remaining elements of the graphite bucket
correspond to the field name.
Any part of the template that is not a keyword is treated as a tag key. This
can also be specified multiple times.
**NOTE:** `field*` cannot be used in conjunction with `measurement*`.
### Examples
#### Measurement & Tag Templates
The most basic template is to specify a single transformation to apply to all
incoming metrics. So the following template:
```toml
templates = [
"region.region.measurement*"
]
```
would result in the following Graphite -> Telegraf transformation.
```
us.west.cpu.load 100
=> cpu.load,region=us.west value=100
```
Multiple templates can also be specified, but these should be differentiated
using _filters_ (see below for more details).
```toml
templates = [
"*.*.* region.region.measurement", # <- all 3-part measurements will match this one.
"*.*.*.* region.region.host.measurement", # <- all 4-part measurements will match this one.
]
```
#### Field Templates
The field keyword tells Telegraf to give the metric that field name.
So the following template:
```toml
separator = "_"
templates = [
"measurement.measurement.field.field.region"
]
```
would result in the following Graphite -> Telegraf transformation.
```
cpu.usage.idle.percent.eu-east 100
=> cpu_usage,region=eu-east idle_percent=100
```
The field key can also be derived from all remaining elements of the graphite
bucket by specifying `field*`:
```toml
separator = "_"
templates = [
"measurement.measurement.region.field*"
]
```
which would result in the following Graphite -> Telegraf transformation.
```
cpu.usage.eu-east.idle.percentage 100
=> cpu_usage,region=eu-east idle_percentage=100
```
#### Filter Templates
Users can also filter the template(s) to use based on the name of the bucket,
using glob matching, like so:
```toml
templates = [
"cpu.* measurement.measurement.region",
"mem.* measurement.measurement.host"
]
```
which would result in the following transformation:
```
cpu.load.eu-east 100
=> cpu_load,region=eu-east value=100
mem.cached.localhost 256
=> mem_cached,host=localhost value=256
```
#### Adding Tags
Additional tags can be added to a metric that don't exist on the received metric.
You can add additional tags by specifying them after the pattern.
Tags have the same format as the line protocol.
Multiple tags are separated by commas.
```toml
templates = [
"measurement.measurement.field.region datacenter=1a"
]
```
would result in the following Graphite -> Telegraf transformation.
```
cpu.usage.idle.eu-east 100
=> cpu_usage,region=eu-east,datacenter=1a idle=100
```
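Filters, field templates, and added tags can also be combined in a single template entry; a hypothetical example:
```toml
separator = "_"
templates = [
"cpu.* measurement.measurement.region.field* datacenter=1a"
]
```
which, applying the rules above, would result in the following transformation:
```
cpu.usage.eu-east.idle.percent 100
=> cpu_usage,region=eu-east,datacenter=1a idle_percent=100
```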
[metrics]: /docs/METRICS.md


@ -1,105 +0,0 @@
# Transport Layer Security
There is an ongoing effort to standardize TLS options across plugins. When
possible, plugins will provide the standard settings described below. With the
exception of the advanced configuration, the available TLS settings will be
documented in the sample configuration.
### Client Configuration
For client TLS support we have the following options:
```toml
## Root certificates for verifying server certificates encoded in PEM format.
# tls_ca = "/etc/telegraf/ca.pem"
## The public and private keypairs for the client encoded in PEM format. May
## contain intermediate certificates.
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Skip TLS verification.
# insecure_skip_verify = false
```
### Server Configuration
The server TLS configuration provides support for TLS mutual authentication:
```toml
## Set one or more allowed client CA certificate file names to
## enable mutually authenticated TLS connections.
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
## Add service certificate and key.
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
```
#### Advanced Configuration
For plugins using the standard server configuration you can also set several
advanced settings. These options are not included in the sample configuration
in the interest of brevity.
```toml
## Define list of allowed ciphers suites. If not defined the default ciphers
## supported by Go will be used.
## ex: tls_cipher_suites = [
## "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
## "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
## "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
## "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
## "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
## "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
## "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
## "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
## "TLS_RSA_WITH_AES_128_GCM_SHA256",
## "TLS_RSA_WITH_AES_256_GCM_SHA384",
## "TLS_RSA_WITH_AES_128_CBC_SHA256",
## "TLS_RSA_WITH_AES_128_CBC_SHA",
## "TLS_RSA_WITH_AES_256_CBC_SHA"
## ]
# tls_cipher_suites = []
## Minimum TLS version that is acceptable.
# tls_min_version = "TLS10"
## Maximum SSL/TLS version that is acceptable.
# tls_max_version = "TLS13"
```
Cipher suites for use with `tls_cipher_suites`:
- `TLS_RSA_WITH_RC4_128_SHA`
- `TLS_RSA_WITH_3DES_EDE_CBC_SHA`
- `TLS_RSA_WITH_AES_128_CBC_SHA`
- `TLS_RSA_WITH_AES_256_CBC_SHA`
- `TLS_RSA_WITH_AES_128_CBC_SHA256`
- `TLS_RSA_WITH_AES_128_GCM_SHA256`
- `TLS_RSA_WITH_AES_256_GCM_SHA384`
- `TLS_ECDHE_ECDSA_WITH_RC4_128_SHA`
- `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA`
- `TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA`
- `TLS_ECDHE_RSA_WITH_RC4_128_SHA`
- `TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA`
- `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA`
- `TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA`
- `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256`
- `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256`
- `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`
- `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`
- `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`
- `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`
- `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305`
- `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305`
- `TLS_AES_128_GCM_SHA256`
- `TLS_AES_256_GCM_SHA384`
- `TLS_CHACHA20_POLY1305_SHA256`
TLS versions for use with `tls_min_version` or `tls_max_version`:
- `TLS10`
- `TLS11`
- `TLS12`
- `TLS13`


@ -46,26 +46,8 @@ Telegraf can manage its own service through the --service flag:
| `telegraf.exe --service start` | Start the telegraf service |
| `telegraf.exe --service stop` | Stop the telegraf service |
## Install multiple services
Running multiple instances of Telegraf is seldom needed, as you can run
multiple instances of each plugin and route metric flow using the metric
filtering options. However, if you do need to run multiple telegraf instances
on a single system, you can install the service with the `--service-name` and
`--service-display-name` flags to give the services unique names:
```
> C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-1 --service-display-name "Telegraf 1"
> C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-2 --service-display-name "Telegraf 2"
```
## Troubleshooting
When Telegraf runs as a Windows service, it logs messages to the Windows event log until the configuration file with the logging settings has been loaded.
If the Telegraf service reports a failure on start, check the event log for an error reported by the `telegraf` service: Event Viewer->Windows Logs->Application
**Troubleshooting common error #1067**
When installing as a service on Windows, always double check that you specify the full path to the config file; otherwise the Windows service will fail to start. For example:
--config "C:\Program Files\Telegraf\telegraf.conf"
--config C:\"Program Files"\Telegraf\telegraf.conf

File diff suppressed because it is too large


@ -1,26 +1,18 @@
# Telegraf Configuration
#
# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
## Environment variables can be used as tags, and throughout the config file
# user = "$USER"
# Configuration for telegraf agent
[agent]
@ -30,15 +22,11 @@
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Telegraf will cache metric_buffer_limit metrics for each output, and will
## flush this buffer on a successful write.
metric_buffer_limit = 1000
## Flush the buffer whenever full, regardless of flush_interval.
flush_buffer_when_full = true
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
@ -46,197 +34,58 @@
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
## Default flushing interval for all outputs. You shouldn't set this below
## interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s.
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Log at debug level.
# debug = false
## Log only error level messages.
# quiet = false
## Log target controls the destination for logs and can be one of "file",
## "stderr" or, on Windows, "eventlog". When set to "file", the output file
## is determined by the "logfile" setting.
# logtarget = "file"
## Name of the file to be logged to when using the "file" logtarget. If set to
## the empty string then logs are written to stderr.
# logfile = ""
## The logfile will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed. Logs are rotated only when
## written to, if there is no log activity rotation may be delayed.
# logfile_rotation_interval = "0d"
## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# logfile_rotation_max_size = "0MB"
## Maximum number of rotated archives to keep, any older logs are deleted.
## If set to -1, no archives are removed.
# logfile_rotation_max_archives = 5
## Logging configuration:
## Run telegraf in debug mode
debug = false
## Run telegraf in quiet mode
quiet = false
## Specify the log file name. The empty string means to log to stdout.
logfile = "/Program Files/Telegraf/telegraf.log"
## Override default hostname, if empty use os.Hostname()
hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
omit_hostname = false
###############################################################################
# OUTPUT PLUGINS #
# OUTPUTS #
###############################################################################
# Configuration for sending metrics to InfluxDB
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
## The full HTTP or UDP URL for your InfluxDB instance.
##
## Multiple URLs can be specified for a single cluster, only ONE of the
## urls will be written to each interval.
# urls = ["unix:///var/run/influxdb.sock"]
# urls = ["udp://127.0.0.1:8089"]
# urls = ["http://127.0.0.1:8086"]
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster, this means that only ONE of the urls will be written to each interval.
# urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
urls = ["http://127.0.0.1:8086"] # required
# The target database for metrics (telegraf will create it if not exists)
database = "telegraf" # required
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
# note: using second precision greatly helps InfluxDB compression
precision = "s"
## The target database for metrics; will be created as needed.
## For UDP url endpoint database needs to be configured on server side.
# database = "telegraf"
## The value of this tag will be used to determine the database. If this
## tag is not set the 'database' option is used as the default.
# database_tag = ""
## If true, the 'database_tag' will not be included in the written metric.
# exclude_database_tag = false
## If true, no CREATE DATABASE queries will be sent. Set to true when using
## Telegraf with a user without permissions to create databases or when the
## database already exists.
# skip_database_creation = false
## Name of existing retention policy to write to. Empty string writes to
## the default retention policy. Only takes effect when using HTTP.
# retention_policy = ""
## The value of this tag will be used to determine the retention policy. If this
## tag is not set the 'retention_policy' option is used as the default.
# retention_policy_tag = ""
## If true, the 'retention_policy_tag' will not be included in the written metric.
# exclude_retention_policy_tag = false
## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
## Only takes effect when using HTTP.
# write_consistency = "any"
## Timeout for HTTP messages.
# timeout = "5s"
## HTTP Basic Auth
## Write timeout (for the InfluxDB client), formatted as a string.
## If not provided, will default to 5s. 0s means no timeout (not recommended).
timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
## HTTP User-Agent
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
## UDP payload size is the maximum packet size to send.
# udp_payload = "512B"
## Optional TLS Config for use on HTTP connections.
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## HTTP Proxy override, if unset values the standard proxy environment
## variables are consulted to determine which proxy, if any, should be used.
# http_proxy = "http://corporate.proxy:3128"
## Additional HTTP headers
# http_headers = {"X-Special-Header" = "Special-Value"}
## HTTP Content-Encoding for write request body, can be set to "gzip" to
## compress body or "identity" to apply no encoding.
# content_encoding = "identity"
## When true, Telegraf will output unsigned integers as unsigned values,
## i.e.: "42u". You will need a version of InfluxDB supporting unsigned
## integer values. Enabling this option will result in field type errors if
## existing data has been written.
# influx_uint_support = false
# # Configuration for sending metrics to InfluxDB
# [[outputs.influxdb_v2]]
# ## The URLs of the InfluxDB cluster nodes.
# ##
# ## Multiple URLs can be specified for a single cluster, only ONE of the
# ## urls will be written to each interval.
# ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
# urls = ["http://127.0.0.1:9999"]
#
# ## Token for authentication.
# token = ""
#
# ## Organization is the name of the organization you wish to write to; must exist.
# organization = ""
#
# ## Destination bucket to write into.
# bucket = ""
#
# ## The value of this tag will be used to determine the bucket. If this
# ## tag is not set the 'bucket' option is used as the default.
# # bucket_tag = ""
#
# ## If true, the bucket tag will not be added to the metric.
# # exclude_bucket_tag = false
#
# ## Timeout for HTTP messages.
# # timeout = "5s"
#
# ## Additional HTTP headers
# # http_headers = {"X-Special-Header" = "Special-Value"}
#
# ## HTTP Proxy override, if unset values the standard proxy environment
# ## variables are consulted to determine which proxy, if any, should be used.
# # http_proxy = "http://corporate.proxy:3128"
#
# ## HTTP User-Agent
# # user_agent = "telegraf"
#
# ## Content-Encoding for write request body, can be set to "gzip" to
# ## compress body or "identity" to apply no encoding.
# # content_encoding = "gzip"
#
# ## Enable or disable uint support for writing uints influxdb 2.0.
# # influx_uint_support = false
#
# ## Optional TLS Config for use on HTTP connections.
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
###############################################################################
# INPUT PLUGINS #
# INPUTS #
###############################################################################
# Windows Performance Counters plugin.
# These are the recommended method of monitoring system metrics on windows,
# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI,
@ -271,8 +120,8 @@
"% Disk Time",
"% Disk Read Time",
"% Disk Write Time",
"% Free Space",
"Current Disk Queue Length",
"% Free Space",
"Free Megabytes",
]
Measurement = "win_disk"
@ -338,6 +187,7 @@
"Standby Cache Reserve Bytes",
"Standby Cache Normal Priority Bytes",
"Standby Cache Core Bytes",
]
# Use 6 x - to remove the Instance bit from the query.
Instances = ["------"]
@ -355,31 +205,44 @@
Instances = ["_Total"]
Measurement = "win_swap"
[[inputs.win_perf_counters.object]]
ObjectName = "Network Interface"
Instances = ["*"]
Counters = [
"Bytes Sent/sec",
"Bytes Received/sec",
"Packets Sent/sec",
"Packets Received/sec",
"Packets Received Discarded",
"Packets Received Errors",
"Packets Outbound Discarded",
"Packets Outbound Errors",
]
# Windows system plugins using WMI (disabled by default, using
# win_perf_counters over WMI is recommended)
# # Read metrics about cpu usage
# [[inputs.cpu]]
# ## Whether to report per-cpu stats or not
# percpu = true
# ## Whether to report total system cpu stats or not
# totalcpu = true
# ## If true, collect raw CPU time metrics.
# collect_cpu_time = false
# ## If true, compute and report the sum of all non-idle CPU states.
# report_active = false
# ## Comment this line if you want the raw CPU time metrics
# fielddrop = ["time_*"]
# # Read metrics about disk usage by mount point
# [[inputs.disk]]
# ## By default stats will be gathered for all mount points.
# ## Set mount_points will restrict the stats to only the specified mount points.
# # mount_points = ["/"]
# ## By default, telegraf gather stats for all mountpoints.
# ## Setting mountpoints will restrict the stats to the specified mountpoints.
# ## mount_points=["/"]
#
# ## Ignore mount points by filesystem type.
# ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
# ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
# ## present on /run, /var/run, /dev/shm or /dev).
# # ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
# # Read metrics about disk IO by device
@ -387,26 +250,9 @@
# ## By default, telegraf will gather stats for all devices including
# ## disk partitions.
# ## Setting devices will restrict the stats to the specified devices.
# # devices = ["sda", "sdb", "vd*"]
# ## Uncomment the following line if you need disk serial numbers.
# # skip_serial_number = false
# #
# ## On systems which support it, device metadata can be added in the form of
# ## tags.
# ## Currently only Linux is supported via udev properties. You can view
# ## available properties for a device by running:
# ## 'udevadm info -q property -n /dev/sda'
# # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
# #
# ## Using the same metadata source as device_tags, you can also customize the
# ## name of the device via templates.
# ## The 'name_templates' parameter is a list of templates to try and apply to
# ## the device. The template may contain variables in the form of '$PROPERTY' or
# ## '${PROPERTY}'. The first template which does not contain any variables not
# ## present for the device is used as the device name tag.
# ## The typical use case is for LVM volumes, to get the VG/LV name instead of
# ## the near-meaningless DM-0 name.
# # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
# ## devices = ["sda", "sdb"]
# ## Uncomment the following line if you do not need disk serial numbers.
# ## skip_serial_number = true
# # Read metrics about memory usage
@ -417,3 +263,4 @@
# # Read metrics about swap memory usage
# [[inputs.swap]]
# # no configuration


@ -37,24 +37,6 @@ func TestCompile(t *testing.T) {
assert.True(t, f.Match("network"))
}
func TestIncludeExclude(t *testing.T) {
tags := []string{}
labels := []string{"best", "com_influxdata", "timeseries", "com_influxdata_telegraf", "ever"}
filter, err := NewIncludeExcludeFilter([]string{}, []string{"com_influx*"})
if err != nil {
t.Fatalf("Failed to create include/exclude filter - %v", err)
}
for i := range labels {
if filter.Match(labels[i]) {
tags = append(tags, labels[i])
}
}
assert.Equal(t, []string{"best", "timeseries", "ever"}, tags)
}
var benchbool bool
func BenchmarkFilterSingleNoGlobFalse(b *testing.B) {

161
go.mod

@ -1,161 +0,0 @@
module github.com/influxdata/telegraf
go 1.12
require (
cloud.google.com/go v0.53.0
cloud.google.com/go/datastore v1.1.0 // indirect
cloud.google.com/go/pubsub v1.2.0
code.cloudfoundry.org/clock v1.0.0 // indirect
collectd.org v0.3.0
github.com/Azure/azure-event-hubs-go/v3 v3.2.0
github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687
github.com/Azure/go-autorest/autorest v0.9.3
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2
github.com/BurntSushi/toml v0.3.1
github.com/ChimeraCoder/anaconda v2.0.0+incompatible
github.com/ChimeraCoder/tokenbucket v0.0.0-20131201223612-c5a927568de7 // indirect
github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee
github.com/Microsoft/ApplicationInsights-Go v0.4.2
github.com/Microsoft/go-winio v0.4.9 // indirect
github.com/Shopify/sarama v1.24.1
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6
github.com/aerospike/aerospike-client-go v1.27.0
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4
github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9
github.com/apache/thrift v0.12.0
github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect
github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740
github.com/armon/go-metrics v0.3.0 // indirect
github.com/aws/aws-sdk-go v1.30.9
github.com/azr/backoff v0.0.0-20160115115103-53511d3c7330 // indirect
github.com/benbjohnson/clock v1.0.2
github.com/bitly/go-hostpool v0.1.0 // indirect
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
github.com/caio/go-tdigest v2.3.0+incompatible // indirect
github.com/cenkalti/backoff v2.0.0+incompatible // indirect
github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6
github.com/cockroachdb/apd v1.1.0 // indirect
github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037
github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 // indirect
github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a // indirect
github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4
github.com/dgrijalva/jwt-go v3.2.0+incompatible
github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible // indirect
github.com/docker/docker v1.4.2-0.20180327123150-ed7b6428c133
github.com/docker/go-connections v0.3.0 // indirect
github.com/docker/go-units v0.3.3 // indirect
github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166
github.com/dustin/go-jsonpointer v0.0.0-20160814072949-ba0abeacc3dc // indirect
github.com/dustin/gojson v0.0.0-20160307161227-2e71ec9dd5ad // indirect
github.com/eclipse/paho.mqtt.golang v1.2.0
github.com/ericchiang/k8s v1.2.0
github.com/garyburd/go-oauth v0.0.0-20180319155456-bca2e7f09a17 // indirect
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96
github.com/go-logfmt/logfmt v0.4.0
github.com/go-ole/go-ole v1.2.1 // indirect
github.com/go-redis/redis v6.12.0+incompatible
github.com/go-sql-driver/mysql v1.5.0
github.com/goburrow/modbus v0.1.0
github.com/goburrow/serial v0.1.0 // indirect
github.com/gobwas/glob v0.2.3
github.com/gofrs/uuid v2.1.0+incompatible
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec
github.com/golang/protobuf v1.3.5
github.com/google/go-cmp v0.4.0
github.com/google/go-github v17.0.0+incompatible
github.com/google/go-querystring v1.0.0 // indirect
github.com/gorilla/mux v1.6.2
github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0
github.com/hashicorp/consul v1.2.1
github.com/hashicorp/go-msgpack v0.5.5 // indirect
github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 // indirect
github.com/hashicorp/memberlist v0.1.5 // indirect
github.com/hashicorp/serf v0.8.1 // indirect
github.com/influxdata/go-syslog/v2 v2.0.1
github.com/influxdata/tail v1.0.1-0.20180327235535-c43482518d41
github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65
github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect
github.com/jackc/pgx v3.6.0+incompatible
github.com/jcmturner/gofork v1.0.0 // indirect
github.com/kardianos/service v1.0.0
github.com/karrick/godirwalk v1.12.0
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
github.com/klauspost/compress v1.9.2 // indirect
github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 // indirect
github.com/lib/pq v1.3.0 // indirect
github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1
github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe
github.com/miekg/dns v1.0.14
github.com/mitchellh/go-testing-interface v1.0.0 // indirect
github.com/mmcdole/gofeed v1.0.0
github.com/multiplay/go-ts3 v1.0.0
github.com/naoina/go-stringutil v0.1.0 // indirect
github.com/nats-io/nats-server/v2 v2.1.4
github.com/nats-io/nats.go v1.9.1
github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0
github.com/nsqio/go-nsq v1.0.7
github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029
github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect
github.com/opentracing/opentracing-go v1.0.2 // indirect
github.com/openzipkin/zipkin-go-opentracing v0.3.4
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.5.1
github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.9.1
github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664
github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec // indirect
github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect
github.com/shirou/gopsutil v2.20.2+incompatible
github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect
github.com/sirupsen/logrus v1.4.2
github.com/soniah/gosnmp v1.25.0
github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8
github.com/stretchr/testify v1.5.1
github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62
github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 // indirect
github.com/tidwall/gjson v1.3.0
github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e // indirect
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect
github.com/vjeantet/grok v1.0.0
github.com/vmware/govmomi v0.19.0
github.com/wavefronthq/wavefront-sdk-go v0.9.2
github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf
github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a // indirect
github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 // indirect
golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
golang.org/x/net v0.0.0-20200301022130-244492dfa37a
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a // indirect
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4
golang.org/x/tools v0.0.0-20200317043434-63da46f3035e // indirect
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4
gonum.org/v1/gonum v0.6.2 // indirect
google.golang.org/api v0.20.0
google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24
google.golang.org/grpc v1.28.0
gopkg.in/fatih/pool.v2 v2.0.0 // indirect
gopkg.in/gorethink/gorethink.v3 v3.0.5
gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect
gopkg.in/ldap.v3 v3.1.0
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce
gopkg.in/olivere/elastic.v5 v5.0.70
gopkg.in/yaml.v2 v2.2.5
gotest.tools v2.2.0+incompatible // indirect
honnef.co/go/tools v0.0.1-2020.1.3 // indirect
k8s.io/apimachinery v0.17.1 // indirect
)
// replaced due to https://github.com/satori/go.uuid/issues/73
replace github.com/satori/go.uuid => github.com/gofrs/uuid v3.2.0+incompatible

915
go.sum

@ -1,915 +0,0 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU=
cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0 h1:MZQCQQaRwOrAcuKjiHWHrgKykt4fZyuwF2dtiG3fGW8=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o=
code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8=
collectd.org v0.3.0 h1:iNBHGw1VvPJxH2B6RiFWFZ+vsjo1lCdRszBeOuwGi00=
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
gitea.statsd.de/dom/telegraf v0.10.1 h1:RZNof67areTIGhj1hZW1cAZ/4Dbz7HyKVAZ5dTphbuw=
github.com/Azure/azure-amqp-common-go/v3 v3.0.0 h1:j9tjcwhypb/jek3raNrwlCIl7iKQYOug7CLpSyBBodc=
github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg=
github.com/Azure/azure-event-hubs-go/v3 v3.2.0 h1:CQlxKH5a4NX1ZmbdqXUPRwuNGh2XvtgmhkZvkEuWzhs=
github.com/Azure/azure-event-hubs-go/v3 v3.2.0/go.mod h1:BPIIJNH/l/fVHYq3Rm6eg4clbrULrQ3q7+icmqHyyLc=
github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
github.com/Azure/azure-pipeline-go v0.1.9 h1:u7JFb9fFTE6Y/j8ae2VK33ePrRqJqoCM/IWkQdAZ+rg=
github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
github.com/Azure/azure-sdk-for-go v37.1.0+incompatible h1:aFlw3lP7ZHQi4m1kWCpcwYtczhDkGhDoRaMTaxcOf68=
github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y=
github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687 h1:7MiZ6Th+YTmwUdrKmFg5OMsGYz7IdQwjqL0RPxkhhOQ=
github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8=
github.com/Azure/go-amqp v0.12.6 h1:34yItuwhA/nusvq2sPSNPQxZLCf/CtaogYH8n578mnY=
github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.9.3 h1:OZEIaBbMdUE/Js+BQKlpO81XlISgipr6yDJ+PSwsgi4=
github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.8.1 h1:pZdL8o72rK+avFWl+p9nE8RWi1JInZrWJYlnpfXJwHk=
github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8=
github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4=
github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/ChimeraCoder/anaconda v1.0.0 h1:B7KZV+CE2iwbC15sh+rh5vaWs4+XJx1XC4iHvHtsZrQ=
github.com/ChimeraCoder/anaconda v2.0.0+incompatible h1:F0eD7CHXieZ+VLboCD5UAqCeAzJZxcr90zSCcuJopJs=
github.com/ChimeraCoder/anaconda v2.0.0+incompatible/go.mod h1:TCt3MijIq3Qqo9SBtuW/rrM4x7rDfWqYWHj8T7hLcLg=
github.com/ChimeraCoder/tokenbucket v0.0.0-20131201223612-c5a927568de7 h1:r+EmXjfPosKO4wfiMLe1XQictsIlhErTufbWUsjOTZs=
github.com/ChimeraCoder/tokenbucket v0.0.0-20131201223612-c5a927568de7/go.mod h1:b2EuEMLSG9q3bZ95ql1+8oVqzzrTNSiOQqSXWFBzxeI=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee h1:atI/FFjXh6hIVlPE1Jup9m8N4B9q/OSbMUe2EBahs+w=
github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0TUYrFEIAE5uGJ29LQOeONIgMdP4Rkqb8HUnPM=
github.com/Microsoft/ApplicationInsights-Go v0.4.2 h1:HIZoGXMiKNwAtMAgCSSX35j9mP+DjGF9ezfBvxMDLLg=
github.com/Microsoft/ApplicationInsights-Go v0.4.2/go.mod h1:CukZ/G66zxXtI+h/VcVn3eVVDGDHfXM2zVILF7bMmsg=
github.com/Microsoft/go-winio v0.4.9 h1:3RbgqgGVqmcpbOiwrjbVtDHLlJBGF6aE+yHmNtBNsFQ=
github.com/Microsoft/go-winio v0.4.9/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/PuerkitoBio/goquery v1.5.0 h1:uGvmFXOA73IKluu/F84Xd1tt/z07GYm8X49XKHP7EJk=
github.com/PuerkitoBio/goquery v1.5.0/go.mod h1:qD2PgZ9lccMbQlc7eEOjaeRlFQON7xY8kdmcsrnKqMg=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/sarama v1.24.1 h1:svn9vfN3R1Hz21WR2Gj0VW9ehaDGkiOS+VqlIcZOkMI=
github.com/Shopify/sarama v1.24.1/go.mod h1:fGP8eQ6PugKEI0iUETYYtnP6d1pH/bdDMTel1X5ajsU=
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/aerospike/aerospike-client-go v1.27.0 h1:VC6/Wqqm3Qlp4/utM7Zts3cv4A2HPn8rVFp/XZKTWgE=
github.com/aerospike/aerospike-client-go v1.27.0/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ=
github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc=
github.com/andybalholm/cascadia v1.0.0 h1:hOCXnnZ5A+3eVDX8pvgl4kofXv2ELss0bKcqRySc45o=
github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 h1:Bmjk+DjIi3tTAU0wxGaFbfjGUqlxxSXARq9A96Kgoos=
github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA=
github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 h1:FD4/ikKOFxwP8muWDypbmBWc634+YcAs3eBrYAmRdZY=
github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU=
github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs=
github.com/aws/aws-sdk-go v1.30.9 h1:DntpBUKkchINPDbhEzDRin1eEn1TG9TZFlzWPf0i8to=
github.com/aws/aws-sdk-go v1.30.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/azr/backoff v0.0.0-20160115115103-53511d3c7330 h1:ekDALXAVvY/Ub1UtNta3inKQwZ/jMB/zpOtD8rAYh78=
github.com/azr/backoff v0.0.0-20160115115103-53511d3c7330/go.mod h1:nH+k0SvAt3HeiYyOlJpLLv1HG1p7KWP7qU9QPp2/pCo=
github.com/benbjohnson/clock v1.0.2 h1:Z0CN0Yb4ig9sGPXkvAQcGJfnrrMQ5QYLCMPRi9iD7YE=
github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0=
github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/caio/go-tdigest v2.3.0+incompatible h1:zP6nR0nTSUzlSqqr7F/LhslPlSZX/fZeGmgmwj2cxxY=
github.com/caio/go-tdigest v2.3.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI=
github.com/cenkalti/backoff v2.0.0+incompatible h1:5IIPUHhlnUZbcHQsQou5k1Tn58nJkeJL9U+ig5CHJbY=
github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 h1:57RI0wFkG/smvVTcz7F43+R0k+Hvci3jAVQF9lyMoOo=
github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod h1:ugEfq4B8T8ciw/h5mCkgdiDRFS4CkqqhH2dymDB4knc=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA=
github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 h1:Dbz60fpCq04vRxVVVJLbQuL0G7pRt0Gyo2BkozFc4SQ=
github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U=
github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 h1:F8nmbiuX+gCz9xvWMi6Ak8HQntB4ATFXP46gaxifbp4=
github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c=
github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a h1:Y5XsLCEhtEI8qbD9RP3Qlv5FXdTDHxZM9UPUnMRgBp8=
github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 h1:YcpmyvADGYw5LqMnHqSkyIELsHCGF6PkrmM31V8rF7o=
github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA=
github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible h1:357nGVUC8gSpeSc2Axup8HfrfTLLUfWfCsCUhiQSKIg=
github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20180327123150-ed7b6428c133 h1:Kus8nU6ctI/u/l86ljUJl6GpUtmO7gtD/krn4u5dr0M=
github.com/docker/docker v1.4.2-0.20180327123150-ed7b6428c133/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF+n1M6o=
github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166 h1:KgEcrKF0NWi9GT/OvDp9ioXZIrHRbP8S5o+sot9gznQ=
github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/dustin/go-jsonpointer v0.0.0-20160814072949-ba0abeacc3dc h1:tP7tkU+vIsEOKiK+l/NSLN4uUtkyuxc6hgYpQeCWAeI=
github.com/dustin/go-jsonpointer v0.0.0-20160814072949-ba0abeacc3dc/go.mod h1:ORH5Qp2bskd9NzSfKqAF7tKfONsEkCarTE5ESr/RVBw=
github.com/dustin/gojson v0.0.0-20160307161227-2e71ec9dd5ad h1:Qk76DOWdOp+GlyDKBAG3Klr9cn7N+LcYc82AZ2S7+cA=
github.com/dustin/gojson v0.0.0-20160307161227-2e71ec9dd5ad/go.mod h1:mPKfmRa823oBIgl2r20LeMSpTAteW5j7FLkc0vjmzyQ=
github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0=
github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ericchiang/k8s v1.2.0 h1:vxrMwEzY43oxu8aZyD/7b1s8tsBM+xoUoxjWECWFbPI=
github.com/ericchiang/k8s v1.2.0/go.mod h1:/OmBgSq2cd9IANnsGHGlEz27nwMZV2YxlpXuQtU3Bz4=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/frankban/quicktest v1.4.1 h1:Wv2VwvNn73pAdFIVUQRXYDFp31lXKbqblIXo/Q5GPSg=
github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/garyburd/go-oauth v0.0.0-20180319155456-bca2e7f09a17 h1:GOfMz6cRgTJ9jWV0qAezv642OhPnKEG7gtUjJSdStHE=
github.com/garyburd/go-oauth v0.0.0-20180319155456-bca2e7f09a17/go.mod h1:HfkOCN6fkKKaPSAeNq/er3xObxTW4VLeY6UUK895gLQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew=
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I=
github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96 h1:YpooqMW354GG47PXNBiaCv6yCQizyP3MXD9NUPrCEQ8=
github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96/go.mod h1:uY+1eqFUyotrQxF1wYFNtMeHp/swbYRsoGzfcPZ8x3o=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-redis/redis v6.12.0+incompatible h1:s+64XI+z/RXqGHz2fQSgRJOEwqqSXeX3dliF7iVkMbE=
github.com/go-redis/redis v6.12.0+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/goburrow/modbus v0.1.0 h1:DejRZY73nEM6+bt5JSP6IsFolJ9dVcqxsYbpLbeW/ro=
github.com/goburrow/modbus v0.1.0/go.mod h1:Kx552D5rLIS8E7TyUwQ/UdHEqvX5T8tyiGBTlzMcZBg=
github.com/goburrow/serial v0.1.0 h1:v2T1SQa/dlUqQiYIT8+Cu7YolfqAi3K96UmhwYyuSrA=
github.com/goburrow/serial v0.1.0/go.mod h1:sAiqG0nRVswsm1C97xsttiYCzSLBmUZ/VSlVLZJ8haA=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/gofrs/uuid v2.1.0+incompatible h1:8oEj3gioPmmDAOLQUZdnW+h4FZu9aSE/SQIas1E9pzA=
github.com/gofrs/uuid v2.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=
github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 h1:U0KvGD9CJIl1nbgu9yLsfWxMT6WqL8fG0IBB7RvOZZQ=
github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0/go.mod h1:dk23l2BruuUzRP8wbybQbPn3J7sZga2QHICCeaEy5rQ=
github.com/hashicorp/consul v1.2.1 h1:66MuuTfV4aOXTQM7cjAIKUWFOITSk4XZlMhE09ymVbg=
github.com/hashicorp/consul v1.2.1/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:VBj0QYQ0u2MCJzBfeYXGexnAl17GsH1yidnoxCqqD9E=
github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg=
github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/memberlist v0.1.5 h1:AYBsgJOW9gab/toO5tEB8lWetVgDKZycqkebJ8xxpqM=
github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.1 h1:mYs6SMzu72+90OcPa5wr3nfznA4Dw9UyR791ZFNOIf4=
github.com/hashicorp/serf v0.8.1/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/influxdata/go-syslog/v2 v2.0.1 h1:l44S4l4Q8MhGQcoOxJpbo+QQYxJqp0vdgIVHh4+DO0s=
github.com/influxdata/go-syslog/v2 v2.0.1/go.mod h1:hjvie1UTaD5E1fTnDmxaCw8RRDrT4Ve+XHr5O2dKSCo=
github.com/influxdata/tail v1.0.1-0.20180327235535-c43482518d41 h1:HxQo1NpNXQDpvEBzthbQLmePvTLFTa5GzSFUjL03aEs=
github.com/influxdata/tail v1.0.1-0.20180327235535-c43482518d41/go.mod h1:xTFF2SILpIYc5N+Srb0d5qpx7d+f733nBrbasb13DtQ=
github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 h1:vvyMtD5LTJc1W9sQKjDkAWdcg0478CszSdzlHtiAXCY=
github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP/bTpQItGZNNUMISDMDAnTXu9UqJ4yT3ocz8=
github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 h1:W2IgzRCb0L9VzMujq/QuTaZUKcH8096jWwP519mHN6Q=
github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8/go.mod h1:/2NMgWB1DHM1ti/gqhOlg+LJeBVk6FqR5aVGYY0hlwI=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
github.com/jackc/pgx v3.6.0+incompatible h1:bJeo4JdVbDAW8KB2m8XkFeo8CPipREoG37BwEoKGz+Q=
github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 h1:K//n/AqR5HjG3qxbrBCL4vJPW0MVFSs9CPK1OOJdRME=
github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw=
github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4 h1:nwOc1YaOrYJ37sEBrtWZrdqzK22hiJs3GpDmP3sR2Yw=
github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ=
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/kardianos/service v1.0.0 h1:HgQS3mFfOlyntWX8Oke98JcJLqt1DBcHR4kxShpYef0=
github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo=
github.com/karrick/godirwalk v1.12.0 h1:nkS4xxsjiZMvVlazd0mFyiwD4BR9f3m6LXGhM2TUx3Y=
github.com/karrick/godirwalk v1.12.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCWIWY=
github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee h1:MB75LRhfeLER2RF7neSVpYuX/lL8aPi3yPtv5vdOJmk=
github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee/go.mod h1:Pe/YBTPc3vqoMkbuIWPH8CF9ehINdvNyS0dP3J6HC0s=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 h1:X/79QL0b4YJVO5+OsPH9rF2u428CIrGL/jLmPsoOQQ4=
github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy+E1PZ3C9ctsPRlvOPAFPkCNlcPBDkt0N3U=
github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRCs1Heq84lurVinUPy19keqGEe4jh5vtK37jcg=
github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg=
github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU=
github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 h1:8/+Y8SKf0xCZ8cCTfnrMdY7HNzlEjPAt3bPjalNb6CA=
github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe h1:yMrL+YorbzaBpj/h3BbLMP+qeslPZYMbzcpHFBNy1Yk=
github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe/go.mod h1:y3mw3VG+t0m20OMqpG8RQqw8cDXvShVb+L8Z8FEnebw=
github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0=
github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc=
github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA=
github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M=
github.com/mdlayher/netlink v1.1.0 h1:mpdLgm+brq10nI9zM1BpX1kpDbh3NLl3RSnVq6ZSkfg=
github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY=
github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws=
github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mmcdole/gofeed v1.0.0 h1:PHqwr8fsEm8xarj9s53XeEAFYhRM3E9Ib7Ie766/LTE=
github.com/mmcdole/gofeed v1.0.0/go.mod h1:tkVcyzS3qVMlQrQxJoEH1hkTiuo9a8emDzkMi7TZBu0=
github.com/mmcdole/goxpp v0.0.0-20181012175147-0068e33feabf h1:sWGE2v+hO0Nd4yFU/S/mDBM5plIU8v/Qhfz41hkDIAI=
github.com/mmcdole/goxpp v0.0.0-20181012175147-0068e33feabf/go.mod h1:pasqhqstspkosTneA62Nc+2p9SOBBYAPbnmRRWPQ0V8=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/multiplay/go-ts3 v1.0.0 h1:loxtEFqvYtpoGh1jOqEt6aDzctYuQsi3vb3dMpvWiWw=
github.com/multiplay/go-ts3 v1.0.0/go.mod h1:14S6cS3fLNT3xOytrA/DkRyAFNuQLMLEqOYAsf87IbQ=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks=
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
github.com/nats-io/jwt v0.3.2 h1:+RB5hMpXUUA2dfxuhBTEkMOrYmM+gKIZYS1KjSostMI=
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
github.com/nats-io/nats-server/v2 v2.1.4 h1:BILRnsJ2Yb/fefiFbBWADpViGF69uh4sxe8poVDQ06g=
github.com/nats-io/nats-server/v2 v2.1.4/go.mod h1:Jw1Z28soD/QasIA2uWjXyM9El1jly3YwyFOuR8tH1rg=
github.com/nats-io/nats.go v1.9.1 h1:ik3HbLhZ0YABLto7iX80pZLPw/6dx3T+++MZJwLnMrQ=
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3 h1:6JrEfig+HzTH85yxzhSVbjHRJv9cn0p6n3IngIcM5/k=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0 h1:W8+lNIfAldCScGiikToSprbf3DCaMXk0VIM9l73BIpY=
github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0/go.mod h1:G9MqE/cHGv3Hx3qpYhfuyFUsGx2DpVcGi1iJIqTg+JQ=
github.com/nsqio/go-nsq v1.0.7 h1:O0pIZJYTf+x7cZBA0UMY8WxFG79lYTURmWzAAh48ljY=
github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 h1:lXQqyLroROhwR2Yq/kXbLzVecgmVeZh2TFLg6OxCd+w=
github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go-opentracing v0.3.4 h1:x/pBv/5VJNWkcHF1G9xqhug8Iw7X1y1zOMzDmyuvP2g=
github.com/openzipkin/zipkin-go-opentracing v0.3.4/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw=
github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA=
github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk=
github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec h1:6ncX5ko6B9LntYM0YBRXkiSaZMmLYeZ/NWcmeB43mMY=
github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shirou/gopsutil v2.20.2+incompatible h1:ucK79BhBpgqQxPASyS2cu9HX8cfDVljBN1WWFvbNvgY=
github.com/shirou/gopsutil v2.20.2+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A=
github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/soniah/gosnmp v1.25.0 h1:0y8vpjD07NPmnT+wojnUrKkYLX9Fxw1jI4cGTumWugQ=
github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5QeUzVuQ=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 h1:l6epF6yBwuejBfhGkM5m8VSNM/QAm7ApGyH35ehA7eQ=
github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8/go.mod h1:1WNBiOZtZQLpVAyu0iTduoJL9hEsMloAK5XWrtW0xdY=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOsk3ij21QjjEgAcVSeo9nkp0dI//cD2o=
github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw=
github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 h1:mujcChM89zOHwgZBBNr5WZ77mBXP1yR+gLThGCYZgAg=
github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0=
github.com/tidwall/gjson v1.3.0 h1:kfpsw1W3trbg4Xm6doUtqSl9+LhLB6qJ9PkltVAQZYs=
github.com/tidwall/gjson v1.3.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e h1:f1yevOHP+Suqk0rVc13fIkzcLULJbyQcXDba2klljD0=
github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc h1:R83G5ikgLMxrBvLh22JhdfI8K6YXEPHx5P03Uu3DRs4=
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
github.com/vjeantet/grok v1.0.0 h1:uxMqatJP6MOFXsj6C1tZBnqqAThQEeqnizUZ48gSJQQ=
github.com/vjeantet/grok v1.0.0/go.mod h1:/FWYEVYekkm+2VjcFmO9PufDU5FgXHUz9oy2EGqmQBo=
github.com/vmware/govmomi v0.19.0 h1:CR6tEByWCPOnRoRyhLzuHaU+6o2ybF3qufNRWS/MGrY=
github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
github.com/wavefronthq/wavefront-sdk-go v0.9.2 h1:/LvWgZYNjHFUg+ZUX+qv+7e+M8sEMi0lM15zPp681Gk=
github.com/wavefronthq/wavefront-sdk-go v0.9.2/go.mod h1:hQI6y8M9OtTCtc0xdwh+dCER4osxXdEAeCpacjpDZEU=
github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf h1:TOV5PC6fIWwFOFra9xJfRXZcL2pLhMI8oNuDugNxg9Q=
github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf/go.mod h1:nxx7XRXbR9ykhnC8lXqQyJS0rfvJGxKyKw/sT1YOttg=
github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a h1:ILoU84rj4AQ3q6cjQvtb9jBjx4xzR/Riq/zYhmDQiOk=
github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a/go.mod h1:vQQATAGxVK20DC1rRubTJbZDDhhpA4QfU02pMdPxGO4=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 h1:f6CCNiTjQZ0uWK4jPwhwYB8QIGGfn0ssD9kVzRUUUpk=
github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU=
go.opencensus.io v0.20.1 h1:pMEjRZ1M4ebWGikflH7nQpV6+Zr88KBMA2XJD3sbijw=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 h1:+ELyKg6m8UBf0nPFSqD0mi7zUfwPyXo23HNjMnXPz7w=
golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2 h1:y102fOLFqhV41b+4GPiJoa0k/x+pJcEi2/HB1Y5T6fU=
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200317043434-63da46f3035e h1:8ogAbHWoJTPepnVbNRqXLOpzMkl0rtRsM7crbflc4XM=
golang.org/x/tools v0.0.0-20200317043434-63da46f3035e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.zx2c4.com/wireguard v0.0.20200121 h1:vcswa5Q6f+sylDfjqyrVNNrjsFUUbPsgAQTBCAg/Qf8=
golang.zx2c4.com/wireguard v0.0.20200121/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4=
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 h1:KTi97NIQGgSMaN0v/oxniJV0MEzfzmrDUOAWxombQVc=
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:UdS9frhv65KTfwxME1xE8+rHYoFpbm36gOud1GhBe9c=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.6.2 h1:4r+yNT0+8SWcOkXP+63H2zQbN+USnC73cjGUxnDF94Q=
gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo=
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24 h1:IGPykv426z7LZSVPlaPufOyphngM4at5uZ7x5alaFvE=
google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg=
gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y2O4MU=
gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=
gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI=
gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
gopkg.in/jcmturner/gokrb5.v7 v7.3.0 h1:0709Jtq/6QXEuWRfAm260XqlpcwL1vxtO1tUE2qK8Z4=
gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
gopkg.in/ldap.v3 v3.1.0 h1:DIDWEjI7vQWREh0S8X5/NFPCZ3MCVd55LmXKPW4XLGE=
gopkg.in/ldap.v3 v3.1.0/go.mod h1:dQjCc0R0kfyFjIlWNMH1DORwUASZyDxo2Ry1B51dXaQ=
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU=
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/olivere/elastic.v5 v5.0.70 h1:DqFG2Odzs74JCz6SssgJjd6qpGnsOAzNc7+l5EnvsnE=
gopkg.in/olivere/elastic.v5 v5.0.70/go.mod h1:FylZT6jQWtfHsicejzOm3jIMVPOAksa80i3o+6qtQRk=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/apimachinery v0.17.1 h1:zUjS3szTxoUjTDYNvdFkYt2uMEXLcthcbp+7uZvWhYM=
k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=

View File

@ -1,7 +1,11 @@
package telegraf
type Input interface {
PluginDescriber
// SampleConfig returns the default configuration of the Input
SampleConfig() string
// Description returns a one-sentence description of the Input
Description() string
// Gather takes in an accumulator and adds the metrics that the Input
// gathers. This is called every "interval"
@ -9,10 +13,17 @@ type Input interface {
}
type ServiceInput interface {
Input
// SampleConfig returns the default configuration of the Input
SampleConfig() string
// Start the ServiceInput. The Accumulator may be retained and used until
// Stop returns.
// Description returns a one-sentence description of the Input
Description() string
// Gather takes in an accumulator and adds the metrics that the Input
// gathers. This is called every "interval"
Gather(Accumulator) error
// Start starts the ServiceInput's service, whatever that may be
Start(Accumulator) error
// Stop stops the services and closes any necessary channels and connections
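For orientation, here is a minimal sketch of a plugin that satisfies the Input interface shown above. The simple package name, field, and measurement are hypothetical and not part of this diff; the registration via inputs.Add follows the pattern telegraf input plugins generally use.

package simple

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

// Simple is a hypothetical input that reports a single constant field.
type Simple struct {
	Ok bool `toml:"ok"`
}

// SampleConfig returns the default configuration of the input.
func (s *Simple) SampleConfig() string {
	return "  ## Report a boolean field\n  ok = true\n"
}

// Description returns a one-sentence description of the input.
func (s *Simple) Description() string {
	return "a demo input that emits one constant metric"
}

// Gather is called every "interval" and adds metrics to the accumulator.
func (s *Simple) Gather(acc telegraf.Accumulator) error {
	acc.AddFields("simple", map[string]interface{}{"ok": s.Ok}, nil)
	return nil
}

func init() {
	inputs.Add("simple", func() telegraf.Input { return &Simple{} })
}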

76
internal/buffer/buffer.go Normal file
View File

@ -0,0 +1,76 @@
package buffer
import (
"sync"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/selfstat"
)
var (
MetricsWritten = selfstat.Register("agent", "metrics_written", map[string]string{})
MetricsDropped = selfstat.Register("agent", "metrics_dropped", map[string]string{})
)
// Buffer is an object for storing metrics in a circular buffer.
type Buffer struct {
buf chan telegraf.Metric
mu sync.Mutex
}
// NewBuffer returns a Buffer
// size is the maximum number of metrics that Buffer will cache. If Add is
// called when the buffer is full, then the oldest metric(s) will be dropped.
func NewBuffer(size int) *Buffer {
return &Buffer{
buf: make(chan telegraf.Metric, size),
}
}
// IsEmpty returns true if Buffer is empty.
func (b *Buffer) IsEmpty() bool {
return len(b.buf) == 0
}
// Len returns the current length of the buffer.
func (b *Buffer) Len() int {
return len(b.buf)
}
// Add adds metrics to the buffer.
func (b *Buffer) Add(metrics ...telegraf.Metric) {
for i := range metrics {
MetricsWritten.Incr(1)
select {
case b.buf <- metrics[i]:
default:
b.mu.Lock()
MetricsDropped.Incr(1)
<-b.buf
b.buf <- metrics[i]
b.mu.Unlock()
}
}
}
// Batch returns a batch of metrics of up to batchSize.
// The batch will be at most batchSize metrics long; it can be shorter
// if the Buffer holds fewer than batchSize metrics.
func (b *Buffer) Batch(batchSize int) []telegraf.Metric {
b.mu.Lock()
n := min(len(b.buf), batchSize)
out := make([]telegraf.Metric, n)
for i := 0; i < n; i++ {
out[i] = <-b.buf
}
b.mu.Unlock()
return out
}
func min(a, b int) int {
if b < a {
return b
}
return a
}
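A quick usage sketch (not part of the diff) of the drop-oldest behavior described in the comments above. testutil.TestMetric is the same helper the test file below uses, and the import of the internal package assumes the caller lives inside the telegraf module.

package main

import (
	"fmt"

	"github.com/influxdata/telegraf/internal/buffer"
	"github.com/influxdata/telegraf/testutil"
)

func main() {
	// Hold at most 5 metrics; adding more drops the oldest ones.
	buf := buffer.NewBuffer(5)

	for i := 0; i < 7; i++ {
		buf.Add(testutil.TestMetric(i, "example"))
	}

	// Len is capped at the buffer size; the two oldest metrics were dropped.
	fmt.Println(buf.Len()) // 5

	// Drain up to 3 metrics; the buffer keeps whatever is left.
	batch := buf.Batch(3)
	fmt.Println(len(batch), buf.Len()) // 3 2
}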

View File

@ -0,0 +1,100 @@
package buffer
import (
"testing"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
var metricList = []telegraf.Metric{
testutil.TestMetric(2, "mymetric1"),
testutil.TestMetric(1, "mymetric2"),
testutil.TestMetric(11, "mymetric3"),
testutil.TestMetric(15, "mymetric4"),
testutil.TestMetric(8, "mymetric5"),
}
func BenchmarkAddMetrics(b *testing.B) {
buf := NewBuffer(10000)
m := testutil.TestMetric(1, "mymetric")
for n := 0; n < b.N; n++ {
buf.Add(m)
}
}
func TestNewBufferBasicFuncs(t *testing.T) {
b := NewBuffer(10)
MetricsDropped.Set(0)
MetricsWritten.Set(0)
assert.True(t, b.IsEmpty())
assert.Zero(t, b.Len())
assert.Zero(t, MetricsDropped.Get())
assert.Zero(t, MetricsWritten.Get())
m := testutil.TestMetric(1, "mymetric")
b.Add(m)
assert.False(t, b.IsEmpty())
assert.Equal(t, b.Len(), 1)
assert.Equal(t, int64(0), MetricsDropped.Get())
assert.Equal(t, int64(1), MetricsWritten.Get())
b.Add(metricList...)
assert.False(t, b.IsEmpty())
assert.Equal(t, b.Len(), 6)
assert.Equal(t, int64(0), MetricsDropped.Get())
assert.Equal(t, int64(6), MetricsWritten.Get())
}
func TestDroppingMetrics(t *testing.T) {
b := NewBuffer(10)
MetricsDropped.Set(0)
MetricsWritten.Set(0)
// Add up to the size of the buffer
b.Add(metricList...)
b.Add(metricList...)
assert.False(t, b.IsEmpty())
assert.Equal(t, b.Len(), 10)
assert.Equal(t, int64(0), MetricsDropped.Get())
assert.Equal(t, int64(10), MetricsWritten.Get())
// Add 5 more and verify they were dropped
b.Add(metricList...)
assert.False(t, b.IsEmpty())
assert.Equal(t, b.Len(), 10)
assert.Equal(t, int64(5), MetricsDropped.Get())
assert.Equal(t, int64(15), MetricsWritten.Get())
}
func TestGettingBatches(t *testing.T) {
b := NewBuffer(20)
MetricsDropped.Set(0)
MetricsWritten.Set(0)
// Verify that the buffer returned is smaller than requested when there are
// not as many items as requested.
b.Add(metricList...)
batch := b.Batch(10)
assert.Len(t, batch, 5)
// Verify that the buffer is now empty
assert.True(t, b.IsEmpty())
assert.Zero(t, b.Len())
assert.Zero(t, MetricsDropped.Get())
assert.Equal(t, int64(5), MetricsWritten.Get())
// Verify that the buffer returned is not more than the size requested
b.Add(metricList...)
batch = b.Batch(3)
assert.Len(t, batch, 3)
// Verify that buffer is not empty
assert.False(t, b.IsEmpty())
assert.Equal(t, b.Len(), 2)
assert.Equal(t, int64(0), MetricsDropped.Get())
assert.Equal(t, int64(10), MetricsWritten.Get())
}

View File

@ -1,36 +0,0 @@
// Package choice provides basic functions for working with
// plugin options that must be one of several values.
package choice
import "fmt"
// Contains returns true if the choice is in the list of choices.
func Contains(choice string, choices []string) bool {
for _, item := range choices {
if item == choice {
return true
}
}
return false
}
// Check returns an error if a choice is not one of
// the available choices.
func Check(choice string, available []string) error {
if !Contains(choice, available) {
return fmt.Errorf("unknown choice %s", choice)
}
return nil
}
// CheckSlice returns an error if the choices are not a subset of
// available.
func CheckSlice(choices, available []string) error {
for _, choice := range choices {
err := Check(choice, available)
if err != nil {
return err
}
}
return nil
}
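A small, hypothetical usage sketch of these helpers, e.g. validating a plugin option; the option values are placeholders, not taken from this diff.

package main

import (
	"log"

	"github.com/influxdata/telegraf/internal/choice"
)

func main() {
	available := []string{"json", "influx", "graphite"}

	// Check validates a single value; anything outside available yields an
	// error of the form "unknown choice <value>".
	if err := choice.Check("influx", available); err != nil {
		log.Fatal(err)
	}

	// CheckSlice validates every element of a slice the same way.
	if err := choice.CheckSlice([]string{"json", "influx"}, available); err != nil {
		log.Fatal(err)
	}
}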

View File

@ -9,14 +9,13 @@ import (
)
type CredentialConfig struct {
Region string
AccessKey string
SecretKey string
RoleARN string
Profile string
Filename string
Token string
EndpointURL string
Region string
AccessKey string
SecretKey string
RoleARN string
Profile string
Filename string
Token string
}
func (c *CredentialConfig) Credentials() client.ConfigProvider {
@ -29,8 +28,7 @@ func (c *CredentialConfig) Credentials() client.ConfigProvider {
func (c *CredentialConfig) rootCredentials() client.ConfigProvider {
config := &aws.Config{
Region: aws.String(c.Region),
Endpoint: &c.EndpointURL,
Region: aws.String(c.Region),
}
if c.AccessKey != "" || c.SecretKey != "" {
config.Credentials = credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token)
@ -44,8 +42,7 @@ func (c *CredentialConfig) rootCredentials() client.ConfigProvider {
func (c *CredentialConfig) assumeCredentials() client.ConfigProvider {
rootCredentials := c.rootCredentials()
config := &aws.Config{
Region: aws.String(c.Region),
Endpoint: &c.EndpointURL,
Region: aws.String(c.Region),
}
config.Credentials = stscreds.NewCredentials(rootCredentials, c.RoleARN)
return session.New(config)
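For context, a hedged sketch of how a plugin might consume CredentialConfig. Only the struct fields and the Credentials() method come from the code above; the import path and the cloudwatch client are assumptions for illustration.

package main

import (
	"github.com/aws/aws-sdk-go/service/cloudwatch"

	// Assumed import path for the package shown in this diff.
	internalaws "github.com/influxdata/telegraf/internal/config/aws"
)

func main() {
	// Field names follow the struct shown above; the values are placeholders.
	creds := internalaws.CredentialConfig{
		Region:  "us-east-1",
		Profile: "default",
	}

	// Credentials() returns a client.ConfigProvider, from which any
	// AWS SDK service client can be constructed.
	svc := cloudwatch.New(creds.Credentials())
	_ = svc
}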

File diff suppressed because it is too large

View File

@ -5,17 +5,14 @@ import (
"testing"
"time"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/models"
"github.com/influxdata/telegraf/internal/models"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/inputs/exec"
"github.com/influxdata/telegraf/plugins/inputs/http_listener_v2"
"github.com/influxdata/telegraf/plugins/inputs/memcached"
"github.com/influxdata/telegraf/plugins/inputs/procstat"
httpOut "github.com/influxdata/telegraf/plugins/outputs/http"
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
@ -31,17 +28,17 @@ func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
filter := models.Filter{
NameDrop: []string{"metricname2"},
NamePass: []string{"metricname1", "ip_192.168.1.1_name"},
NamePass: []string{"metricname1"},
FieldDrop: []string{"other", "stuff"},
FieldPass: []string{"some", "strings"},
TagDrop: []models.TagFilter{
{
models.TagFilter{
Name: "badtag",
Filter: []string{"othertag"},
},
},
TagPass: []models.TagFilter{
{
models.TagFilter{
Name: "goodtag",
Filter: []string{"mytag"},
},
@ -74,13 +71,13 @@ func TestConfig_LoadSingleInput(t *testing.T) {
FieldDrop: []string{"other", "stuff"},
FieldPass: []string{"some", "strings"},
TagDrop: []models.TagFilter{
{
models.TagFilter{
Name: "badtag",
Filter: []string{"othertag"},
},
},
TagPass: []models.TagFilter{
{
models.TagFilter{
Name: "goodtag",
Filter: []string{"mytag"},
},
@ -120,13 +117,13 @@ func TestConfig_LoadDirectory(t *testing.T) {
FieldDrop: []string{"other", "stuff"},
FieldPass: []string{"some", "strings"},
TagDrop: []models.TagFilter{
{
models.TagFilter{
Name: "badtag",
Filter: []string{"othertag"},
},
},
TagPass: []models.TagFilter{
{
models.TagFilter{
Name: "goodtag",
Filter: []string{"mytag"},
},
@ -146,11 +143,7 @@ func TestConfig_LoadDirectory(t *testing.T) {
"Testdata did not produce correct memcached metadata.")
ex := inputs.Inputs["exec"]().(*exec.Exec)
p, err := parsers.NewParser(&parsers.Config{
MetricName: "exec",
DataFormat: "json",
JSONStrict: true,
})
p, err := parsers.NewJSONParser("exec", nil, nil)
assert.NoError(t, err)
ex.SetParser(p)
ex.Command = "/usr/bin/myothercollector --foo=bar"
@ -159,11 +152,6 @@ func TestConfig_LoadDirectory(t *testing.T) {
MeasurementSuffix: "_myothercollector",
}
eConfig.Tags = make(map[string]string)
exec := c.Inputs[1].Input.(*exec.Exec)
require.NotNil(t, exec.Log)
exec.Log = nil
assert.Equal(t, ex, c.Inputs[1].Input,
"Merged Testdata did not produce a correct exec struct.")
assert.Equal(t, eConfig, c.Inputs[1].Config,
@ -186,74 +174,3 @@ func TestConfig_LoadDirectory(t *testing.T) {
assert.Equal(t, pConfig, c.Inputs[3].Config,
"Merged Testdata did not produce correct procstat metadata.")
}
func TestConfig_LoadSpecialTypes(t *testing.T) {
c := NewConfig()
err := c.LoadConfig("./testdata/special_types.toml")
assert.NoError(t, err)
require.Equal(t, 1, len(c.Inputs))
inputHTTPListener, ok := c.Inputs[0].Input.(*http_listener_v2.HTTPListenerV2)
assert.Equal(t, true, ok)
// Tests telegraf duration parsing.
assert.Equal(t, internal.Duration{Duration: time.Second}, inputHTTPListener.WriteTimeout)
// Tests telegraf size parsing.
assert.Equal(t, internal.Size{Size: 1024 * 1024}, inputHTTPListener.MaxBodySize)
// Tests toml multiline basic strings.
assert.Equal(t, "/path/to/my/cert\n", inputHTTPListener.TLSCert)
}
func TestConfig_FieldNotDefined(t *testing.T) {
c := NewConfig()
err := c.LoadConfig("./testdata/invalid_field.toml")
require.Error(t, err, "invalid field name")
assert.Equal(t, "Error loading config file ./testdata/invalid_field.toml: Error parsing http_listener_v2, line 2: field corresponding to `not_a_field' is not defined in http_listener_v2.HTTPListenerV2", err.Error())
}
func TestConfig_WrongFieldType(t *testing.T) {
c := NewConfig()
err := c.LoadConfig("./testdata/wrong_field_type.toml")
require.Error(t, err, "invalid field type")
assert.Equal(t, "Error loading config file ./testdata/wrong_field_type.toml: Error parsing http_listener_v2, line 2: (http_listener_v2.HTTPListenerV2.Port) cannot unmarshal TOML string into int", err.Error())
c = NewConfig()
err = c.LoadConfig("./testdata/wrong_field_type2.toml")
require.Error(t, err, "invalid field type2")
assert.Equal(t, "Error loading config file ./testdata/wrong_field_type2.toml: Error parsing http_listener_v2, line 2: (http_listener_v2.HTTPListenerV2.Methods) cannot unmarshal TOML string into []string", err.Error())
}
func TestConfig_InlineTables(t *testing.T) {
// #4098
c := NewConfig()
err := c.LoadConfig("./testdata/inline_table.toml")
assert.NoError(t, err)
require.Equal(t, 2, len(c.Outputs))
outputHTTP, ok := c.Outputs[1].Output.(*httpOut.HTTP)
assert.Equal(t, true, ok)
assert.Equal(t, map[string]string{"Authorization": "Token $TOKEN", "Content-Type": "application/json"}, outputHTTP.Headers)
assert.Equal(t, []string{"org_id"}, c.Outputs[0].Config.Filter.TagInclude)
}
func TestConfig_SliceComment(t *testing.T) {
t.Skipf("Skipping until #3642 is resolved")
c := NewConfig()
err := c.LoadConfig("./testdata/slice_comment.toml")
assert.NoError(t, err)
require.Equal(t, 1, len(c.Outputs))
outputHTTP, ok := c.Outputs[0].Output.(*httpOut.HTTP)
assert.Equal(t, []string{"test"}, outputHTTP.Scopes)
assert.Equal(t, true, ok)
}
func TestConfig_BadOrdering(t *testing.T) {
// #3444: when not using inline tables, care has to be taken so subsequent configuration
// doesn't become part of the table. This is not a bug, but TOML syntax.
c := NewConfig()
err := c.LoadConfig("./testdata/non_slice_slice.toml")
require.Error(t, err, "bad ordering")
assert.Equal(t, "Error loading config file ./testdata/non_slice_slice.toml: Error parsing http array, line 4: cannot unmarshal TOML array into string (need slice)", err.Error())
}

View File

@ -1,6 +1,6 @@
[[inputs.memcached]]
servers = ["$MY_TEST_SERVER"]
namepass = ["metricname1", "ip_${MY_TEST_SERVER}_name"]
namepass = ["metricname1"]
namedrop = ["metricname2"]
fieldpass = ["some", "strings"]
fielddrop = ["other", "stuff"]

View File

@ -0,0 +1,4 @@
# This invalid config file should be skipped during testing
# as it is in a ..data folder
[[outputs.influxdb

View File

@ -256,7 +256,7 @@
# specify address via a url matching:
# postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
# or a simple string:
# host=localhost user=pqgotest password=... sslmode=... dbname=app_production
# host=localhost user=pqotest password=... sslmode=... dbname=app_production
#
# All connection parameters are optional. By default, the host is localhost
# and the user is the currently running user. For localhost, we default

View File

@ -1,182 +0,0 @@
package internal
import (
"bufio"
"bytes"
"compress/gzip"
"errors"
"io"
)
// NewStreamContentDecoder returns a reader that will decode the stream
// according to the encoding type.
func NewStreamContentDecoder(encoding string, r io.Reader) (io.Reader, error) {
switch encoding {
case "gzip":
return NewGzipReader(r)
case "identity", "":
return r, nil
default:
return nil, errors.New("invalid value for content_encoding")
}
}
// GzipReader is similar to gzip.Reader but reads only a single gzip stream per read.
type GzipReader struct {
r io.Reader
z *gzip.Reader
endOfStream bool
}
func NewGzipReader(r io.Reader) (io.Reader, error) {
// We need a read that implements ByteReader in order to line up the next
// stream.
br := bufio.NewReader(r)
// Reads the first gzip stream header.
z, err := gzip.NewReader(br)
if err != nil {
return nil, err
}
// Prevent future calls to Read from reading the following gzip header.
z.Multistream(false)
return &GzipReader{r: br, z: z}, nil
}
func (r *GzipReader) Read(b []byte) (int, error) {
if r.endOfStream {
// Reads the next gzip header and prepares for the next stream.
err := r.z.Reset(r.r)
if err != nil {
return 0, err
}
r.z.Multistream(false)
r.endOfStream = false
}
n, err := r.z.Read(b)
// Since multistream is disabled, io.EOF indicates the end of the gzip
// sequence. On the next read we must read the next gzip header.
if err == io.EOF {
r.endOfStream = true
return n, nil
}
return n, err
}
// NewContentEncoder returns a ContentEncoder for the encoding type.
func NewContentEncoder(encoding string) (ContentEncoder, error) {
switch encoding {
case "gzip":
return NewGzipEncoder()
case "identity", "":
return NewIdentityEncoder(), nil
default:
return nil, errors.New("invalid value for content_encoding")
}
}
// NewContentDecoder returns a ContentDecoder for the encoding type.
func NewContentDecoder(encoding string) (ContentDecoder, error) {
switch encoding {
case "gzip":
return NewGzipDecoder()
case "identity", "":
return NewIdentityDecoder(), nil
default:
return nil, errors.New("invalid value for content_encoding")
}
}
// ContentEncoder applies a wrapper encoding to byte buffers.
type ContentEncoder interface {
Encode([]byte) ([]byte, error)
}
// GzipEncoder compresses the buffer using gzip at the default level.
type GzipEncoder struct {
writer *gzip.Writer
buf *bytes.Buffer
}
func NewGzipEncoder() (*GzipEncoder, error) {
var buf bytes.Buffer
return &GzipEncoder{
writer: gzip.NewWriter(&buf),
buf: &buf,
}, nil
}
func (e *GzipEncoder) Encode(data []byte) ([]byte, error) {
e.buf.Reset()
e.writer.Reset(e.buf)
_, err := e.writer.Write(data)
if err != nil {
return nil, err
}
err = e.writer.Close()
if err != nil {
return nil, err
}
return e.buf.Bytes(), nil
}
// IdentityEncoder is a null encoder that applies no transformation.
type IdentityEncoder struct{}
func NewIdentityEncoder() *IdentityEncoder {
return &IdentityEncoder{}
}
func (*IdentityEncoder) Encode(data []byte) ([]byte, error) {
return data, nil
}
// ContentDecoder removes a wrapper encoding from byte buffers.
type ContentDecoder interface {
Decode([]byte) ([]byte, error)
}
// GzipDecoder decompresses buffers with gzip compression.
type GzipDecoder struct {
reader *gzip.Reader
buf *bytes.Buffer
}
func NewGzipDecoder() (*GzipDecoder, error) {
return &GzipDecoder{
reader: new(gzip.Reader),
buf: new(bytes.Buffer),
}, nil
}
func (d *GzipDecoder) Decode(data []byte) ([]byte, error) {
d.reader.Reset(bytes.NewBuffer(data))
d.buf.Reset()
_, err := d.buf.ReadFrom(d.reader)
if err != nil && err != io.EOF {
return nil, err
}
err = d.reader.Close()
if err != nil {
return nil, err
}
return d.buf.Bytes(), nil
}
// IdentityDecoder is a null decoder that returns the input.
type IdentityDecoder struct{}
func NewIdentityDecoder() *IdentityDecoder {
return &IdentityDecoder{}
}
func (*IdentityDecoder) Decode(data []byte) ([]byte, error) {
return data, nil
}

View File

@ -1,94 +0,0 @@
package internal
import (
"bytes"
"io/ioutil"
"testing"
"github.com/stretchr/testify/require"
)
func TestGzipEncodeDecode(t *testing.T) {
enc, err := NewGzipEncoder()
require.NoError(t, err)
dec, err := NewGzipDecoder()
require.NoError(t, err)
payload, err := enc.Encode([]byte("howdy"))
require.NoError(t, err)
actual, err := dec.Decode(payload)
require.NoError(t, err)
require.Equal(t, "howdy", string(actual))
}
func TestGzipReuse(t *testing.T) {
enc, err := NewGzipEncoder()
require.NoError(t, err)
dec, err := NewGzipDecoder()
require.NoError(t, err)
payload, err := enc.Encode([]byte("howdy"))
require.NoError(t, err)
actual, err := dec.Decode(payload)
require.NoError(t, err)
require.Equal(t, "howdy", string(actual))
payload, err = enc.Encode([]byte("doody"))
require.NoError(t, err)
actual, err = dec.Decode(payload)
require.NoError(t, err)
require.Equal(t, "doody", string(actual))
}
func TestIdentityEncodeDecode(t *testing.T) {
enc := NewIdentityEncoder()
dec := NewIdentityDecoder()
payload, err := enc.Encode([]byte("howdy"))
require.NoError(t, err)
actual, err := dec.Decode(payload)
require.NoError(t, err)
require.Equal(t, "howdy", string(actual))
}
func TestStreamIdentityDecode(t *testing.T) {
var r bytes.Buffer
n, err := r.Write([]byte("howdy"))
require.NoError(t, err)
require.Equal(t, 5, n)
dec, err := NewStreamContentDecoder("identity", &r)
require.NoError(t, err)
data, err := ioutil.ReadAll(dec)
require.NoError(t, err)
require.Equal(t, []byte("howdy"), data)
}
func TestStreamGzipDecode(t *testing.T) {
enc, err := NewGzipEncoder()
require.NoError(t, err)
written, err := enc.Encode([]byte("howdy"))
require.NoError(t, err)
w := bytes.NewBuffer(written)
dec, err := NewStreamContentDecoder("gzip", w)
require.NoError(t, err)
b := make([]byte, 10)
n, err := dec.Read(b)
require.NoError(t, err)
require.Equal(t, 5, n)
require.Equal(t, []byte("howdy"), b[:n])
}

View File

@ -1,36 +0,0 @@
package docker
import "strings"
// Adapts some of the logic from the actual Docker library's image parsing
// routines:
// https://github.com/docker/distribution/blob/release/2.7/reference/normalize.go
func ParseImage(image string) (string, string) {
domain := ""
remainder := ""
i := strings.IndexRune(image, '/')
if i == -1 || (!strings.ContainsAny(image[:i], ".:") && image[:i] != "localhost") {
remainder = image
} else {
domain, remainder = image[:i], image[i+1:]
}
imageName := ""
imageVersion := "unknown"
i = strings.LastIndex(remainder, ":")
if i > -1 {
imageVersion = remainder[i+1:]
imageName = remainder[:i]
} else {
imageName = remainder
}
if domain != "" {
imageName = domain + "/" + imageName
}
return imageName, imageVersion
}

View File

@ -1,59 +0,0 @@
package docker_test
import (
"testing"
"github.com/influxdata/telegraf/internal/docker"
"github.com/stretchr/testify/require"
)
func TestParseImage(t *testing.T) {
tests := []struct {
image string
parsedName string
parsedVersion string
}{
{
image: "postgres",
parsedName: "postgres",
parsedVersion: "unknown",
},
{
image: "postgres:latest",
parsedName: "postgres",
parsedVersion: "latest",
},
{
image: "coreos/etcd",
parsedName: "coreos/etcd",
parsedVersion: "unknown",
},
{
image: "coreos/etcd:latest",
parsedName: "coreos/etcd",
parsedVersion: "latest",
},
{
image: "quay.io/postgres",
parsedName: "quay.io/postgres",
parsedVersion: "unknown",
},
{
image: "quay.io:4443/coreos/etcd",
parsedName: "quay.io:4443/coreos/etcd",
parsedVersion: "unknown",
},
{
image: "quay.io:4443/coreos/etcd:latest",
parsedName: "quay.io:4443/coreos/etcd",
parsedVersion: "latest",
},
}
for _, tt := range tests {
t.Run("parse name "+tt.image, func(t *testing.T) {
imageName, imageVersion := docker.ParseImage(tt.image)
require.Equal(t, tt.parsedName, imageName)
require.Equal(t, tt.parsedVersion, imageVersion)
})
}
}

View File

@ -1,30 +0,0 @@
package internal
import (
"bytes"
"os/exec"
"time"
)
// CombinedOutputTimeout runs the given command with the given timeout and
// returns the combined output of stdout and stderr.
// If the command times out, it attempts to kill the process.
func CombinedOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) {
var b bytes.Buffer
c.Stdout = &b
c.Stderr = &b
if err := c.Start(); err != nil {
return nil, err
}
err := WaitTimeout(c, timeout)
return b.Bytes(), err
}
// RunTimeout runs the given command with the given timeout.
// If the command times out, it attempts to kill the process.
func RunTimeout(c *exec.Cmd, timeout time.Duration) error {
if err := c.Start(); err != nil {
return err
}
return WaitTimeout(c, timeout)
}
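A brief usage sketch (not from the diff) of CombinedOutputTimeout. The command is a placeholder, and the import assumes the caller sits inside the telegraf module.

package main

import (
	"fmt"
	"os/exec"
	"time"

	"github.com/influxdata/telegraf/internal"
)

func main() {
	// Placeholder command; any external collector invocation works the same way.
	cmd := exec.Command("sleep", "10")

	// Combined stdout/stderr is returned; if the process runs longer than the
	// timeout it is signalled (SIGTERM, then SIGKILL after KillGrace on Unix).
	out, err := internal.CombinedOutputTimeout(cmd, 2*time.Second)
	fmt.Printf("output: %q, err: %v\n", out, err)
}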

View File

@ -1,58 +0,0 @@
// +build !windows
package internal
import (
"log"
"os/exec"
"syscall"
"time"
)
// KillGrace is the amount of time we allow a process to shutdown before
// sending a SIGKILL.
const KillGrace = 5 * time.Second
// WaitTimeout waits for the given command to finish with a timeout.
// It assumes the command has already been started.
// If the command times out, it attempts to kill the process.
func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
var kill *time.Timer
term := time.AfterFunc(timeout, func() {
err := c.Process.Signal(syscall.SIGTERM)
if err != nil {
log.Printf("E! [agent] Error terminating process: %s", err)
return
}
kill = time.AfterFunc(KillGrace, func() {
err := c.Process.Kill()
if err != nil {
log.Printf("E! [agent] Error killing process: %s", err)
return
}
})
})
err := c.Wait()
// Shutdown all timers
if kill != nil {
kill.Stop()
}
termSent := !term.Stop()
// If the process exited without error treat it as success. This allows a
// process to do a clean shutdown on signal.
if err == nil {
return nil
}
// If SIGTERM was sent then treat any process error as a timeout.
if termSent {
return TimeoutErr
}
// Otherwise there was an error unrelated to termination.
return err
}

View File

@ -1,41 +0,0 @@
// +build windows
package internal
import (
"log"
"os/exec"
"time"
)
// WaitTimeout waits for the given command to finish with a timeout.
// It assumes the command has already been started.
// If the command times out, it attempts to kill the process.
func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
timer := time.AfterFunc(timeout, func() {
err := c.Process.Kill()
if err != nil {
log.Printf("E! [agent] Error killing process: %s", err)
return
}
})
err := c.Wait()
// Shutdown all timers
termSent := !timer.Stop()
// If the process exited without error treat it as success. This allows a
// process to do a clean shutdown on signal.
if err == nil {
return nil
}
// If SIGTERM was sent then treat any process error as a timeout.
if termSent {
return TimeoutErr
}
// Otherwise there was an error unrelated to termination.
return err
}

View File

@ -1,116 +1,110 @@
package globpath
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/gobwas/glob"
"github.com/karrick/godirwalk"
)
var sepStr = fmt.Sprintf("%v", string(os.PathSeparator))
type GlobPath struct {
path string
hasMeta bool
HasSuperMeta bool
rootGlob string
hasSuperMeta bool
g glob.Glob
root string
}
func Compile(path string) (*GlobPath, error) {
out := GlobPath{
hasMeta: hasMeta(path),
HasSuperMeta: hasSuperMeta(path),
path: filepath.FromSlash(path),
hasSuperMeta: hasSuperMeta(path),
path: path,
}
// if there are no glob meta characters in the path, don't bother compiling
// a glob object
if !out.hasMeta || !out.HasSuperMeta {
// a glob object or finding the root directory. (see short-circuit in Match)
if !out.hasMeta || !out.hasSuperMeta {
return &out, nil
}
// find the root elements of the object path, the entry point for recursion
// when you have a super-meta in your path (which are :
// glob(/your/expression/until/first/star/of/super-meta))
out.rootGlob = path[:strings.Index(path, "**")+1]
var err error
if out.g, err = glob.Compile(path, os.PathSeparator); err != nil {
return nil, err
}
// Get the root directory for this filepath
out.root = findRootDir(path)
return &out, nil
}
// Match returns all files matching the expression.
// If it's a static path, returns path.
// All returned path will have the host platform separator.
func (g *GlobPath) Match() []string {
func (g *GlobPath) Match() map[string]os.FileInfo {
if !g.hasMeta {
return []string{g.path}
out := make(map[string]os.FileInfo)
info, err := os.Stat(g.path)
if err == nil {
out[g.path] = info
}
return out
}
if !g.HasSuperMeta {
if !g.hasSuperMeta {
out := make(map[string]os.FileInfo)
files, _ := filepath.Glob(g.path)
return files
for _, file := range files {
info, err := os.Stat(file)
if err == nil {
out[file] = info
}
}
return out
}
roots, err := filepath.Glob(g.rootGlob)
if err != nil {
return []string{}
}
out := []string{}
walkfn := func(path string, _ *godirwalk.Dirent) error {
if g.g.Match(path) {
out = append(out, path)
return walkFilePath(g.root, g.g)
}
// walk the filepath from the given root and return a list of files that match
// the given glob.
func walkFilePath(root string, g glob.Glob) map[string]os.FileInfo {
matchedFiles := make(map[string]os.FileInfo)
walkfn := func(path string, info os.FileInfo, _ error) error {
if g.Match(path) {
matchedFiles[path] = info
}
return nil
}
for _, root := range roots {
fileinfo, err := os.Stat(root)
if err != nil {
filepath.Walk(root, walkfn)
return matchedFiles
}
// find the root dir of the given path (could include globs).
// ie:
// /var/log/telegraf.conf -> /var/log
// /home/** -> /home
// /home/*/** -> /home
// /lib/share/*/*/**.txt -> /lib/share
func findRootDir(path string) string {
pathItems := strings.Split(path, sepStr)
out := sepStr
for i, item := range pathItems {
if i == len(pathItems)-1 {
break
}
if item == "" {
continue
}
if !fileinfo.IsDir() {
if g.MatchString(root) {
out = append(out, root)
}
continue
if hasMeta(item) {
break
}
godirwalk.Walk(root, &godirwalk.Options{
Callback: walkfn,
Unsorted: true,
})
out += item + sepStr
}
if out != "/" {
out = strings.TrimSuffix(out, "/")
}
return out
}
// MatchString tests the path string against the glob. The path should contain
// the host platform separator.
func (g *GlobPath) MatchString(path string) bool {
if !g.HasSuperMeta {
res, _ := filepath.Match(g.path, path)
return res
}
return g.g.Match(path)
}
// GetRoots returns a list of files and directories which should be optimal
// prefixes of matching files when you have a super-meta in your expression :
// - any directory under these roots may contain a matching file
// - no file outside of these roots can match the pattern
// Note that it returns both files and directories.
// All returned path will have the host platform separator.
func (g *GlobPath) GetRoots() []string {
if !g.hasMeta {
return []string{g.path}
}
if !g.HasSuperMeta {
matches, _ := filepath.Glob(g.path)
return matches
}
roots, _ := filepath.Glob(g.rootGlob)
return roots
}
// hasMeta reports whether path contains any magic glob characters.
func hasMeta(path string) bool {
return strings.IndexAny(path, "*?[") >= 0
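Since GlobPath hands super-meta patterns to github.com/gobwas/glob with os.PathSeparator as the separator, it may help to see what that separator changes. A self-contained sketch; the paths are purely illustrative, and it assumes the usual gobwas/glob semantics (a single * stops at the separator, ** crosses it).

package main

import (
    "fmt"

    "github.com/gobwas/glob"
)

func main() {
    // A single '*' does not cross the '/' separator...
    g := glob.MustCompile("/var/log/*.log", '/')
    fmt.Println(g.Match("/var/log/syslog.log"))        // true
    fmt.Println(g.Match("/var/log/nested/syslog.log")) // false

    // ...while the super-meta "**" does, which is why GlobPath only walks the
    // filesystem when the pattern contains "**".
    super := glob.MustCompile("/var/log/**.log", '/')
    fmt.Println(super.Match("/var/log/nested/syslog.log")) // true
}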

View File

@ -1,10 +1,12 @@
package globpath
import (
"os"
"runtime"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -27,32 +29,31 @@ func TestCompileAndMatch(t *testing.T) {
require.NoError(t, err)
matches := g1.Match()
require.Len(t, matches, 6)
assert.Len(t, matches, 6)
matches = g2.Match()
require.Len(t, matches, 2)
assert.Len(t, matches, 2)
matches = g3.Match()
require.Len(t, matches, 1)
assert.Len(t, matches, 1)
matches = g4.Match()
require.Len(t, matches, 1)
assert.Len(t, matches, 0)
matches = g5.Match()
require.Len(t, matches, 0)
assert.Len(t, matches, 0)
}
func TestRootGlob(t *testing.T) {
dir := getTestdataDir()
func TestFindRootDir(t *testing.T) {
tests := []struct {
input string
output string
}{
{dir + "/**", dir + "/*"},
{dir + "/nested?/**", dir + "/nested?/*"},
{dir + "/ne**/nest*", dir + "/ne*"},
{dir + "/nested?/*", ""},
{"/var/log/telegraf.conf", "/var/log"},
{"/home/**", "/home"},
{"/home/*/**", "/home"},
{"/lib/share/*/*/**.txt", "/lib/share"},
}
for _, test := range tests {
actual, _ := Compile(test.input)
require.Equal(t, actual.rootGlob, test.output)
actual := findRootDir(test.input)
assert.Equal(t, test.output, actual)
}
}
@ -63,7 +64,7 @@ func TestFindNestedTextFile(t *testing.T) {
require.NoError(t, err)
matches := g1.Match()
require.Len(t, matches, 1)
assert.Len(t, matches, 1)
}
func getTestdataDir() string {
@ -74,10 +75,10 @@ func getTestdataDir() string {
func TestMatch_ErrPermission(t *testing.T) {
tests := []struct {
input string
expected []string
expected map[string]os.FileInfo
}{
{"/root/foo", []string{"/root/foo"}},
{"/root/f*", []string(nil)},
{"/root/foo", map[string]os.FileInfo{}},
{"/root/f*", map[string]os.FileInfo{}},
}
for _, test := range tests {
@ -87,14 +88,3 @@ func TestMatch_ErrPermission(t *testing.T) {
require.Equal(t, test.expected, actual)
}
}
func TestWindowsSeparator(t *testing.T) {
if runtime.GOOS != "windows" {
t.Skip("Skipping Windows only test")
}
glob, err := Compile("testdata/nested1")
require.NoError(t, err)
ok := glob.MatchString("testdata\\nested1")
require.True(t, ok)
}

View File

@ -1,9 +0,0 @@
// +build !goplugin
package goplugin
import "errors"
func LoadExternalPlugins(rootDir string) error {
return errors.New("go plugin support is not enabled")
}

View File

@ -1,42 +0,0 @@
// +build goplugin
package goplugin
import (
"fmt"
"os"
"path"
"path/filepath"
"plugin"
"strings"
)
// loadExternalPlugins loads external plugins from shared libraries (.so, .dll, etc.)
// in the specified directory.
func LoadExternalPlugins(rootDir string) error {
return filepath.Walk(rootDir, func(pth string, info os.FileInfo, err error) error {
// Stop if there was an error.
if err != nil {
return err
}
// Ignore directories.
if info.IsDir() {
return nil
}
// Ignore files that aren't shared libraries.
ext := strings.ToLower(path.Ext(pth))
if ext != ".so" && ext != ".dll" {
return nil
}
// Load plugin.
_, err = plugin.Open(pth)
if err != nil {
return fmt.Errorf("error loading %s: %s", pth, err)
}
return nil
})
}
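For context, a hypothetical out-of-tree input that this loader could pick up: it is built with "go build -buildmode=plugin -o my_input.so" and dropped into the directory handed to LoadExternalPlugins. Because it registers itself from init(), plugin.Open alone is enough to make it visible; every name below is illustrative and not part of the repository.

// my_input.go - a hypothetical external plugin, sketched for illustration.
package main

import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

type myInput struct{}

func (m *myInput) Description() string  { return "an example external input" }
func (m *myInput) SampleConfig() string { return "" }

func (m *myInput) Gather(acc telegraf.Accumulator) error {
    // Emit one constant field so the plugin is observable end to end.
    acc.AddFields("my_input", map[string]interface{}{"value": 42}, nil)
    return nil
}

func init() {
    // Registering in init() means loading the shared object is all it takes
    // to wire the plugin into telegraf's input registry.
    inputs.Add("my_input", func() telegraf.Input { return &myInput{} })
}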

View File

@ -1,108 +0,0 @@
package internal
import (
"crypto/subtle"
"net"
"net/http"
"net/url"
)
type BasicAuthErrorFunc func(rw http.ResponseWriter)
// AuthHandler returns a http handler that requires HTTP basic auth
// credentials to match the given username and password.
func AuthHandler(username, password, realm string, onError BasicAuthErrorFunc) func(h http.Handler) http.Handler {
return func(h http.Handler) http.Handler {
return &basicAuthHandler{
username: username,
password: password,
realm: realm,
onError: onError,
next: h,
}
}
}
type basicAuthHandler struct {
username string
password string
realm string
onError BasicAuthErrorFunc
next http.Handler
}
func (h *basicAuthHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
if h.username != "" || h.password != "" {
reqUsername, reqPassword, ok := req.BasicAuth()
if !ok ||
subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.username)) != 1 ||
subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.password)) != 1 {
rw.Header().Set("WWW-Authenticate", "Basic realm=\""+h.realm+"\"")
h.onError(rw)
http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
return
}
}
h.next.ServeHTTP(rw, req)
}
// ErrorFunc is a callback for writing an error response.
type ErrorFunc func(rw http.ResponseWriter, code int)
// IPRangeHandler returns a http handler that requires the remote address to be
// in the specified network.
func IPRangeHandler(network []*net.IPNet, onError ErrorFunc) func(h http.Handler) http.Handler {
return func(h http.Handler) http.Handler {
return &ipRangeHandler{
network: network,
onError: onError,
next: h,
}
}
}
type ipRangeHandler struct {
network []*net.IPNet
onError ErrorFunc
next http.Handler
}
func (h *ipRangeHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
if len(h.network) == 0 {
h.next.ServeHTTP(rw, req)
return
}
remoteIPString, _, err := net.SplitHostPort(req.RemoteAddr)
if err != nil {
h.onError(rw, http.StatusForbidden)
return
}
remoteIP := net.ParseIP(remoteIPString)
if remoteIP == nil {
h.onError(rw, http.StatusForbidden)
return
}
for _, net := range h.network {
if net.Contains(remoteIP) {
h.next.ServeHTTP(rw, req)
return
}
}
h.onError(rw, http.StatusForbidden)
}
func OnClientError(client *http.Client, err error) {
// Close connection after a timeout error. If this is an HTTP2
// connection this ensures that next interval a new connection will be
// used and name lookup will be performed.
// https://github.com/golang/go/issues/36026
if err, ok := err.(*url.Error); ok && err.Timeout() {
client.CloseIdleConnections()
}
}
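A minimal sketch of wiring the two middlewares above around an HTTP handler. It assumes the caller lives inside the telegraf module (the internal package cannot be imported from outside it); the address, credentials, realm, and CIDR are illustrative.

package main

import (
    "net"
    "net/http"

    "github.com/influxdata/telegraf/internal"
)

func main() {
    mux := http.NewServeMux()
    mux.HandleFunc("/metrics", func(rw http.ResponseWriter, _ *http.Request) {
        rw.Write([]byte("ok\n"))
    })

    // Only accept loopback clients...
    _, loopback, _ := net.ParseCIDR("127.0.0.0/8")
    var handler http.Handler = internal.IPRangeHandler([]*net.IPNet{loopback}, func(rw http.ResponseWriter, code int) {
        http.Error(rw, http.StatusText(code), code)
    })(mux)

    // ...and require basic auth on top of that.
    handler = internal.AuthHandler("admin", "secret", "telegraf", func(rw http.ResponseWriter) {
        // Called before the 401 is written; a hook for counting rejections.
    })(handler)

    http.ListenAndServe("127.0.0.1:8080", handler)
}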

View File

@ -3,24 +3,17 @@ package internal
import (
"bufio"
"bytes"
"compress/gzip"
"context"
"crypto/rand"
"errors"
"fmt"
"io"
"math"
"math/rand"
"log"
"math/big"
"os"
"os/exec"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"time"
"unicode"
"github.com/alecthomas/units"
)
const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
@ -29,52 +22,13 @@ var (
TimeoutErr = errors.New("Command timed out.")
NotImplementedError = errors.New("not implemented yet")
VersionAlreadySetError = errors.New("version has already been set")
)
// Set via the main module
var version string
// Duration just wraps time.Duration
type Duration struct {
Duration time.Duration
}
// Size just wraps an int64
type Size struct {
Size int64
}
type Number struct {
Value float64
}
type ReadWaitCloser struct {
pipeReader *io.PipeReader
wg sync.WaitGroup
}
// SetVersion sets the telegraf agent version
func SetVersion(v string) error {
if version != "" {
return VersionAlreadySetError
}
version = v
return nil
}
// Version returns the telegraf agent version
func Version() string {
return version
}
// ProductToken returns a tag for Telegraf that can be used in user agents.
func ProductToken() string {
return fmt.Sprintf("Telegraf/%s Go/%s",
Version(), strings.TrimPrefix(runtime.Version(), "go"))
}
// UnmarshalTOML parses the duration from the TOML config file
func (d *Duration) UnmarshalTOML(b []byte) error {
var err error
@ -110,37 +64,6 @@ func (d *Duration) UnmarshalTOML(b []byte) error {
return nil
}
func (s *Size) UnmarshalTOML(b []byte) error {
var err error
b = bytes.Trim(b, `'`)
val, err := strconv.ParseInt(string(b), 10, 64)
if err == nil {
s.Size = val
return nil
}
uq, err := strconv.Unquote(string(b))
if err != nil {
return err
}
val, err = units.ParseStrictBytes(uq)
if err != nil {
return err
}
s.Size = val
return nil
}
func (n *Number) UnmarshalTOML(b []byte) error {
value, err := strconv.ParseFloat(string(b), 64)
if err != nil {
return err
}
n.Value = value
return nil
}
// ReadLines reads contents from a file and splits them by new lines.
// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1).
func ReadLines(filename string) ([]string, error) {
@ -203,6 +126,51 @@ func SnakeCase(in string) string {
return string(out)
}
// CombinedOutputTimeout runs the given command with the given timeout and
// returns the combined output of stdout and stderr.
// If the command times out, it attempts to kill the process.
func CombinedOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) {
var b bytes.Buffer
c.Stdout = &b
c.Stderr = &b
if err := c.Start(); err != nil {
return nil, err
}
err := WaitTimeout(c, timeout)
return b.Bytes(), err
}
// RunTimeout runs the given command with the given timeout.
// If the command times out, it attempts to kill the process.
func RunTimeout(c *exec.Cmd, timeout time.Duration) error {
if err := c.Start(); err != nil {
return err
}
return WaitTimeout(c, timeout)
}
// WaitTimeout waits for the given command to finish with a timeout.
// It assumes the command has already been started.
// If the command times out, it attempts to kill the process.
func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
timer := time.NewTimer(timeout)
done := make(chan error)
go func() { done <- c.Wait() }()
select {
case err := <-done:
timer.Stop()
return err
case <-timer.C:
if err := c.Process.Kill(); err != nil {
log.Printf("E! FATAL error killing process: %s", err)
return err
}
// wait for the command to return after killing it
<-done
return TimeoutErr
}
}
// RandomSleep will sleep for a random amount of time up to max.
// If the shutdown channel is closed, it will return before it has finished
// sleeping.
@ -210,8 +178,12 @@ func RandomSleep(max time.Duration, shutdown chan struct{}) {
if max == 0 {
return
}
maxSleep := big.NewInt(max.Nanoseconds())
sleepns := rand.Int63n(max.Nanoseconds())
var sleepns int64
if j, err := rand.Int(rand.Reader, maxSleep); err == nil {
sleepns = j.Int64()
}
t := time.NewTimer(time.Nanosecond * time.Duration(sleepns))
select {
@ -223,49 +195,6 @@ func RandomSleep(max time.Duration, shutdown chan struct{}) {
}
}
// RandomDuration returns a random duration between 0 and max.
func RandomDuration(max time.Duration) time.Duration {
if max == 0 {
return 0
}
sleepns := rand.Int63n(max.Nanoseconds())
return time.Duration(sleepns)
}
// SleepContext sleeps until the context is closed or the duration is reached.
func SleepContext(ctx context.Context, duration time.Duration) error {
if duration == 0 {
return nil
}
t := time.NewTimer(duration)
select {
case <-t.C:
return nil
case <-ctx.Done():
t.Stop()
return ctx.Err()
}
}
// AlignDuration returns the duration until next aligned interval.
// If the current time is aligned a 0 duration is returned.
func AlignDuration(tm time.Time, interval time.Duration) time.Duration {
return AlignTime(tm, interval).Sub(tm)
}
// AlignTime returns the time of the next aligned interval.
// If the current time is aligned the current time is returned.
func AlignTime(tm time.Time, interval time.Duration) time.Time {
truncated := tm.Truncate(interval)
if truncated == tm {
return tm
}
return truncated.Add(interval)
}
// Exit status takes the error from exec.Command
// and returns the exit status and true
// if error is not exit status, will return 0 and false
@ -277,148 +206,3 @@ func ExitStatus(err error) (int, bool) {
}
return 0, false
}
func (r *ReadWaitCloser) Close() error {
err := r.pipeReader.Close()
r.wg.Wait() // wait for the gzip goroutine finish
return err
}
// CompressWithGzip takes an io.Reader as input and pipes
// it through a gzip.Writer returning an io.Reader containing
// the gzipped data.
// An error is returned if passing data to the gzip.Writer fails
func CompressWithGzip(data io.Reader) (io.ReadCloser, error) {
pipeReader, pipeWriter := io.Pipe()
gzipWriter := gzip.NewWriter(pipeWriter)
rc := &ReadWaitCloser{
pipeReader: pipeReader,
}
rc.wg.Add(1)
var err error
go func() {
_, err = io.Copy(gzipWriter, data)
gzipWriter.Close()
// subsequent reads from the read half of the pipe will
// return no bytes and the error err, or EOF if err is nil.
pipeWriter.CloseWithError(err)
rc.wg.Done()
}()
return pipeReader, err
}
// ParseTimestamp parses a Time according to the standard Telegraf options.
// These are generally displayed in the toml similar to:
// json_time_key= "timestamp"
// json_time_format = "2006-01-02T15:04:05Z07:00"
// json_timezone = "America/Los_Angeles"
//
// The format can be one of "unix", "unix_ms", "unix_us", "unix_ns", or a Go
// time layout suitable for time.Parse.
//
// When using the "unix" format, an optional fractional component is allowed.
// Specific unix time precisions cannot have a fractional component.
//
// Unix times may be an int64, float64, or string. When using a Go format
// string the timestamp must be a string.
//
// The location is a location string suitable for time.LoadLocation. Unix
// times do not use the location string; a unix time is always returned in the
// UTC location.
func ParseTimestamp(format string, timestamp interface{}, location string) (time.Time, error) {
switch format {
case "unix", "unix_ms", "unix_us", "unix_ns":
return parseUnix(format, timestamp)
default:
if location == "" {
location = "UTC"
}
return parseTime(format, timestamp, location)
}
}
func parseUnix(format string, timestamp interface{}) (time.Time, error) {
integer, fractional, err := parseComponents(timestamp)
if err != nil {
return time.Unix(0, 0), err
}
switch strings.ToLower(format) {
case "unix":
return time.Unix(integer, fractional).UTC(), nil
case "unix_ms":
return time.Unix(0, integer*1e6).UTC(), nil
case "unix_us":
return time.Unix(0, integer*1e3).UTC(), nil
case "unix_ns":
return time.Unix(0, integer).UTC(), nil
default:
return time.Unix(0, 0), errors.New("unsupported type")
}
}
// Returns the integers before and after an optional decimal point. Both '.'
// and ',' are supported for the decimal point. The timestamp can be an int64,
// float64, or string.
// ex: "42.5" -> (42, 5, nil)
func parseComponents(timestamp interface{}) (int64, int64, error) {
switch ts := timestamp.(type) {
case string:
parts := strings.SplitN(ts, ".", 2)
if len(parts) == 2 {
return parseUnixTimeComponents(parts[0], parts[1])
}
parts = strings.SplitN(ts, ",", 2)
if len(parts) == 2 {
return parseUnixTimeComponents(parts[0], parts[1])
}
integer, err := strconv.ParseInt(ts, 10, 64)
if err != nil {
return 0, 0, err
}
return integer, 0, nil
case int64:
return ts, 0, nil
case float64:
integer, fractional := math.Modf(ts)
return int64(integer), int64(fractional * 1e9), nil
default:
return 0, 0, errors.New("unsupported type")
}
}
func parseUnixTimeComponents(first, second string) (int64, int64, error) {
integer, err := strconv.ParseInt(first, 10, 64)
if err != nil {
return 0, 0, err
}
// Convert to nanoseconds, dropping any greater precision.
buf := []byte("000000000")
copy(buf, second)
fractional, err := strconv.ParseInt(string(buf), 10, 64)
if err != nil {
return 0, 0, err
}
return integer, fractional, nil
}
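To make the precision-dropping step above concrete, here is the "unix seconds extra precision" case from the tests in this changeset, traced by hand:

// parseUnixTimeComponents("1568338208", "00000050042"):
//   buf starts as "000000000"              (nine digits, i.e. nanosecond width)
//   copy(buf, "00000050042") keeps only the first nine bytes: "000000500",
//   so the trailing "42" is dropped and ParseInt yields 500.
// The result is (1568338208, 500, nil), and time.Unix(1568338208, 500) is
// 2019-09-13T01:30:08.000000500Z, matching the corresponding test case.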
// ParseTime parses a string timestamp according to the format string.
func parseTime(format string, timestamp interface{}, location string) (time.Time, error) {
switch ts := timestamp.(type) {
case string:
loc, err := time.LoadLocation(location)
if err != nil {
return time.Unix(0, 0), err
}
return time.ParseInLocation(format, ts, loc)
default:
return time.Unix(0, 0), errors.New("unsupported type")
}
}

View File

@ -1,19 +1,11 @@
package internal
import (
"bytes"
"compress/gzip"
"crypto/rand"
"io"
"io/ioutil"
"log"
"os/exec"
"regexp"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type SnakeTest struct {
@ -68,30 +60,6 @@ func TestRunTimeout(t *testing.T) {
assert.True(t, elapsed < time.Millisecond*75)
}
// Verifies behavior of a command that doesn't get killed.
func TestRunTimeoutFastExit(t *testing.T) {
if testing.Short() {
t.Skip("Skipping test due to random failures.")
}
if echobin == "" {
t.Skip("'echo' binary not available on OS, skipping.")
}
cmd := exec.Command(echobin)
start := time.Now()
err := RunTimeout(cmd, time.Millisecond*20)
buf := &bytes.Buffer{}
log.SetOutput(buf)
elapsed := time.Since(start)
require.NoError(t, err)
// Verify that command gets killed in 20ms, with some breathing room
assert.True(t, elapsed < time.Millisecond*75)
// Verify "process already finished" log doesn't occur.
time.Sleep(time.Millisecond * 75)
require.Equal(t, "", buf.String())
}
func TestCombinedOutputTimeout(t *testing.T) {
// TODO: Fix this test
t.Skip("Test failing too often, skip for now and revisit later.")
@ -194,298 +162,3 @@ func TestDuration(t *testing.T) {
d.UnmarshalTOML([]byte(`1.5`))
assert.Equal(t, time.Second, d.Duration)
}
func TestSize(t *testing.T) {
var s Size
s.UnmarshalTOML([]byte(`"1B"`))
assert.Equal(t, int64(1), s.Size)
s = Size{}
s.UnmarshalTOML([]byte(`1`))
assert.Equal(t, int64(1), s.Size)
s = Size{}
s.UnmarshalTOML([]byte(`'1'`))
assert.Equal(t, int64(1), s.Size)
s = Size{}
s.UnmarshalTOML([]byte(`"1GB"`))
assert.Equal(t, int64(1000*1000*1000), s.Size)
s = Size{}
s.UnmarshalTOML([]byte(`"12GiB"`))
assert.Equal(t, int64(12*1024*1024*1024), s.Size)
}
func TestCompressWithGzip(t *testing.T) {
testData := "the quick brown fox jumps over the lazy dog"
inputBuffer := bytes.NewBuffer([]byte(testData))
outputBuffer, err := CompressWithGzip(inputBuffer)
assert.NoError(t, err)
gzipReader, err := gzip.NewReader(outputBuffer)
assert.NoError(t, err)
defer gzipReader.Close()
output, err := ioutil.ReadAll(gzipReader)
assert.NoError(t, err)
assert.Equal(t, testData, string(output))
}
type mockReader struct {
readN uint64 // record the number of calls to Read
}
func (r *mockReader) Read(p []byte) (n int, err error) {
r.readN++
return rand.Read(p)
}
func TestCompressWithGzipEarlyClose(t *testing.T) {
mr := &mockReader{}
rc, err := CompressWithGzip(mr)
assert.NoError(t, err)
n, err := io.CopyN(ioutil.Discard, rc, 10000)
assert.NoError(t, err)
assert.Equal(t, int64(10000), n)
r1 := mr.readN
err = rc.Close()
assert.NoError(t, err)
n, err = io.CopyN(ioutil.Discard, rc, 10000)
assert.Error(t, io.EOF, err)
assert.Equal(t, int64(0), n)
r2 := mr.readN
// no more read to the source after closing
assert.Equal(t, r1, r2)
}
func TestVersionAlreadySet(t *testing.T) {
err := SetVersion("foo")
assert.Nil(t, err)
err = SetVersion("bar")
assert.NotNil(t, err)
assert.IsType(t, VersionAlreadySetError, err)
assert.Equal(t, "foo", Version())
}
func TestAlignDuration(t *testing.T) {
tests := []struct {
name string
now time.Time
interval time.Duration
expected time.Duration
}{
{
name: "aligned",
now: time.Date(2018, 1, 1, 1, 1, 0, 0, time.UTC),
interval: 10 * time.Second,
expected: 0 * time.Second,
},
{
name: "standard interval",
now: time.Date(2018, 1, 1, 1, 1, 1, 0, time.UTC),
interval: 10 * time.Second,
expected: 9 * time.Second,
},
{
name: "odd interval",
now: time.Date(2018, 1, 1, 1, 1, 1, 0, time.UTC),
interval: 3 * time.Second,
expected: 2 * time.Second,
},
{
name: "sub second interval",
now: time.Date(2018, 1, 1, 1, 1, 0, 5e8, time.UTC),
interval: 1 * time.Second,
expected: 500 * time.Millisecond,
},
{
name: "non divisible not aligned on minutes",
now: time.Date(2018, 1, 1, 1, 0, 0, 0, time.UTC),
interval: 1*time.Second + 100*time.Millisecond,
expected: 400 * time.Millisecond,
},
{
name: "long interval",
now: time.Date(2018, 1, 1, 1, 1, 0, 0, time.UTC),
interval: 1 * time.Hour,
expected: 59 * time.Minute,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
actual := AlignDuration(tt.now, tt.interval)
require.Equal(t, tt.expected, actual)
})
}
}
func TestAlignTime(t *testing.T) {
rfc3339 := func(value string) time.Time {
t, _ := time.Parse(time.RFC3339, value)
return t
}
tests := []struct {
name string
now time.Time
interval time.Duration
expected time.Time
}{
{
name: "aligned",
now: rfc3339("2018-01-01T01:01:00Z"),
interval: 10 * time.Second,
expected: rfc3339("2018-01-01T01:01:00Z"),
},
{
name: "aligned",
now: rfc3339("2018-01-01T01:01:01Z"),
interval: 10 * time.Second,
expected: rfc3339("2018-01-01T01:01:10Z"),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
actual := AlignTime(tt.now, tt.interval)
require.Equal(t, tt.expected, actual)
})
}
}
func TestParseTimestamp(t *testing.T) {
rfc3339 := func(value string) time.Time {
tm, err := time.Parse(time.RFC3339Nano, value)
if err != nil {
panic(err)
}
return tm
}
tests := []struct {
name string
format string
timestamp interface{}
location string
expected time.Time
err bool
}{
{
name: "parse layout string in utc",
format: "2006-01-02 15:04:05",
timestamp: "2019-02-20 21:50:34",
location: "UTC",
expected: rfc3339("2019-02-20T21:50:34Z"),
},
{
name: "parse layout string with invalid timezone",
format: "2006-01-02 15:04:05",
timestamp: "2019-02-20 21:50:34",
location: "InvalidTimeZone",
err: true,
},
{
name: "layout regression 6386",
format: "02.01.2006 15:04:05",
timestamp: "09.07.2019 00:11:00",
expected: rfc3339("2019-07-09T00:11:00Z"),
},
{
name: "default location is utc",
format: "2006-01-02 15:04:05",
timestamp: "2019-02-20 21:50:34",
expected: rfc3339("2019-02-20T21:50:34Z"),
},
{
name: "unix seconds without fractional",
format: "unix",
timestamp: "1568338208",
expected: rfc3339("2019-09-13T01:30:08Z"),
},
{
name: "unix seconds with fractional",
format: "unix",
timestamp: "1568338208.500",
expected: rfc3339("2019-09-13T01:30:08.500Z"),
},
{
name: "unix seconds with fractional and comma decimal point",
format: "unix",
timestamp: "1568338208,500",
expected: rfc3339("2019-09-13T01:30:08.500Z"),
},
{
name: "unix seconds extra precision",
format: "unix",
timestamp: "1568338208.00000050042",
expected: rfc3339("2019-09-13T01:30:08.000000500Z"),
},
{
name: "unix seconds integer",
format: "unix",
timestamp: int64(1568338208),
expected: rfc3339("2019-09-13T01:30:08Z"),
},
{
name: "unix seconds float",
format: "unix",
timestamp: float64(1568338208.500),
expected: rfc3339("2019-09-13T01:30:08.500Z"),
},
{
name: "unix milliseconds",
format: "unix_ms",
timestamp: "1568338208500",
expected: rfc3339("2019-09-13T01:30:08.500Z"),
},
{
name: "unix milliseconds with fractional is ignored",
format: "unix_ms",
timestamp: "1568338208500.42",
expected: rfc3339("2019-09-13T01:30:08.500Z"),
},
{
name: "unix microseconds",
format: "unix_us",
timestamp: "1568338208000500",
expected: rfc3339("2019-09-13T01:30:08.000500Z"),
},
{
name: "unix nanoseconds",
format: "unix_ns",
timestamp: "1568338208000000500",
expected: rfc3339("2019-09-13T01:30:08.000000500Z"),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tm, err := ParseTimestamp(tt.format, tt.timestamp, tt.location)
if tt.err {
require.Error(t, err)
} else {
require.NoError(t, err)
require.Equal(t, tt.expected, tm)
}
})
}
}
func TestProductToken(t *testing.T) {
token := ProductToken()
// Telegraf version depends on the call to SetVersion, it cannot be set
// multiple times and is not thread-safe.
re := regexp.MustCompile(`^Telegraf/[^\s]+ Go/\d+.\d+(.\d+)?$`)
require.True(t, re.MatchString(token), token)
}

View File

@ -3,7 +3,6 @@ package models
import (
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
)
@ -79,13 +78,13 @@ func (f *Filter) Compile() error {
return fmt.Errorf("Error compiling 'taginclude', %s", err)
}
for i := range f.TagDrop {
for i, _ := range f.TagDrop {
f.TagDrop[i].filter, err = filter.Compile(f.TagDrop[i].Filter)
if err != nil {
return fmt.Errorf("Error compiling 'tagdrop', %s", err)
}
}
for i := range f.TagPass {
for i, _ := range f.TagPass {
f.TagPass[i].filter, err = filter.Compile(f.TagPass[i].Filter)
if err != nil {
return fmt.Errorf("Error compiling 'tagpass', %s", err)
@ -94,35 +93,45 @@ func (f *Filter) Compile() error {
return nil
}
// Select returns true if the metric matches according to the
// namepass/namedrop and tagpass/tagdrop filters. The metric is not modified.
func (f *Filter) Select(metric telegraf.Metric) bool {
// Apply applies the filter to the given measurement name, fields map, and
// tags map. It will return false if the metric should be "filtered out", and
// true if the metric should "pass".
// It will modify tags & fields in-place if they need to be deleted.
func (f *Filter) Apply(
measurement string,
fields map[string]interface{},
tags map[string]string,
) bool {
if !f.isActive {
return true
}
if !f.shouldNamePass(metric.Name()) {
// check if the measurement name should pass
if !f.shouldNamePass(measurement) {
return false
}
if !f.shouldTagsPass(metric.TagList()) {
// check if the tags should pass
if !f.shouldTagsPass(tags) {
return false
}
// filter fields
for fieldkey, _ := range fields {
if !f.shouldFieldPass(fieldkey) {
delete(fields, fieldkey)
}
}
if len(fields) == 0 {
return false
}
// filter tags
f.filterTags(tags)
return true
}
// Modify removes any tags and fields from the metric according to the
// fieldpass/fielddrop and taginclude/tagexclude filters.
func (f *Filter) Modify(metric telegraf.Metric) {
if !f.isActive {
return
}
f.filterFields(metric)
f.filterTags(metric)
}
// IsActive checking if filter is active
func (f *Filter) IsActive() bool {
return f.isActive
@ -131,6 +140,7 @@ func (f *Filter) IsActive() bool {
// shouldNamePass returns true if the metric should pass, false if should drop
// based on the drop/pass filter parameters
func (f *Filter) shouldNamePass(key string) bool {
pass := func(f *Filter) bool {
if f.namePass.Match(key) {
return true
@ -159,29 +169,44 @@ func (f *Filter) shouldNamePass(key string) bool {
// shouldFieldPass returns true if the metric should pass, false if should drop
// based on the drop/pass filter parameters
func (f *Filter) shouldFieldPass(key string) bool {
if f.fieldPass != nil && f.fieldDrop != nil {
return f.fieldPass.Match(key) && !f.fieldDrop.Match(key)
} else if f.fieldPass != nil {
return f.fieldPass.Match(key)
} else if f.fieldDrop != nil {
return !f.fieldDrop.Match(key)
pass := func(f *Filter) bool {
if f.fieldPass.Match(key) {
return true
}
return false
}
drop := func(f *Filter) bool {
if f.fieldDrop.Match(key) {
return false
}
return true
}
if f.fieldPass != nil && f.fieldDrop != nil {
return pass(f) && drop(f)
} else if f.fieldPass != nil {
return pass(f)
} else if f.fieldDrop != nil {
return drop(f)
}
return true
}
// shouldTagsPass returns true if the metric should pass, false if should drop
// based on the tagdrop/tagpass filter parameters
func (f *Filter) shouldTagsPass(tags []*telegraf.Tag) bool {
func (f *Filter) shouldTagsPass(tags map[string]string) bool {
pass := func(f *Filter) bool {
for _, pat := range f.TagPass {
if pat.filter == nil {
continue
}
for _, tag := range tags {
if tag.Key == pat.Name {
if pat.filter.Match(tag.Value) {
return true
}
if tagval, ok := tags[pat.Name]; ok {
if pat.filter.Match(tagval) {
return true
}
}
}
@ -193,11 +218,9 @@ func (f *Filter) shouldTagsPass(tags []*telegraf.Tag) bool {
if pat.filter == nil {
continue
}
for _, tag := range tags {
if tag.Key == pat.Name {
if pat.filter.Match(tag.Value) {
return false
}
if tagval, ok := tags[pat.Name]; ok {
if pat.filter.Match(tagval) {
return false
}
}
}
@ -219,42 +242,22 @@ func (f *Filter) shouldTagsPass(tags []*telegraf.Tag) bool {
return true
}
// filterFields removes fields according to fieldpass/fielddrop.
func (f *Filter) filterFields(metric telegraf.Metric) {
filterKeys := []string{}
for _, field := range metric.FieldList() {
if !f.shouldFieldPass(field.Key) {
filterKeys = append(filterKeys, field.Key)
}
}
for _, key := range filterKeys {
metric.RemoveField(key)
}
}
// filterTags removes tags according to taginclude/tagexclude.
func (f *Filter) filterTags(metric telegraf.Metric) {
filterKeys := []string{}
// Apply TagInclude and TagExclude filters.
// modifies the tags map in-place.
func (f *Filter) filterTags(tags map[string]string) {
if f.tagInclude != nil {
for _, tag := range metric.TagList() {
if !f.tagInclude.Match(tag.Key) {
filterKeys = append(filterKeys, tag.Key)
for k, _ := range tags {
if !f.tagInclude.Match(k) {
delete(tags, k)
}
}
}
for _, key := range filterKeys {
metric.RemoveTag(key)
}
if f.tagExclude != nil {
for _, tag := range metric.TagList() {
if f.tagExclude.Match(tag.Key) {
filterKeys = append(filterKeys, tag.Key)
for k, _ := range tags {
if f.tagExclude.Match(k) {
delete(tags, k)
}
}
}
for _, key := range filterKeys {
metric.RemoveTag(key)
}
}
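A small end-to-end sketch of the Apply and filterTags behaviour above, assuming it runs inside the models package; the measurement, field, and tag names are illustrative.

f := Filter{
    NamePass:   []string{"cpu"},
    FieldDrop:  []string{"time_*"},
    TagExclude: []string{"host"},
}
if err := f.Compile(); err != nil {
    // handle the compile error
}

fields := map[string]interface{}{"usage_idle": 99.0, "time_user": 1.2}
tags := map[string]string{"host": "localhost", "cpu": "cpu0"}

ok := f.Apply("cpu", fields, tags)
// ok == true; "time_user" has been deleted from fields and "host" from tags,
// both in place. A measurement name that fails NamePass would return false
// without touching either map.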

View File

@ -2,30 +2,22 @@ package models
import (
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestFilter_ApplyEmpty(t *testing.T) {
f := Filter{}
require.NoError(t, f.Compile())
require.False(t, f.IsActive())
assert.False(t, f.IsActive())
m, err := metric.New("m",
map[string]string{},
map[string]interface{}{"value": int64(1)},
time.Now())
require.NoError(t, err)
require.True(t, f.Select(m))
assert.True(t, f.Apply("m", map[string]interface{}{"value": int64(1)}, map[string]string{}))
}
func TestFilter_ApplyTagsDontPass(t *testing.T) {
filters := []TagFilter{
{
TagFilter{
Name: "cpu",
Filter: []string{"cpu-*"},
},
@ -35,14 +27,11 @@ func TestFilter_ApplyTagsDontPass(t *testing.T) {
}
require.NoError(t, f.Compile())
require.NoError(t, f.Compile())
require.True(t, f.IsActive())
assert.True(t, f.IsActive())
m, err := metric.New("m",
map[string]string{"cpu": "cpu-total"},
assert.False(t, f.Apply("m",
map[string]interface{}{"value": int64(1)},
time.Now())
require.NoError(t, err)
require.False(t, f.Select(m))
map[string]string{"cpu": "cpu-total"}))
}
func TestFilter_ApplyDeleteFields(t *testing.T) {
@ -51,19 +40,11 @@ func TestFilter_ApplyDeleteFields(t *testing.T) {
}
require.NoError(t, f.Compile())
require.NoError(t, f.Compile())
require.True(t, f.IsActive())
assert.True(t, f.IsActive())
m, err := metric.New("m",
map[string]string{},
map[string]interface{}{
"value": int64(1),
"value2": int64(2),
},
time.Now())
require.NoError(t, err)
require.True(t, f.Select(m))
f.Modify(m)
require.Equal(t, map[string]interface{}{"value2": int64(2)}, m.Fields())
fields := map[string]interface{}{"value": int64(1), "value2": int64(2)}
assert.True(t, f.Apply("m", fields, nil))
assert.Equal(t, map[string]interface{}{"value2": int64(2)}, fields)
}
func TestFilter_ApplyDeleteAllFields(t *testing.T) {
@ -72,19 +53,10 @@ func TestFilter_ApplyDeleteAllFields(t *testing.T) {
}
require.NoError(t, f.Compile())
require.NoError(t, f.Compile())
require.True(t, f.IsActive())
assert.True(t, f.IsActive())
m, err := metric.New("m",
map[string]string{},
map[string]interface{}{
"value": int64(1),
"value2": int64(2),
},
time.Now())
require.NoError(t, err)
require.True(t, f.Select(m))
f.Modify(m)
require.Len(t, m.FieldList(), 0)
fields := map[string]interface{}{"value": int64(1), "value2": int64(2)}
assert.False(t, f.Apply("m", fields, nil))
}
func TestFilter_Empty(t *testing.T) {
@ -97,7 +69,7 @@ func TestFilter_Empty(t *testing.T) {
"foo_bar",
"foo.bar",
"foo-bar",
"supercalifragilisticexpialidocious",
"supercalifradjulisticexpialidocious",
}
for _, measurement := range measurements {
@ -245,11 +217,11 @@ func TestFilter_FieldDrop(t *testing.T) {
func TestFilter_TagPass(t *testing.T) {
filters := []TagFilter{
{
TagFilter{
Name: "cpu",
Filter: []string{"cpu-*"},
},
{
TagFilter{
Name: "mem",
Filter: []string{"mem_free"},
}}
@ -258,20 +230,20 @@ func TestFilter_TagPass(t *testing.T) {
}
require.NoError(t, f.Compile())
passes := [][]*telegraf.Tag{
{{Key: "cpu", Value: "cpu-total"}},
{{Key: "cpu", Value: "cpu-0"}},
{{Key: "cpu", Value: "cpu-1"}},
{{Key: "cpu", Value: "cpu-2"}},
{{Key: "mem", Value: "mem_free"}},
passes := []map[string]string{
{"cpu": "cpu-total"},
{"cpu": "cpu-0"},
{"cpu": "cpu-1"},
{"cpu": "cpu-2"},
{"mem": "mem_free"},
}
drops := [][]*telegraf.Tag{
{{Key: "cpu", Value: "cputotal"}},
{{Key: "cpu", Value: "cpu0"}},
{{Key: "cpu", Value: "cpu1"}},
{{Key: "cpu", Value: "cpu2"}},
{{Key: "mem", Value: "mem_used"}},
drops := []map[string]string{
{"cpu": "cputotal"},
{"cpu": "cpu0"},
{"cpu": "cpu1"},
{"cpu": "cpu2"},
{"mem": "mem_used"},
}
for _, tags := range passes {
@ -289,11 +261,11 @@ func TestFilter_TagPass(t *testing.T) {
func TestFilter_TagDrop(t *testing.T) {
filters := []TagFilter{
{
TagFilter{
Name: "cpu",
Filter: []string{"cpu-*"},
},
{
TagFilter{
Name: "mem",
Filter: []string{"mem_free"},
}}
@ -302,20 +274,20 @@ func TestFilter_TagDrop(t *testing.T) {
}
require.NoError(t, f.Compile())
drops := [][]*telegraf.Tag{
{{Key: "cpu", Value: "cpu-total"}},
{{Key: "cpu", Value: "cpu-0"}},
{{Key: "cpu", Value: "cpu-1"}},
{{Key: "cpu", Value: "cpu-2"}},
{{Key: "mem", Value: "mem_free"}},
drops := []map[string]string{
{"cpu": "cpu-total"},
{"cpu": "cpu-0"},
{"cpu": "cpu-1"},
{"cpu": "cpu-2"},
{"mem": "mem_free"},
}
passes := [][]*telegraf.Tag{
{{Key: "cpu", Value: "cputotal"}},
{{Key: "cpu", Value: "cpu0"}},
{{Key: "cpu", Value: "cpu1"}},
{{Key: "cpu", Value: "cpu2"}},
{{Key: "mem", Value: "mem_used"}},
passes := []map[string]string{
{"cpu": "cputotal"},
{"cpu": "cpu0"},
{"cpu": "cpu1"},
{"cpu": "cpu2"},
{"mem": "mem_used"},
}
for _, tags := range passes {
@ -332,70 +304,58 @@ func TestFilter_TagDrop(t *testing.T) {
}
func TestFilter_FilterTagsNoMatches(t *testing.T) {
m, err := metric.New("m",
map[string]string{
"host": "localhost",
"mytag": "foobar",
},
map[string]interface{}{"value": int64(1)},
time.Now())
require.NoError(t, err)
pretags := map[string]string{
"host": "localhost",
"mytag": "foobar",
}
f := Filter{
TagExclude: []string{"nomatch"},
}
require.NoError(t, f.Compile())
f.filterTags(m)
require.Equal(t, map[string]string{
f.filterTags(pretags)
assert.Equal(t, map[string]string{
"host": "localhost",
"mytag": "foobar",
}, m.Tags())
}, pretags)
f = Filter{
TagInclude: []string{"nomatch"},
}
require.NoError(t, f.Compile())
f.filterTags(m)
require.Equal(t, map[string]string{}, m.Tags())
f.filterTags(pretags)
assert.Equal(t, map[string]string{}, pretags)
}
func TestFilter_FilterTagsMatches(t *testing.T) {
m, err := metric.New("m",
map[string]string{
"host": "localhost",
"mytag": "foobar",
},
map[string]interface{}{"value": int64(1)},
time.Now())
require.NoError(t, err)
pretags := map[string]string{
"host": "localhost",
"mytag": "foobar",
}
f := Filter{
TagExclude: []string{"ho*"},
}
require.NoError(t, f.Compile())
f.filterTags(m)
require.Equal(t, map[string]string{
f.filterTags(pretags)
assert.Equal(t, map[string]string{
"mytag": "foobar",
}, m.Tags())
}, pretags)
m, err = metric.New("m",
map[string]string{
"host": "localhost",
"mytag": "foobar",
},
map[string]interface{}{"value": int64(1)},
time.Now())
require.NoError(t, err)
pretags = map[string]string{
"host": "localhost",
"mytag": "foobar",
}
f = Filter{
TagInclude: []string{"my*"},
}
require.NoError(t, f.Compile())
f.filterTags(m)
require.Equal(t, map[string]string{
f.filterTags(pretags)
assert.Equal(t, map[string]string{
"mytag": "foobar",
}, m.Tags())
}, pretags)
}
// TestFilter_FilterNamePassAndDrop used for check case when
@ -414,7 +374,7 @@ func TestFilter_FilterNamePassAndDrop(t *testing.T) {
require.NoError(t, f.Compile())
for i, name := range inputData {
require.Equal(t, f.shouldNamePass(name), expectedResult[i])
assert.Equal(t, f.shouldNamePass(name), expectedResult[i])
}
}
@ -434,7 +394,7 @@ func TestFilter_FilterFieldPassAndDrop(t *testing.T) {
require.NoError(t, f.Compile())
for i, field := range inputData {
require.Equal(t, f.shouldFieldPass(field), expectedResult[i])
assert.Equal(t, f.shouldFieldPass(field), expectedResult[i])
}
}
@ -442,28 +402,29 @@ func TestFilter_FilterFieldPassAndDrop(t *testing.T) {
// both parameters were defined
// see: https://github.com/influxdata/telegraf/issues/2860
func TestFilter_FilterTagsPassAndDrop(t *testing.T) {
inputData := [][]*telegraf.Tag{
{{Key: "tag1", Value: "1"}, {Key: "tag2", Value: "3"}},
{{Key: "tag1", Value: "1"}, {Key: "tag2", Value: "2"}},
{{Key: "tag1", Value: "2"}, {Key: "tag2", Value: "1"}},
{{Key: "tag1", Value: "4"}, {Key: "tag2", Value: "1"}},
inputData := []map[string]string{
{"tag1": "1", "tag2": "3"},
{"tag1": "1", "tag2": "2"},
{"tag1": "2", "tag2": "1"},
{"tag1": "4", "tag2": "1"},
}
expectedResult := []bool{false, true, false, false}
filterPass := []TagFilter{
{
TagFilter{
Name: "tag1",
Filter: []string{"1", "4"},
},
}
filterDrop := []TagFilter{
{
TagFilter{
Name: "tag1",
Filter: []string{"4"},
},
{
TagFilter{
Name: "tag2",
Filter: []string{"3"},
},
@ -477,49 +438,7 @@ func TestFilter_FilterTagsPassAndDrop(t *testing.T) {
require.NoError(t, f.Compile())
for i, tag := range inputData {
require.Equal(t, f.shouldTagsPass(tag), expectedResult[i])
assert.Equal(t, f.shouldTagsPass(tag), expectedResult[i])
}
}
func BenchmarkFilter(b *testing.B) {
tests := []struct {
name string
filter Filter
metric telegraf.Metric
}{
{
name: "empty filter",
filter: Filter{},
metric: testutil.MustMetric("cpu",
map[string]string{},
map[string]interface{}{
"value": 42,
},
time.Unix(0, 0),
),
},
{
name: "namepass",
filter: Filter{
NamePass: []string{"cpu"},
},
metric: testutil.MustMetric("cpu",
map[string]string{},
map[string]interface{}{
"value": 42,
},
time.Unix(0, 0),
),
},
}
for _, tt := range tests {
b.Run(tt.name, func(b *testing.B) {
require.NoError(b, tt.filter.Compile())
for n := 0; n < b.N; n++ {
tt.filter.Select(tt.metric)
}
})
}
}

View File

@ -0,0 +1,86 @@
package models
import (
"log"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
)
// makemetric is used by both RunningAggregator & RunningInput
// to make metrics.
// nameOverride: override the name of the measurement being made.
// namePrefix: add this prefix to each measurement name.
// nameSuffix: add this suffix to each measurement name.
// pluginTags: these are tags that are specific to this plugin.
// daemonTags: these are daemon-wide global tags, and get applied after pluginTags.
// filter: this is a filter to apply to each metric being made.
// applyFilter: if false, the above filter is not applied to each metric.
// This is used by Aggregators, because aggregators use filters
// on incoming metrics instead of on created metrics.
// TODO refactor this to not have such a huge func signature.
func makemetric(
measurement string,
fields map[string]interface{},
tags map[string]string,
nameOverride string,
namePrefix string,
nameSuffix string,
pluginTags map[string]string,
daemonTags map[string]string,
filter Filter,
applyFilter bool,
mType telegraf.ValueType,
t time.Time,
) telegraf.Metric {
if len(fields) == 0 || len(measurement) == 0 {
return nil
}
if tags == nil {
tags = make(map[string]string)
}
// Override measurement name if set
if len(nameOverride) != 0 {
measurement = nameOverride
}
// Apply measurement prefix and suffix if set
if len(namePrefix) != 0 {
measurement = namePrefix + measurement
}
if len(nameSuffix) != 0 {
measurement = measurement + nameSuffix
}
// Apply plugin-wide tags if set
for k, v := range pluginTags {
if _, ok := tags[k]; !ok {
tags[k] = v
}
}
// Apply daemon-wide tags if set
for k, v := range daemonTags {
if _, ok := tags[k]; !ok {
tags[k] = v
}
}
// Apply the metric filter(s)
// for aggregators, the filter does not get applied when the metric is made.
// instead, the filter is applied to metrics coming into the plugin,
// i.e. it gets applied in the RunningAggregator.Add function.
if applyFilter {
if ok := filter.Apply(measurement, fields, tags); !ok {
return nil
}
}
m, err := metric.New(measurement, tags, fields, t, mType)
if err != nil {
log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
return nil
}
return m
}
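A worked example of the naming and tag precedence described above; every value is illustrative.

// makemetric("cpu",
//     map[string]interface{}{"usage": 1.0},
//     map[string]string{"cpu": "cpu0"},                     // the metric's own tags win
//     "",                                                   // no name override
//     "telegraf_", "",                                      // prefix only
//     map[string]string{"cpu": "cpu-total", "role": "web"}, // plugin tags fill gaps
//     map[string]string{"dc": "us-east"},                   // daemon tags fill last
//     Filter{}, false, telegraf.Untyped, time.Now())
//
// yields measurement "telegraf_cpu" with tags
// {"cpu": "cpu0", "role": "web", "dc": "us-east"}: the existing "cpu" tag is
// kept because plugin and daemon tags are only applied when the key is not
// already set.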

View File

@ -0,0 +1,168 @@
package models
import (
"log"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
)
type RunningAggregator struct {
a telegraf.Aggregator
Config *AggregatorConfig
metrics chan telegraf.Metric
periodStart time.Time
periodEnd time.Time
}
func NewRunningAggregator(
a telegraf.Aggregator,
conf *AggregatorConfig,
) *RunningAggregator {
return &RunningAggregator{
a: a,
Config: conf,
metrics: make(chan telegraf.Metric, 100),
}
}
// AggregatorConfig containing configuration parameters for the running
// aggregator plugin.
type AggregatorConfig struct {
Name string
DropOriginal bool
NameOverride string
MeasurementPrefix string
MeasurementSuffix string
Tags map[string]string
Filter Filter
Period time.Duration
Delay time.Duration
}
func (r *RunningAggregator) Name() string {
return "aggregators." + r.Config.Name
}
func (r *RunningAggregator) MakeMetric(
measurement string,
fields map[string]interface{},
tags map[string]string,
mType telegraf.ValueType,
t time.Time,
) telegraf.Metric {
m := makemetric(
measurement,
fields,
tags,
r.Config.NameOverride,
r.Config.MeasurementPrefix,
r.Config.MeasurementSuffix,
r.Config.Tags,
nil,
r.Config.Filter,
false,
mType,
t,
)
if m != nil {
m.SetAggregate(true)
}
return m
}
// Add applies the given metric to the aggregator.
// Before applying to the plugin, it will run any defined filters on the metric.
// Apply returns true if the original metric should be dropped.
func (r *RunningAggregator) Add(in telegraf.Metric) bool {
if r.Config.Filter.IsActive() {
// check if the aggregator should apply this metric
name := in.Name()
fields := in.Fields()
tags := in.Tags()
t := in.Time()
if ok := r.Config.Filter.Apply(name, fields, tags); !ok {
// aggregator should not apply this metric
return false
}
in, _ = metric.New(name, tags, fields, t)
}
r.metrics <- in
return r.Config.DropOriginal
}
func (r *RunningAggregator) add(in telegraf.Metric) {
r.a.Add(in)
}
func (r *RunningAggregator) push(acc telegraf.Accumulator) {
r.a.Push(acc)
}
func (r *RunningAggregator) reset() {
r.a.Reset()
}
// Run runs the running aggregator, listens for incoming metrics, and waits
// for period ticks to tell it when to push and reset the aggregator.
func (r *RunningAggregator) Run(
acc telegraf.Accumulator,
shutdown chan struct{},
) {
// The start of the period is truncated to the nearest second.
//
// Every metric then gets its timestamp checked and is dropped if it
// is not within:
//
// start < t < end + truncation + delay
//
// So if we start at now = 00:00.2 with a 10s period and 0.3s delay:
// now = 00:00.2
// start = 00:00
// truncation = 00:00.2
// end = 00:10
// 1st interval: 00:00 - 00:10.5
// 2nd interval: 00:10 - 00:20.5
// etc.
//
now := time.Now()
r.periodStart = now.Truncate(time.Second)
truncation := now.Sub(r.periodStart)
r.periodEnd = r.periodStart.Add(r.Config.Period)
time.Sleep(r.Config.Delay)
periodT := time.NewTicker(r.Config.Period)
defer periodT.Stop()
for {
select {
case <-shutdown:
if len(r.metrics) > 0 {
// wait until metrics are flushed before exiting
continue
}
return
case m := <-r.metrics:
if m.Time().Before(r.periodStart) ||
m.Time().After(r.periodEnd.Add(truncation).Add(r.Config.Delay)) {
// the metric is outside the current aggregation period, so
// skip it.
log.Printf("D! aggregator: metric \"%s\" is not in the current timewindow, skipping", m.Name())
continue
}
r.add(m)
case <-periodT.C:
r.periodStart = r.periodEnd
r.periodEnd = r.periodStart.Add(r.Config.Period)
r.push(acc)
r.reset()
}
}
}
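Putting numbers on the acceptance window used in the loop above, with the same values as the comment (10s period, 0.3s delay, started at 00:00.2):

// start      = 00:00   (now truncated to the second)
// truncation = 0.2s    (now minus start)
// end        = 00:10
// accepted:    start < t < end + truncation + delay = 00:10.5
//
// A metric stamped 00:10.4 is still added during the first interval, while one
// stamped 00:10.6 is skipped; it is only accepted if it arrives once the
// second interval (00:10 - 00:20.5) has become current.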

View File

@ -0,0 +1,192 @@
package models
import (
"sync"
"sync/atomic"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
func TestAdd(t *testing.T) {
a := &TestAggregator{}
ra := NewRunningAggregator(a, &AggregatorConfig{
Name: "TestRunningAggregator",
Filter: Filter{
NamePass: []string{"*"},
},
Period: time.Millisecond * 500,
})
assert.NoError(t, ra.Config.Filter.Compile())
acc := testutil.Accumulator{}
go ra.Run(&acc, make(chan struct{}))
m := ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
time.Now().Add(time.Millisecond*150),
)
assert.False(t, ra.Add(m))
for {
time.Sleep(time.Millisecond)
if atomic.LoadInt64(&a.sum) > 0 {
break
}
}
assert.Equal(t, int64(101), atomic.LoadInt64(&a.sum))
}
func TestAddMetricsOutsideCurrentPeriod(t *testing.T) {
a := &TestAggregator{}
ra := NewRunningAggregator(a, &AggregatorConfig{
Name: "TestRunningAggregator",
Filter: Filter{
NamePass: []string{"*"},
},
Period: time.Millisecond * 500,
})
assert.NoError(t, ra.Config.Filter.Compile())
acc := testutil.Accumulator{}
go ra.Run(&acc, make(chan struct{}))
// metric before current period
m := ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
time.Now().Add(-time.Hour),
)
assert.False(t, ra.Add(m))
// metric after current period
m = ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
time.Now().Add(time.Hour),
)
assert.False(t, ra.Add(m))
// "now" metric
m = ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
time.Now().Add(time.Millisecond*50),
)
assert.False(t, ra.Add(m))
for {
time.Sleep(time.Millisecond)
if atomic.LoadInt64(&a.sum) > 0 {
break
}
}
assert.Equal(t, int64(101), atomic.LoadInt64(&a.sum))
}
func TestAddAndPushOnePeriod(t *testing.T) {
a := &TestAggregator{}
ra := NewRunningAggregator(a, &AggregatorConfig{
Name: "TestRunningAggregator",
Filter: Filter{
NamePass: []string{"*"},
},
Period: time.Millisecond * 500,
})
assert.NoError(t, ra.Config.Filter.Compile())
acc := testutil.Accumulator{}
shutdown := make(chan struct{})
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
ra.Run(&acc, shutdown)
}()
m := ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
time.Now().Add(time.Millisecond*100),
)
assert.False(t, ra.Add(m))
for {
time.Sleep(time.Millisecond)
if acc.NMetrics() > 0 {
break
}
}
acc.AssertContainsFields(t, "TestMetric", map[string]interface{}{"sum": int64(101)})
close(shutdown)
wg.Wait()
}
func TestAddDropOriginal(t *testing.T) {
ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{
Name: "TestRunningAggregator",
Filter: Filter{
NamePass: []string{"RI*"},
},
DropOriginal: true,
})
assert.NoError(t, ra.Config.Filter.Compile())
m := ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
time.Now(),
)
assert.True(t, ra.Add(m))
// this metric name doesn't match the filter, so Add will return false
m2 := ra.MakeMetric(
"foobar",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
time.Now(),
)
assert.False(t, ra.Add(m2))
}
type TestAggregator struct {
sum int64
}
func (t *TestAggregator) Description() string { return "" }
func (t *TestAggregator) SampleConfig() string { return "" }
func (t *TestAggregator) Reset() {
atomic.StoreInt64(&t.sum, 0)
}
func (t *TestAggregator) Push(acc telegraf.Accumulator) {
acc.AddFields("TestMetric",
map[string]interface{}{"sum": t.sum},
map[string]string{},
)
}
func (t *TestAggregator) Add(in telegraf.Metric) {
for _, v := range in.Fields() {
if vi, ok := v.(int64); ok {
atomic.AddInt64(&t.sum, vi)
}
}
}

View File

@ -0,0 +1,102 @@
package models
import (
"fmt"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/serializers/influx"
"github.com/influxdata/telegraf/selfstat"
)
var GlobalMetricsGathered = selfstat.Register("agent", "metrics_gathered", map[string]string{})
type RunningInput struct {
Input telegraf.Input
Config *InputConfig
trace bool
defaultTags map[string]string
MetricsGathered selfstat.Stat
}
func NewRunningInput(
input telegraf.Input,
config *InputConfig,
) *RunningInput {
return &RunningInput{
Input: input,
Config: config,
MetricsGathered: selfstat.Register(
"gather",
"metrics_gathered",
map[string]string{"input": config.Name},
),
}
}
// InputConfig containing a name, interval, and filter
type InputConfig struct {
Name string
NameOverride string
MeasurementPrefix string
MeasurementSuffix string
Tags map[string]string
Filter Filter
Interval time.Duration
}
func (r *RunningInput) Name() string {
return "inputs." + r.Config.Name
}
// MakeMetric either returns a metric, or returns nil if the metric doesn't
// need to be created (because of filtering, an error, etc.)
func (r *RunningInput) MakeMetric(
measurement string,
fields map[string]interface{},
tags map[string]string,
mType telegraf.ValueType,
t time.Time,
) telegraf.Metric {
m := makemetric(
measurement,
fields,
tags,
r.Config.NameOverride,
r.Config.MeasurementPrefix,
r.Config.MeasurementSuffix,
r.Config.Tags,
r.defaultTags,
r.Config.Filter,
true,
mType,
t,
)
if r.trace && m != nil {
s := influx.NewSerializer()
s.SetFieldSortOrder(influx.SortFields)
octets, err := s.Serialize(m)
if err == nil {
fmt.Print("> " + string(octets))
}
}
r.MetricsGathered.Incr(1)
GlobalMetricsGathered.Incr(1)
return m
}
func (r *RunningInput) Trace() bool {
return r.trace
}
func (r *RunningInput) SetTrace(trace bool) {
r.trace = trace
}
func (r *RunningInput) SetDefaultTags(tags map[string]string) {
r.defaultTags = tags
}

View File

@ -4,61 +4,26 @@ import (
"testing"
"time"
"github.com/influxdata/telegraf/selfstat"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestMakeMetricFilterAfterApplyingGlobalTags(t *testing.T) {
now := time.Now()
ri := NewRunningInput(&testInput{}, &InputConfig{
Filter: Filter{
TagInclude: []string{"b"},
},
})
require.NoError(t, ri.Config.Filter.Compile())
ri.SetDefaultTags(map[string]string{"a": "x", "b": "y"})
m, err := metric.New("cpu",
map[string]string{},
map[string]interface{}{
"value": 42,
},
now)
require.NoError(t, err)
actual := ri.MakeMetric(m)
expected, err := metric.New("cpu",
map[string]string{
"b": "y",
},
map[string]interface{}{
"value": 42,
},
now)
require.NoError(t, err)
testutil.RequireMetricEqual(t, expected, actual)
}
func TestMakeMetricNoFields(t *testing.T) {
now := time.Now()
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})
m, err := metric.New("RITest",
map[string]string{},
m := ri.MakeMetric(
"RITest",
map[string]interface{}{},
map[string]string{},
telegraf.Untyped,
now,
telegraf.Untyped)
m = ri.MakeMetric(m)
require.NoError(t, err)
)
assert.Nil(t, m)
}
@ -69,16 +34,16 @@ func TestMakeMetricNilFields(t *testing.T) {
Name: "TestRunningInput",
})
m, err := metric.New("RITest",
map[string]string{},
m := ri.MakeMetric(
"RITest",
map[string]interface{}{
"value": int64(101),
"value": int(101),
"nil": nil,
},
map[string]string{},
telegraf.Untyped,
now,
telegraf.Untyped)
require.NoError(t, err)
m = ri.MakeMetric(m)
)
expected, err := metric.New("RITest",
map[string]string{},
@ -101,14 +66,16 @@ func TestMakeMetricWithPluginTags(t *testing.T) {
},
})
m := testutil.MustMetric("RITest",
map[string]string{},
map[string]interface{}{
"value": int64(101),
},
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
nil,
telegraf.Untyped,
now,
telegraf.Untyped)
m = ri.MakeMetric(m)
)
expected, err := metric.New("RITest",
map[string]string{
@ -133,17 +100,17 @@ func TestMakeMetricFilteredOut(t *testing.T) {
Filter: Filter{NamePass: []string{"foobar"}},
})
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
assert.NoError(t, ri.Config.Filter.Compile())
m, err := metric.New("RITest",
map[string]string{},
map[string]interface{}{
"value": int64(101),
},
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
nil,
telegraf.Untyped,
now,
telegraf.Untyped)
m = ri.MakeMetric(m)
require.NoError(t, err)
)
assert.Nil(t, m)
}
@ -156,14 +123,16 @@ func TestMakeMetricWithDaemonTags(t *testing.T) {
"foo": "bar",
})
m := testutil.MustMetric("RITest",
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
map[string]interface{}{
"value": int64(101),
},
telegraf.Untyped,
now,
telegraf.Untyped)
m = ri.MakeMetric(m)
)
expected, err := metric.New("RITest",
map[string]string{
"foo": "bar",
@ -184,15 +153,13 @@ func TestMakeMetricNameOverride(t *testing.T) {
NameOverride: "foobar",
})
m, err := metric.New("RITest",
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
map[string]interface{}{
"value": int64(101),
},
telegraf.Untyped,
now,
telegraf.Untyped)
require.NoError(t, err)
m = ri.MakeMetric(m)
)
expected, err := metric.New("foobar",
nil,
map[string]interface{}{
@ -211,15 +178,13 @@ func TestMakeMetricNamePrefix(t *testing.T) {
MeasurementPrefix: "foobar_",
})
m, err := metric.New("RITest",
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
map[string]interface{}{
"value": int64(101),
},
telegraf.Untyped,
now,
telegraf.Untyped)
require.NoError(t, err)
m = ri.MakeMetric(m)
)
expected, err := metric.New("foobar_RITest",
nil,
map[string]interface{}{
@ -238,15 +203,13 @@ func TestMakeMetricNameSuffix(t *testing.T) {
MeasurementSuffix: "_foobar",
})
m, err := metric.New("RITest",
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
map[string]interface{}{
"value": int64(101),
},
telegraf.Untyped,
now,
telegraf.Untyped)
require.NoError(t, err)
m = ri.MakeMetric(m)
)
expected, err := metric.New("RITest_foobar",
nil,
map[string]interface{}{
@ -258,35 +221,6 @@ func TestMakeMetricNameSuffix(t *testing.T) {
require.Equal(t, expected, m)
}
func TestMetricErrorCounters(t *testing.T) {
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestMetricErrorCounters",
})
getGatherErrors := func() int64 {
for _, r := range selfstat.Metrics() {
tag, hasTag := r.GetTag("input")
if r.Name() == "internal_gather" && hasTag && tag == "TestMetricErrorCounters" {
errCount, ok := r.GetField("errors")
if !ok {
t.Fatal("Expected error field")
}
return errCount.(int64)
}
}
return 0
}
before := getGatherErrors()
ri.Log().Error("Oh no")
after := getGatherErrors()
require.Greater(t, after, before)
require.GreaterOrEqual(t, int64(1), GlobalGatherErrors.Get())
}
type testInput struct{}
func (t *testInput) Description() string { return "" }


@ -0,0 +1,194 @@
package models
import (
"log"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/buffer"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/selfstat"
)
const (
// Default number of metrics in a batch.
DEFAULT_METRIC_BATCH_SIZE = 1000
// Default number of metrics kept. It should be a multiple of batch size.
DEFAULT_METRIC_BUFFER_LIMIT = 10000
)
// RunningOutput contains the output configuration
type RunningOutput struct {
Name string
Output telegraf.Output
Config *OutputConfig
MetricBufferLimit int
MetricBatchSize int
MetricsFiltered selfstat.Stat
MetricsWritten selfstat.Stat
BufferSize selfstat.Stat
BufferLimit selfstat.Stat
WriteTime selfstat.Stat
metrics *buffer.Buffer
failMetrics *buffer.Buffer
// Guards against concurrent calls to the Output as described in #3009
sync.Mutex
}
func NewRunningOutput(
name string,
output telegraf.Output,
conf *OutputConfig,
batchSize int,
bufferLimit int,
) *RunningOutput {
if bufferLimit == 0 {
bufferLimit = DEFAULT_METRIC_BUFFER_LIMIT
}
if batchSize == 0 {
batchSize = DEFAULT_METRIC_BATCH_SIZE
}
ro := &RunningOutput{
Name: name,
metrics: buffer.NewBuffer(batchSize),
failMetrics: buffer.NewBuffer(bufferLimit),
Output: output,
Config: conf,
MetricBufferLimit: bufferLimit,
MetricBatchSize: batchSize,
MetricsWritten: selfstat.Register(
"write",
"metrics_written",
map[string]string{"output": name},
),
MetricsFiltered: selfstat.Register(
"write",
"metrics_filtered",
map[string]string{"output": name},
),
BufferSize: selfstat.Register(
"write",
"buffer_size",
map[string]string{"output": name},
),
BufferLimit: selfstat.Register(
"write",
"buffer_limit",
map[string]string{"output": name},
),
WriteTime: selfstat.RegisterTiming(
"write",
"write_time_ns",
map[string]string{"output": name},
),
}
ro.BufferLimit.Set(int64(ro.MetricBufferLimit))
return ro
}
// AddMetric adds a metric to the output. It also writes out a full batch as
// soon as the number of buffered metrics reaches the configured batch size.
func (ro *RunningOutput) AddMetric(m telegraf.Metric) {
if m == nil {
return
}
// Filter any tagexclude/taginclude parameters before adding metric
if ro.Config.Filter.IsActive() {
// In order to filter out tags, we need to create a new metric, since
// metrics are immutable once created.
name := m.Name()
tags := m.Tags()
fields := m.Fields()
t := m.Time()
if ok := ro.Config.Filter.Apply(name, fields, tags); !ok {
ro.MetricsFiltered.Incr(1)
return
}
// error is not possible if creating from another metric, so ignore.
m, _ = metric.New(name, tags, fields, t)
}
ro.metrics.Add(m)
if ro.metrics.Len() == ro.MetricBatchSize {
batch := ro.metrics.Batch(ro.MetricBatchSize)
err := ro.write(batch)
if err != nil {
ro.failMetrics.Add(batch...)
}
}
}
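A minimal usage sketch, not part of this diff: it assumes a hypothetical nullOutput type satisfying telegraf.Output and is written as if it sat next to the tests in the models package, so the telegraf and testutil imports shown there are available.

// nullOutput is a hypothetical no-op telegraf.Output, used only for illustration.
type nullOutput struct{}

func (n *nullOutput) Connect() error                   { return nil }
func (n *nullOutput) Close() error                     { return nil }
func (n *nullOutput) Description() string              { return "discard all metrics" }
func (n *nullOutput) SampleConfig() string             { return "" }
func (n *nullOutput) Write(in []telegraf.Metric) error { return nil }

func ExampleRunningOutput_AddMetric() {
	// Batch size 3, buffer limit 12: the third AddMetric triggers an
	// automatic flush, and the final Write sends whatever is still buffered.
	ro := NewRunningOutput("null", &nullOutput{}, &OutputConfig{Filter: Filter{}}, 3, 12)
	for i := 0; i < 5; i++ {
		ro.AddMetric(testutil.TestMetric(i, "example"))
	}
	_ = ro.Write()
}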
// Write writes all cached points to this output.
func (ro *RunningOutput) Write() error {
nFails, nMetrics := ro.failMetrics.Len(), ro.metrics.Len()
ro.BufferSize.Set(int64(nFails + nMetrics))
log.Printf("D! Output [%s] buffer fullness: %d / %d metrics. ",
ro.Name, nFails+nMetrics, ro.MetricBufferLimit)
var err error
if !ro.failMetrics.IsEmpty() {
// how many batches of failed writes we need to write.
nBatches := nFails/ro.MetricBatchSize + 1
batchSize := ro.MetricBatchSize
for i := 0; i < nBatches; i++ {
// If it's the last batch, only grab the metrics that have not had
// a write attempt already (this is primarily to preserve order).
if i == nBatches-1 {
batchSize = nFails % ro.MetricBatchSize
}
batch := ro.failMetrics.Batch(batchSize)
// If we've already failed previous writes, don't bother trying to
// write to this output again. We are not exiting the loop just so
// that we can rotate the metrics to preserve order.
if err == nil {
err = ro.write(batch)
}
if err != nil {
ro.failMetrics.Add(batch...)
}
}
}
batch := ro.metrics.Batch(ro.MetricBatchSize)
// see comment above about not trying to write to an already failed output.
// if ro.failMetrics is empty then err will always be nil at this point.
if err == nil {
err = ro.write(batch)
}
if err != nil {
ro.failMetrics.Add(batch...)
return err
}
return nil
}
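For concreteness, a standalone sketch (illustrative numbers only) of the batch-sizing arithmetic the loop above uses when re-sending failed metrics:

package main

import "fmt"

func main() {
	// Mirrors the loop in Write: with 11 failed metrics and a batch size of 5,
	// the output receives batches of 5, 5 and finally the 1 leftover metric.
	nFails, batchSize := 11, 5
	nBatches := nFails/batchSize + 1
	for i := 0; i < nBatches; i++ {
		size := batchSize
		if i == nBatches-1 {
			size = nFails % batchSize
		}
		fmt.Println(size) // prints 5, 5, 1
	}
}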
func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
nMetrics := len(metrics)
if nMetrics == 0 {
return nil
}
ro.Lock()
defer ro.Unlock()
start := time.Now()
err := ro.Output.Write(metrics)
elapsed := time.Since(start)
if err == nil {
log.Printf("D! Output [%s] wrote batch of %d metrics in %s\n",
ro.Name, nMetrics, elapsed)
ro.MetricsWritten.Incr(int64(nMetrics))
ro.WriteTime.Incr(elapsed.Nanoseconds())
}
return err
}
// OutputConfig contains the name and filter of an output.
type OutputConfig struct {
Name string
Filter Filter
}
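A hedged sketch of how an OutputConfig filter is typically set up; the NameDrop value and the "file" name are hypothetical, and the Compile call mirrors the ones made in the tests below.

// newFileOutputConfig is a hypothetical constructor, for illustration only.
func newFileOutputConfig() (*OutputConfig, error) {
	conf := &OutputConfig{
		Name:   "file",
		Filter: Filter{NameDrop: []string{"debug"}},
	}
	// Filters are compiled before use, as in the tests below.
	if err := conf.Filter.Compile(); err != nil {
		return nil, err
	}
	return conf, nil
}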


@ -4,11 +4,10 @@ import (
"fmt"
"sync"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/selfstat"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -29,14 +28,6 @@ var next5 = []telegraf.Metric{
testutil.TestMetric(101, "metric10"),
}
func reverse(metrics []telegraf.Metric) []telegraf.Metric {
result := make([]telegraf.Metric, 0, len(metrics))
for i := len(metrics) - 1; i >= 0; i-- {
result = append(result, metrics[i])
}
return result
}
// Benchmark adding metrics.
func BenchmarkRunningOutputAddWrite(b *testing.B) {
conf := &OutputConfig{
@ -84,6 +75,23 @@ func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
}
}
func TestAddingNilMetric(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{},
}
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
ro.AddMetric(nil)
ro.AddMetric(nil)
ro.AddMetric(nil)
err := ro.Write()
assert.NoError(t, err)
assert.Len(t, m.Metrics(), 0)
}
// Test that NameDrop filters get properly applied.
func TestRunningOutput_DropFilter(t *testing.T) {
conf := &OutputConfig{
@ -218,60 +226,6 @@ func TestRunningOutput_TagIncludeMatch(t *testing.T) {
assert.Len(t, m.Metrics()[0].Tags(), 1)
}
// Test that measurement name overriding works correctly
func TestRunningOutput_NameOverride(t *testing.T) {
conf := &OutputConfig{
NameOverride: "new_metric_name",
}
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
ro.AddMetric(testutil.TestMetric(101, "metric1"))
assert.Len(t, m.Metrics(), 0)
err := ro.Write()
assert.NoError(t, err)
assert.Len(t, m.Metrics(), 1)
assert.Equal(t, "new_metric_name", m.Metrics()[0].Name())
}
// Test that measurement name prefix is added correctly
func TestRunningOutput_NamePrefix(t *testing.T) {
conf := &OutputConfig{
NamePrefix: "prefix_",
}
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
ro.AddMetric(testutil.TestMetric(101, "metric1"))
assert.Len(t, m.Metrics(), 0)
err := ro.Write()
assert.NoError(t, err)
assert.Len(t, m.Metrics(), 1)
assert.Equal(t, "prefix_metric1", m.Metrics()[0].Name())
}
// Test that measurement name suffix is added correctly
func TestRunningOutput_NameSuffix(t *testing.T) {
conf := &OutputConfig{
NameSuffix: "_suffix",
}
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
ro.AddMetric(testutil.TestMetric(101, "metric1"))
assert.Len(t, m.Metrics(), 0)
err := ro.Write()
assert.NoError(t, err)
assert.Len(t, m.Metrics(), 1)
assert.Equal(t, "metric1_suffix", m.Metrics()[0].Name())
}
// Test that we can write metrics with a simple default setup.
func TestRunningOutputDefault(t *testing.T) {
conf := &OutputConfig{
@ -294,6 +248,56 @@ func TestRunningOutputDefault(t *testing.T) {
assert.Len(t, m.Metrics(), 10)
}
// Test that running output doesn't flush until it's full when
// FlushBufferWhenFull is set.
func TestRunningOutputFlushWhenFull(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{},
}
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 6, 10)
// Fill buffer to 1 under limit
for _, metric := range first5 {
ro.AddMetric(metric)
}
// no flush yet
assert.Len(t, m.Metrics(), 0)
// add one more metric
ro.AddMetric(next5[0])
// now it flushed
assert.Len(t, m.Metrics(), 6)
// add one more metric and write it manually
ro.AddMetric(next5[1])
err := ro.Write()
assert.NoError(t, err)
assert.Len(t, m.Metrics(), 7)
}
// Test that running output doesn't flush until it's full when
// FlushBufferWhenFull is set, twice.
func TestRunningOutputMultiFlushWhenFull(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{},
}
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 4, 12)
// Fill buffer past limit twice
for _, metric := range first5 {
ro.AddMetric(metric)
}
for _, metric := range next5 {
ro.AddMetric(metric)
}
// flushed twice
assert.Len(t, m.Metrics(), 8)
}
func TestRunningOutputWriteFail(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{},
@ -360,7 +364,7 @@ func TestRunningOutputWriteFailOrder(t *testing.T) {
// Verify that 10 metrics were written
assert.Len(t, m.Metrics(), 10)
// Verify that they are in order
expected := append(reverse(next5), reverse(first5)...)
expected := append(first5, next5...)
assert.Equal(t, expected, m.Metrics())
}
@ -418,17 +422,24 @@ func TestRunningOutputWriteFailOrder2(t *testing.T) {
err = ro.Write()
require.NoError(t, err)
// Verify that 20 metrics were written
// Verify that 10 metrics were written
assert.Len(t, m.Metrics(), 20)
// Verify that they are in order
expected := append(reverse(next5), reverse(first5)...)
expected = append(expected, reverse(next5)...)
expected = append(expected, reverse(first5)...)
expected := append(first5, next5...)
expected = append(expected, first5...)
expected = append(expected, next5...)
assert.Equal(t, expected, m.Metrics())
}
// Verify that the order of points is preserved when there is a remainder
// of points for the batch.
//
// ie, with a batch size of 5:
//
// 1 2 3 4 5 6 <-- order, failed points
// 6 1 2 3 4 5 <-- order, after 1st write failure (1 2 3 4 5 was batch)
// 1 2 3 4 5 6 <-- order, after 2nd write failure, (6 was batch)
//
func TestRunningOutputWriteFailOrder3(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{},
@ -464,54 +475,10 @@ func TestRunningOutputWriteFailOrder3(t *testing.T) {
// Verify that 6 metrics were written
assert.Len(t, m.Metrics(), 6)
// Verify that they are in order
expected := []telegraf.Metric{next5[0], first5[4], first5[3], first5[2], first5[1], first5[0]}
expected := append(first5, next5[0])
assert.Equal(t, expected, m.Metrics())
}
func TestInternalMetrics(t *testing.T) {
_ = NewRunningOutput(
"test_internal",
&mockOutput{},
&OutputConfig{
Filter: Filter{},
Name: "test_name",
Alias: "test_alias",
},
5,
10)
expected := []telegraf.Metric{
testutil.MustMetric(
"internal_write",
map[string]string{
"output": "test_name",
"alias": "test_alias",
},
map[string]interface{}{
"buffer_limit": 10,
"buffer_size": 0,
"errors": 0,
"metrics_added": 0,
"metrics_dropped": 0,
"metrics_filtered": 0,
"metrics_written": 0,
"write_time_ns": 0,
},
time.Unix(0, 0),
),
}
var actual []telegraf.Metric
for _, m := range selfstat.Metrics() {
output, _ := m.GetTag("output")
if m.Name() == "internal_write" && output == "test_name" {
actual = append(actual, m)
}
}
testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime())
}
type mockOutput struct {
sync.Mutex


@ -0,0 +1,51 @@
package models
import (
"sync"
"github.com/influxdata/telegraf"
)
type RunningProcessor struct {
Name string
sync.Mutex
Processor telegraf.Processor
Config *ProcessorConfig
}
type RunningProcessors []*RunningProcessor
func (rp RunningProcessors) Len() int { return len(rp) }
func (rp RunningProcessors) Swap(i, j int) { rp[i], rp[j] = rp[j], rp[i] }
func (rp RunningProcessors) Less(i, j int) bool { return rp[i].Config.Order < rp[j].Config.Order }
// ProcessorConfig contains the name, order, and filter of a processor
type ProcessorConfig struct {
Name string
Order int64
Filter Filter
}
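Because Len, Swap and Less above satisfy sort.Interface, processors can be arranged by Config.Order before they are applied; a sketch with made-up names:

package models

import "sort"

// orderedProcessors is a hypothetical helper, illustrative only.
func orderedProcessors() RunningProcessors {
	procs := RunningProcessors{
		{Name: "second", Config: &ProcessorConfig{Order: 2}},
		{Name: "first", Config: &ProcessorConfig{Order: 1}},
	}
	sort.Sort(procs)
	// procs[0].Name == "first", procs[1].Name == "second"
	return procs
}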
func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
rp.Lock()
defer rp.Unlock()
ret := []telegraf.Metric{}
for _, metric := range in {
if rp.Config.Filter.IsActive() {
// check whether this metric matches the processor's filter
if ok := rp.Config.Filter.Apply(metric.Name(), metric.Fields(), metric.Tags()); !ok {
// the metric does not match the filter, so pass it through unchanged
ret = append(ret, metric)
continue
}
}
// The metric matched (or no filter is active), so run it through the
// processor's Apply function and append the results to the output slice.
ret = append(ret, rp.Processor.Apply(metric)...)
}
return ret
}


@ -0,0 +1,117 @@
package models
import (
"testing"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
type TestProcessor struct {
}
func (f *TestProcessor) SampleConfig() string { return "" }
func (f *TestProcessor) Description() string { return "" }
// Apply renames:
// "foo" to "fuz"
// "bar" to "baz"
// And it also drops measurements named "dropme"
func (f *TestProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
out := make([]telegraf.Metric, 0)
for _, m := range in {
switch m.Name() {
case "foo":
out = append(out, testutil.TestMetric(1, "fuz"))
case "bar":
out = append(out, testutil.TestMetric(1, "baz"))
case "dropme":
// drop the metric!
default:
out = append(out, m)
}
}
return out
}
func NewTestRunningProcessor() *RunningProcessor {
out := &RunningProcessor{
Name: "test",
Processor: &TestProcessor{},
Config: &ProcessorConfig{Filter: Filter{}},
}
return out
}
func TestRunningProcessor(t *testing.T) {
inmetrics := []telegraf.Metric{
testutil.TestMetric(1, "foo"),
testutil.TestMetric(1, "bar"),
testutil.TestMetric(1, "baz"),
}
expectedNames := []string{
"fuz",
"baz",
"baz",
}
rfp := NewTestRunningProcessor()
filteredMetrics := rfp.Apply(inmetrics...)
actualNames := []string{
filteredMetrics[0].Name(),
filteredMetrics[1].Name(),
filteredMetrics[2].Name(),
}
assert.Equal(t, expectedNames, actualNames)
}
func TestRunningProcessor_WithNameDrop(t *testing.T) {
inmetrics := []telegraf.Metric{
testutil.TestMetric(1, "foo"),
testutil.TestMetric(1, "bar"),
testutil.TestMetric(1, "baz"),
}
expectedNames := []string{
"foo",
"baz",
"baz",
}
rfp := NewTestRunningProcessor()
rfp.Config.Filter.NameDrop = []string{"foo"}
assert.NoError(t, rfp.Config.Filter.Compile())
filteredMetrics := rfp.Apply(inmetrics...)
actualNames := []string{
filteredMetrics[0].Name(),
filteredMetrics[1].Name(),
filteredMetrics[2].Name(),
}
assert.Equal(t, expectedNames, actualNames)
}
func TestRunningProcessor_DroppedMetric(t *testing.T) {
inmetrics := []telegraf.Metric{
testutil.TestMetric(1, "dropme"),
testutil.TestMetric(1, "foo"),
testutil.TestMetric(1, "bar"),
}
expectedNames := []string{
"fuz",
"baz",
}
rfp := NewTestRunningProcessor()
filteredMetrics := rfp.Apply(inmetrics...)
actualNames := []string{
filteredMetrics[0].Name(),
filteredMetrics[1].Name(),
}
assert.Equal(t, expectedNames, actualNames)
}


@ -1,185 +0,0 @@
package rotate
// Rotating things
import (
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
)
// FilePerm defines the permissions that FileWriter uses for all the files it
// creates; DateFormat is the date layout used in rotated file names.
const (
FilePerm = os.FileMode(0644)
DateFormat = "2006-01-02"
)
// FileWriter implements io.WriteCloser and writes to the specified filename.
// It rotates at the specified interval and/or when the current file size
// exceeds maxSizeInBytes. At rotation time the current file is renamed and a
// new file is created. If the number of archives exceeds maxArchives, the
// oldest files are deleted.
type FileWriter struct {
filename string
filenameRotationTemplate string
current *os.File
interval time.Duration
maxSizeInBytes int64
maxArchives int
expireTime time.Time
bytesWritten int64
sync.Mutex
}
// NewFileWriter creates a new file writer.
func NewFileWriter(filename string, interval time.Duration, maxSizeInBytes int64, maxArchives int) (io.WriteCloser, error) {
if interval == 0 && maxSizeInBytes <= 0 {
// No rotation needed so a basic io.Writer will do the trick
return openFile(filename)
}
w := &FileWriter{
filename: filename,
interval: interval,
maxSizeInBytes: maxSizeInBytes,
maxArchives: maxArchives,
filenameRotationTemplate: getFilenameRotationTemplate(filename),
}
if err := w.openCurrent(); err != nil {
return nil, err
}
return w, nil
}
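A minimal usage sketch, not from the diff; it assumes the package path internal/rotate (importable only from inside the telegraf module) and made-up sizes: rotate daily or once the file passes 10 MB, keeping at most 5 archives.

package main

import (
	"log"
	"time"

	"github.com/influxdata/telegraf/internal/rotate"
)

func main() {
	// Rotate every 24h or at 10 MB, keep at most 5 archived files.
	w, err := rotate.NewFileWriter("/tmp/telegraf.log", 24*time.Hour, 10*1024*1024, 5)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if _, err := w.Write([]byte("hello\n")); err != nil {
		log.Fatal(err)
	}
}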
func openFile(filename string) (*os.File, error) {
return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_APPEND, FilePerm)
}
func getFilenameRotationTemplate(filename string) string {
// Extract the file extension
fileExt := filepath.Ext(filename)
// Remove the file extension from the filename (if any)
stem := strings.TrimSuffix(filename, fileExt)
return stem + ".%s-%s" + fileExt
}
// Write writes p to the current file, then checks to see if
// rotation is necessary.
func (w *FileWriter) Write(p []byte) (n int, err error) {
w.Lock()
defer w.Unlock()
if n, err = w.current.Write(p); err != nil {
return 0, err
}
w.bytesWritten += int64(n)
if err = w.rotateIfNeeded(); err != nil {
return 0, err
}
return n, nil
}
// Close closes the current file. Writer is unusable after this
// is called.
func (w *FileWriter) Close() (err error) {
w.Lock()
defer w.Unlock()
// Rotate before closing
if err = w.rotate(); err != nil {
return err
}
w.current = nil
return nil
}
func (w *FileWriter) openCurrent() (err error) {
// In case ModTime() fails, we use time.Now()
w.expireTime = time.Now().Add(w.interval)
w.bytesWritten = 0
w.current, err = openFile(w.filename)
if err != nil {
return err
}
// Goal here is to rotate old pre-existing files.
// For that we use fileInfo.ModTime, instead of time.Now().
// Example: telegraf is restarted every 23 hours and
// the rotation interval is set to 24 hours.
// With time.Now() as a reference we'd never rotate the file.
if fileInfo, err := w.current.Stat(); err == nil {
w.expireTime = fileInfo.ModTime().Add(w.interval)
w.bytesWritten = fileInfo.Size()
}
if err = w.rotateIfNeeded(); err != nil {
return err
}
return nil
}
func (w *FileWriter) rotateIfNeeded() error {
if (w.interval > 0 && time.Now().After(w.expireTime)) ||
(w.maxSizeInBytes > 0 && w.bytesWritten >= w.maxSizeInBytes) {
if err := w.rotate(); err != nil {
// Ignore rotation errors and keep the log open
fmt.Printf("unable to rotate the file '%s', %s", w.filename, err.Error())
}
return w.openCurrent()
}
return nil
}
func (w *FileWriter) rotate() (err error) {
if err = w.current.Close(); err != nil {
return err
}
// Use year-month-date for readability, unix time to make the file name unique with second precision
now := time.Now()
rotatedFilename := fmt.Sprintf(w.filenameRotationTemplate, now.Format(DateFormat), strconv.FormatInt(now.Unix(), 10))
if err = os.Rename(w.filename, rotatedFilename); err != nil {
return err
}
if err = w.purgeArchivesIfNeeded(); err != nil {
return err
}
return nil
}
func (w *FileWriter) purgeArchivesIfNeeded() (err error) {
if w.maxArchives == -1 {
// A maxArchives of -1 means never purge old archives
return nil
}
var matches []string
if matches, err = filepath.Glob(fmt.Sprintf(w.filenameRotationTemplate, "*", "*")); err != nil {
return err
}
// if there are more archives than the configured maximum, purge the older files
if len(matches) > w.maxArchives {
// sort files alphanumerically so older files are deleted first
sort.Strings(matches)
for _, filename := range matches[:len(matches)-w.maxArchives] {
if err = os.Remove(filename); err != nil {
return err
}
}
}
return nil
}


@ -1,148 +0,0 @@
package rotate
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestFileWriter_NoRotation(t *testing.T) {
tempDir, err := ioutil.TempDir("", "RotationNo")
require.NoError(t, err)
writer, err := NewFileWriter(filepath.Join(tempDir, "test"), 0, 0, 0)
require.NoError(t, err)
defer func() { writer.Close(); os.RemoveAll(tempDir) }()
_, err = writer.Write([]byte("Hello World"))
require.NoError(t, err)
_, err = writer.Write([]byte("Hello World 2"))
require.NoError(t, err)
files, _ := ioutil.ReadDir(tempDir)
assert.Equal(t, 1, len(files))
}
func TestFileWriter_TimeRotation(t *testing.T) {
tempDir, err := ioutil.TempDir("", "RotationTime")
require.NoError(t, err)
interval, _ := time.ParseDuration("1s")
writer, err := NewFileWriter(filepath.Join(tempDir, "test"), interval, 0, -1)
require.NoError(t, err)
defer func() { writer.Close(); os.RemoveAll(tempDir) }()
_, err = writer.Write([]byte("Hello World"))
require.NoError(t, err)
time.Sleep(1 * time.Second)
_, err = writer.Write([]byte("Hello World 2"))
require.NoError(t, err)
files, _ := ioutil.ReadDir(tempDir)
assert.Equal(t, 2, len(files))
}
func TestFileWriter_ReopenTimeRotation(t *testing.T) {
tempDir, err := ioutil.TempDir("", "RotationTime")
require.NoError(t, err)
interval, _ := time.ParseDuration("1s")
filePath := filepath.Join(tempDir, "test.log")
err = ioutil.WriteFile(filePath, []byte("Hello World"), 0644)
time.Sleep(1 * time.Second)
assert.NoError(t, err)
writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), interval, 0, -1)
require.NoError(t, err)
defer func() { writer.Close(); os.RemoveAll(tempDir) }()
files, _ := ioutil.ReadDir(tempDir)
assert.Equal(t, 2, len(files))
}
func TestFileWriter_SizeRotation(t *testing.T) {
tempDir, err := ioutil.TempDir("", "RotationSize")
require.NoError(t, err)
maxSize := int64(9)
writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1)
require.NoError(t, err)
defer func() { writer.Close(); os.RemoveAll(tempDir) }()
_, err = writer.Write([]byte("Hello World"))
require.NoError(t, err)
_, err = writer.Write([]byte("World 2"))
require.NoError(t, err)
files, _ := ioutil.ReadDir(tempDir)
assert.Equal(t, 2, len(files))
}
func TestFileWriter_ReopenSizeRotation(t *testing.T) {
tempDir, err := ioutil.TempDir("", "RotationSize")
require.NoError(t, err)
maxSize := int64(12)
filePath := filepath.Join(tempDir, "test.log")
err = ioutil.WriteFile(filePath, []byte("Hello World"), 0644)
assert.NoError(t, err)
writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1)
require.NoError(t, err)
defer func() { writer.Close(); os.RemoveAll(tempDir) }()
_, err = writer.Write([]byte("Hello World Again"))
require.NoError(t, err)
files, _ := ioutil.ReadDir(tempDir)
assert.Equal(t, 2, len(files))
}
func TestFileWriter_DeleteArchives(t *testing.T) {
tempDir, err := ioutil.TempDir("", "RotationDeleteArchives")
require.NoError(t, err)
maxSize := int64(5)
writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, 2)
require.NoError(t, err)
defer func() { writer.Close(); os.RemoveAll(tempDir) }()
_, err = writer.Write([]byte("First file"))
require.NoError(t, err)
// File names include the date with second precision
// So, to force rotation with different file names
// we need to wait
time.Sleep(1 * time.Second)
_, err = writer.Write([]byte("Second file"))
require.NoError(t, err)
time.Sleep(1 * time.Second)
_, err = writer.Write([]byte("Third file"))
require.NoError(t, err)
files, _ := ioutil.ReadDir(tempDir)
assert.Equal(t, 3, len(files))
for _, tempFile := range files {
var bytes []byte
var err error
path := filepath.Join(tempDir, tempFile.Name())
if bytes, err = ioutil.ReadFile(path); err != nil {
t.Error(err.Error())
return
}
contents := string(bytes)
if contents != "" && contents != "Second file" && contents != "Third file" {
t.Error("Should have deleted the eldest log file")
return
}
}
}
func TestFileWriter_CloseRotates(t *testing.T) {
tempDir, err := ioutil.TempDir("", "RotationClose")
require.NoError(t, err)
defer os.RemoveAll(tempDir)
maxSize := int64(9)
writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1)
require.NoError(t, err)
writer.Close()
files, _ := ioutil.ReadDir(tempDir)
assert.Equal(t, 1, len(files))
assert.Regexp(t, "^test\\.[^\\.]+\\.log$", files[0].Name())
}


@ -1,64 +0,0 @@
package syslog
import (
"fmt"
"strings"
)
// Framing represents the framing technique we expect the messages to come in.
type Framing int
const (
// OctetCounting indicates the transparent framing technique for syslog transport.
OctetCounting Framing = iota
// NonTransparent indicates the non-transparent framing technique for syslog transport.
NonTransparent
)
func (f Framing) String() string {
switch f {
case OctetCounting:
return "OCTET-COUNTING"
case NonTransparent:
return "NON-TRANSPARENT"
}
return ""
}
// UnmarshalTOML implements the ability to unmarshal framing from TOML files.
func (f *Framing) UnmarshalTOML(data []byte) (err error) {
return f.UnmarshalText(data)
}
// UnmarshalText implements encoding.TextUnmarshaler
func (f *Framing) UnmarshalText(data []byte) (err error) {
s := string(data)
switch strings.ToUpper(s) {
case `OCTET-COUNTING`:
fallthrough
case `"OCTET-COUNTING"`:
fallthrough
case `'OCTET-COUNTING'`:
*f = OctetCounting
return
case `NON-TRANSPARENT`:
fallthrough
case `"NON-TRANSPARENT"`:
fallthrough
case `'NON-TRANSPARENT'`:
*f = NonTransparent
return
}
*f = -1
return fmt.Errorf("unknown framing")
}
// MarshalText implements encoding.TextMarshaler
func (f Framing) MarshalText() ([]byte, error) {
s := f.String()
if s != "" {
return []byte(s), nil
}
return nil, fmt.Errorf("unknown framing")
}
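A hedged round-trip sketch using the two methods above, written as if it sat in this package; the input string is one of the accepted case-insensitive forms:

// exampleFramingRoundTrip is illustrative only.
func exampleFramingRoundTrip() ([]byte, error) {
	var f Framing
	if err := f.UnmarshalText([]byte("octet-counting")); err != nil {
		return nil, err
	}
	// Returns []byte("OCTET-COUNTING"), nil.
	return f.MarshalText()
}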


@ -1,37 +0,0 @@
package syslog
import (
"github.com/stretchr/testify/assert"
"testing"
)
func TestFraming(t *testing.T) {
var f1 Framing
f1.UnmarshalTOML([]byte(`"non-transparent"`))
assert.Equal(t, NonTransparent, f1)
var f2 Framing
f2.UnmarshalTOML([]byte(`non-transparent`))
assert.Equal(t, NonTransparent, f2)
var f3 Framing
f3.UnmarshalTOML([]byte(`'non-transparent'`))
assert.Equal(t, NonTransparent, f3)
var f4 Framing
f4.UnmarshalTOML([]byte(`"octet-counting"`))
assert.Equal(t, OctetCounting, f4)
var f5 Framing
f5.UnmarshalTOML([]byte(`octet-counting`))
assert.Equal(t, OctetCounting, f5)
var f6 Framing
f6.UnmarshalTOML([]byte(`'octet-counting'`))
assert.Equal(t, OctetCounting, f6)
var f7 Framing
err := f7.UnmarshalTOML([]byte(`nope`))
assert.Equal(t, Framing(-1), f7)
assert.Error(t, err)
}


@ -1,77 +0,0 @@
package templating
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestEngineAlternateSeparator(t *testing.T) {
defaultTemplate, _ := NewDefaultTemplateWithPattern("topic*")
engine, err := NewEngine("_", defaultTemplate, []string{
"/ /*/*/* /measurement/origin/measurement*",
})
require.NoError(t, err)
name, tags, field, err := engine.Apply("/telegraf/host01/cpu")
require.NoError(t, err)
require.Equal(t, "telegraf_cpu", name)
require.Equal(t, map[string]string{
"origin": "host01",
}, tags)
require.Equal(t, "", field)
}
func TestEngineWithWildcardTemplate(t *testing.T) {
var (
defaultTmpl, err = NewDefaultTemplateWithPattern("measurement*")
templates = []string{
"taskmanagerTask.alarm-detector.Assign.alarmDefinitionId metricsType.process.nodeId.x.alarmDefinitionId.measurement.field rule=1",
"taskmanagerTask.*.*.*.* metricsType.process.nodeId.measurement rule=2",
}
)
require.NoError(t, err)
engine, err := NewEngine(".", defaultTmpl, templates)
require.NoError(t, err)
for _, testCase := range []struct {
line string
measurement string
field string
tags map[string]string
}{
{
line: "taskmanagerTask.alarm-detector.Assign.alarmDefinitionId.timeout_errors.duration.p75",
measurement: "duration",
field: "p75",
tags: map[string]string{
"metricsType": "taskmanagerTask",
"process": "alarm-detector",
"nodeId": "Assign",
"x": "alarmDefinitionId",
"alarmDefinitionId": "timeout_errors",
"rule": "1",
},
},
{
line: "taskmanagerTask.alarm-detector.Assign.numRecordsInPerSecond.m5_rate",
measurement: "numRecordsInPerSecond",
tags: map[string]string{
"metricsType": "taskmanagerTask",
"process": "alarm-detector",
"nodeId": "Assign",
"rule": "2",
},
},
} {
t.Run(testCase.line, func(t *testing.T) {
measurement, tags, field, err := engine.Apply(testCase.line)
require.NoError(t, err)
assert.Equal(t, testCase.measurement, measurement)
assert.Equal(t, testCase.field, field)
assert.Equal(t, testCase.tags, tags)
})
}
}

Some files were not shown because too many files have changed in this diff.