Compare commits

9 Commits

| SHA1 |
|---|
| 18fd2d987d |
| 5e70cb3e44 |
| ce203dc687 |
| b0a2e8e1bd |
| 499495f844 |
| 20ab8fb2c3 |
| bc474d3a53 |
| 547be87d79 |
| 619d4d5d29 |
.github/PULL_REQUEST_TEMPLATE.md (6 changes, vendored)
@@ -1,5 +1,5 @@
### Required for all PRs:

- [ ] Signed [CLA](https://influxdata.com/community/cla/).
- [ ] Associated README.md updated.
- [ ] Has appropriate unit tests.
- [ ] CHANGELOG.md updated (we recommend not updating this until the PR has been approved by a maintainer)
- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed)
- [ ] README.md updated (if adding a new plugin)
CHANGELOG.md (156 changes)
@@ -1,157 +1,4 @@
## v1.4 [unreleased]

### Release Notes

- The `kafka_consumer` input has been updated to support Kafka 0.9 and
above style consumer offset handling. The previous version of this plugin
supporting Kafka 0.8 and below is available as the `kafka_consumer_legacy`
plugin.

- In the `aerospike` input the `node_name` field has been changed to be a tag
for both the `aerospike_node` and `aerospike_namespace` measurements.

- The default prometheus_client port has been changed to 9273.

### New Plugins

- [fail2ban](./plugins/inputs/fail2ban/README.md) - Thanks to @grugrut
- [fluentd](./plugins/inputs/fluentd/README.md) - Thanks to @DanKans
- [histogram](./plugins/aggregators/histogram/README.md) - Thanks to @vlamug
- [minecraft](./plugins/inputs/minecraft/README.md) - Thanks to @adamperlin & @Ayrdrie
- [openldap](./plugins/inputs/openldap/README.md) - Thanks to @cobaugh
- [salesforce](./plugins/inputs/salesforce/README.md) - Thanks to @rody
- [tomcat](./plugins/inputs/tomcat/README.md) - Thanks to @mlindes
- [win_services](./plugins/inputs/win_services/README.md) - Thanks to @vlastahajek
- [zipkin](./plugins/inputs/zipkin/README.md) - Thanks to @adamperlin & @Ayrdrie

### Features

- [#2487](https://github.com/influxdata/telegraf/pull/2487): Add Kafka 0.9+ consumer support
- [#2773](https://github.com/influxdata/telegraf/pull/2773): Add support for self-signed certs to InfluxDB input plugin
- [#2293](https://github.com/influxdata/telegraf/pull/2293): Add TCP listener for statsd input
- [#2581](https://github.com/influxdata/telegraf/pull/2581): Add Docker container environment variables as tags. Only whitelisted
- [#2817](https://github.com/influxdata/telegraf/pull/2817): Add timeout option to IPMI sensor plugin
- [#2883](https://github.com/influxdata/telegraf/pull/2883): Add support for an optional SSL/TLS configuration to nginx input plugin
- [#2882](https://github.com/influxdata/telegraf/pull/2882): Add timezone support for logparser timestamps.
- [#2814](https://github.com/influxdata/telegraf/pull/2814): Add result_type field for http_response input.
- [#2734](https://github.com/influxdata/telegraf/pull/2734): Add include/exclude filters for docker containers.
- [#2602](https://github.com/influxdata/telegraf/pull/2602): Add secure connection support to graphite output.
- [#2908](https://github.com/influxdata/telegraf/pull/2908): Add min/max response time on linux/darwin to ping.
- [#2929](https://github.com/influxdata/telegraf/pull/2929): Add HTTP Proxy support to influxdb output.
- [#2933](https://github.com/influxdata/telegraf/pull/2933): Add standard SSL options to mysql input.
- [#2875](https://github.com/influxdata/telegraf/pull/2875): Add input plugin for fail2ban.
- [#2924](https://github.com/influxdata/telegraf/pull/2924): Support HOST_PROC in processes and linux_sysctl_fs inputs.
- [#2960](https://github.com/influxdata/telegraf/pull/2960): Add Minecraft input plugin.
- [#2963](https://github.com/influxdata/telegraf/pull/2963): Add support for RethinkDB 1.0 handshake protocol.
- [#2943](https://github.com/influxdata/telegraf/pull/2943): Add optional usage_active and time_active CPU metrics.
- [#2973](https://github.com/influxdata/telegraf/pull/2973): Change default prometheus_client port.
- [#2661](https://github.com/influxdata/telegraf/pull/2661): Add fluentd input plugin.
- [#2990](https://github.com/influxdata/telegraf/pull/2990): Add result_type field to net_response input plugin.
- [#2571](https://github.com/influxdata/telegraf/pull/2571): Add read timeout to socket_listener
- [#2612](https://github.com/influxdata/telegraf/pull/2612): Add input plugin for OpenLDAP.
- [#3042](https://github.com/influxdata/telegraf/pull/3042): Add network option to dns_query.
- [#3054](https://github.com/influxdata/telegraf/pull/3054): Add redis_version field to redis input.
- [#3063](https://github.com/influxdata/telegraf/pull/3063): Add tls options to docker input.
- [#2387](https://github.com/influxdata/telegraf/pull/2387): Add histogram aggregator plugin.
- [#3080](https://github.com/influxdata/telegraf/pull/3080): Add zipkin input plugin.
- [#3023](https://github.com/influxdata/telegraf/pull/3023): Add Windows Services input plugin.
- [#3098](https://github.com/influxdata/telegraf/pull/3098): Add path tag to logparser containing path of logfile.
- [#3075](https://github.com/influxdata/telegraf/pull/3075): Add salesforce input plugin.
- [#3097](https://github.com/influxdata/telegraf/pull/3097): Add option to run varnish under sudo.
- [#3119](https://github.com/influxdata/telegraf/pull/3119): Add weighted_io_time to diskio input.
- [#2978](https://github.com/influxdata/telegraf/pull/2978): Add gzip content-encoding support to influxdb output.
- [#3127](https://github.com/influxdata/telegraf/pull/3127): Allow using system plugin in Windows.
- [#3112](https://github.com/influxdata/telegraf/pull/3112): Add tomcat input plugin.
- [#3182](https://github.com/influxdata/telegraf/pull/3182): HTTP headers can be added to InfluxDB output.

### Bugfixes

- [#2607](https://github.com/influxdata/telegraf/issues/2607): Improve logging of errors in Cassandra input.
- [#2819](https://github.com/influxdata/telegraf/pull/2819): [enh] set db_version at 0 if query version fails
- [#2749](https://github.com/influxdata/telegraf/pull/2749): Fixed sqlserver input to work with case sensitive server collation.
- [#2716](https://github.com/influxdata/telegraf/pull/2716): Systemd does not see all shutdowns as failures
- [#2782](https://github.com/influxdata/telegraf/pull/2782): Reuse transports in input plugins
- [#2815](https://github.com/influxdata/telegraf/issues/2815): Inputs processes fails with "no such process".
- [#1137](https://github.com/influxdata/telegraf/issues/1137): Fix multiple plugin loading in win_perf_counters.
- [#2855](https://github.com/influxdata/telegraf/pull/2855): MySQL input: log and continue on field parse error.
- [#2885](https://github.com/influxdata/telegraf/pull/2885): Fix timeout option in Windows ping input sample configuration.
- [#2911](https://github.com/influxdata/telegraf/issues/2911): Fix Kinesis output plugin in govcloud.
- [#2917](https://github.com/influxdata/telegraf/issues/2917): Fix Aerospike input adds all nodes to a single series.
- [#2452](https://github.com/influxdata/telegraf/pull/2452): Improve Prometheus Client output documentation.
- [#2984](https://github.com/influxdata/telegraf/pull/2984): Display error message if prometheus output fails to listen.
- [#2997](https://github.com/influxdata/telegraf/issues/2997): Fix elasticsearch output content type detection warning.
- [#2914](https://github.com/influxdata/telegraf/issues/2914): Prevent possible deadlock when using aggregators.
- [#2860](https://github.com/influxdata/telegraf/issues/2860): Fix combined tagdrop/tagpass filtering.
- [#3036](https://github.com/influxdata/telegraf/pull/3036): Fix filtering when both pass and drop match an item.
- [#2964](https://github.com/influxdata/telegraf/issues/2964): Only report cpu usage for online cpus in docker input.
- [#3050](https://github.com/influxdata/telegraf/pull/3050): Start first aggregator period at startup time.
- [#2906](https://github.com/influxdata/telegraf/issues/2906): Fix panic in logparser if file cannot be opened.
- [#2886](https://github.com/influxdata/telegraf/issues/2886): Default to localhost if zookeeper has no servers set.
- [#2457](https://github.com/influxdata/telegraf/issues/2457): Fix docker memory and cpu reporting in Windows.
- [#3058](https://github.com/influxdata/telegraf/issues/3058): Allow iptable entries with trailing text.
- [#1680](https://github.com/influxdata/telegraf/issues/1680): Sanitize password from couchbase metric.
- [#3104](https://github.com/influxdata/telegraf/issues/3104): Converge to typed value in prometheus output.
- [#2899](https://github.com/influxdata/telegraf/issues/2899): Skip compilation of logparser and tail on solaris.
- [#2951](https://github.com/influxdata/telegraf/issues/2951): Discard logging from tail library.
- [#3126](https://github.com/influxdata/telegraf/pull/3126): Remove log message on ping timeout.
- [#3144](https://github.com/influxdata/telegraf/issues/3144): Don't retry points beyond retention policy.
- [#3015](https://github.com/influxdata/telegraf/issues/3015): Don't start Telegraf on install in Amazon Linux.
- [#3153](https://github.com/influxdata/telegraf/issues/3153): Enable hddtemp input on all platforms.
- [#3142](https://github.com/influxdata/telegraf/issues/3142): Escape backslash within string fields.
- [#3162](https://github.com/influxdata/telegraf/issues/3162): Fix parsing of SHM remotes in ntpq input
- [#3149](https://github.com/influxdata/telegraf/issues/3149): Don't fail parsing zpool stats if pool health is UNAVAIL on FreeBSD.
- [#2672](https://github.com/influxdata/telegraf/issues/2672): Fix NSQ input plugin when used with version 1.0.0-compat.
- [#2523](https://github.com/influxdata/telegraf/issues/2523): Added CloudWatch metric constraint validation.
- [#3179](https://github.com/influxdata/telegraf/issues/3179): Skip non-numerical values in graphite format.

## v1.3.5 [2017-07-26]

### Bugfixes

- [#3049](https://github.com/influxdata/telegraf/issues/3049): Fix prometheus output cannot be reloaded.
- [#3037](https://github.com/influxdata/telegraf/issues/3037): Fix filestat reporting exists when cannot list directory.
- [#2386](https://github.com/influxdata/telegraf/issues/2386): Fix ntpq parse issue when using dns_lookup.
- [#2554](https://github.com/influxdata/telegraf/issues/2554): Fix panic when agent.interval = "0s".

## v1.3.4 [2017-07-12]

### Bugfixes

- [#3001](https://github.com/influxdata/telegraf/issues/3001): Fix handling of escape characters within fields.
- [#2988](https://github.com/influxdata/telegraf/issues/2988): Fix chrony plugin does not track system time offset.
- [#3004](https://github.com/influxdata/telegraf/issues/3004): Do not allow metrics with trailing slashes.
- [#3011](https://github.com/influxdata/telegraf/issues/3011): Prevent Write from being called concurrently.

## v1.3.3 [2017-06-28]

### Bugfixes

- [#2915](https://github.com/influxdata/telegraf/issues/2915): Allow dos line endings in tail and logparser.
- [#2937](https://github.com/influxdata/telegraf/issues/2937): Remove label value sanitization in prometheus output.
- [#2948](https://github.com/influxdata/telegraf/issues/2948): Fix bug parsing default timestamps with modified precision.
- [#2954](https://github.com/influxdata/telegraf/issues/2954): Fix panic in elasticsearch input if cannot determine master.

## v1.3.2 [2017-06-14]

### Bugfixes

- [#2862](https://github.com/influxdata/telegraf/issues/2862): Fix InfluxDB UDP metric splitting.
- [#2888](https://github.com/influxdata/telegraf/issues/2888): Fix mongodb/leofs urls without scheme.
- [#2822](https://github.com/influxdata/telegraf/issues/2822): Fix inconsistent label dimensions in prometheus output.

## v1.3.1 [2017-05-31]

### Bugfixes

- [#2749](https://github.com/influxdata/telegraf/pull/2749): Fixed sqlserver input to work with case sensitive server collation.
- [#2782](https://github.com/influxdata/telegraf/pull/2782): Reuse transports in input plugins
- [#2815](https://github.com/influxdata/telegraf/issues/2815): Inputs processes fails with "no such process".
- [#2851](https://github.com/influxdata/telegraf/pull/2851): Fix InfluxDB output database quoting.
- [#2856](https://github.com/influxdata/telegraf/issues/2856): Fix net input on older Linux kernels.
- [#2848](https://github.com/influxdata/telegraf/pull/2848): Fix panic in mongo input.
- [#2869](https://github.com/influxdata/telegraf/pull/2869): Fix length calculation of split metric buffer.

## v1.3 [2017-05-15]
## v1.3 [unreleased]

### Release Notes

@@ -234,7 +81,6 @@ be deprecated eventually.
- [#2031](https://github.com/influxdata/telegraf/pull/2031): Add Kapacitor input plugin
- [#2732](https://github.com/influxdata/telegraf/pull/2732): Use go 1.8.1
- [#2712](https://github.com/influxdata/telegraf/issues/2712): Documentation for rabbitmq input plugin
- [#2141](https://github.com/influxdata/telegraf/pull/2141): Logparser handles newly-created files.

### Bugfixes
Godeps (40 changes)
@@ -1,58 +1,44 @@
collectd.org 2ce144541b8903101fb8f1483cc0497a68798122
github.com/Shopify/sarama 574d3147eee384229bf96a5d12c207fe7b5234f3
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
github.com/aerospike/aerospike-client-go 95e1ad7791bdbca44707fedbb29be42024900d9c
github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
github.com/apache/thrift 4aaa92ece8503a6da9bc6701604f69acf2b99d07
github.com/aws/aws-sdk-go c861d27d0304a79f727e9a8a4e2ac1e74602fdc0
github.com/aws/aws-sdk-go 7524cb911daddd6e5c9195def8e59ae892bef8d9
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/bsm/sarama-cluster ccdc0803695fbce22f1706d04ded46cd518fd832
github.com/cenkalti/backoff b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3
github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/docker f5ec1e2936dcbe7b5001c2b817188b095c700c27
github.com/docker/go-connections 990a1a1a70b0da4c4cb70e117971a4f0babfbf1a
github.com/docker/docker b89aff1afa1f61993ab2ba18fd62d9375a195f5d
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/go-xerial-snappy bb955e01b9346ac19dc29eb16586c90ded99a98c
github.com/eapache/queue 44cc805cf13205b55f69e14bcb69867d1ae92f98
github.com/eclipse/paho.mqtt.golang d4f545eb108a2d19f9b1a735689dbfb719bc21fb
github.com/go-logfmt/logfmt 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
github.com/gobwas/glob bea32b9cd2d6f55753d94a28e959b13f0244797a
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
github.com/gogo/protobuf 7b6c6391c4ff245962047fc1e2c6e08b1cdfa0e8
github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
github.com/google/go-cmp f94e52cad91c65a63acc1e75d4be223ea22e99bc
github.com/gorilla/mux 392c28fe23e1c45ddba891b0320b3b5df220beea
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
github.com/influxdata/tail a395bf99fe07c233f41fba0735fa2b13b58588ea
github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
github.com/jackc/pgx b84338d7d62598f75859b2b146d830b22f1b9ec8
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893
github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
github.com/klauspost/crc32 cb6bfca970f6908083f26f39a79009d608efd5cd
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/miekg/dns 99f84ae56e75126dd77e5de4fae2ea034a468ca1
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/go-nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nuid 289cccf02c178dc782430d534e3c1f5b72af807f
github.com/nsqio/go-nsq a53d495e81424aaf7a7665a9d32a97715c40e953
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
github.com/opentracing-contrib/go-observer a52f2342449246d5bcc273e65cbdcfa5f7d6c63c
github.com/opentracing/opentracing-go 06f47b42c792fef2796e9681353e1d908c417827
github.com/openzipkin/zipkin-go-opentracing 1cafbdfde94fbf2b373534764e0863aa3bd0bf7b
github.com/pierrec/lz4 5c9560bfa9ace2bf86080bf40d46b34ae44604df
github.com/pierrec/xxHash 5a004441f897722c627870a981d02b29924215fa
github.com/pkg/errors 645ef00459ed84a119197bfb8d8205042c6df63d
github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/prometheus/client_golang c317fb74746eac4fc65fe3909195f4cf67c5562a
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4
@@ -60,14 +46,9 @@ github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
github.com/shirou/gopsutil 9a4a9167ad3b4355dbf1c2c7a0f5f0d3fb1e9ab9
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
github.com/Shopify/sarama c01858abb625b73a3af51d0798e4ad42c8147093
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
github.com/shirou/gopsutil 70693b6a3da51a8a686d31f1b346077bbc066062
github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/vjeantet/grok d73e972b60935c7fec0b4ffbc904ed39ecaf7efe
github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee
@@ -76,14 +57,9 @@ github.com/yuin/gopher-lua 66c871e454fcf10251c61bf8eff02d0978cae75a
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto dc137beb6cce2043eb6b5f223ab8bf51c32459f4
golang.org/x/net f2499483f923065a842d38eb4c7f1927e6fc6e6d
golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
golang.org/x/text 506f9d5c962f284575e88337e7d9296d27e729d3
gopkg.in/asn1-ber.v1 4e86f4367175e39f69d9358a5f17b4dda270378d
gopkg.in/dancannon/gorethink.v1 edc7a6a68e2d8015f5ffe1b2560eed989f8a45be
gopkg.in/fatih/pool.v2 6e328e67893eb46323ad06f0e92cb9536babbabc
gopkg.in/fsnotify.v1 a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb
gopkg.in/gorethink/gorethink.v3 7ab832f7b65573104a555d84a27992ae9ea1f659
gopkg.in/ldap.v2 8168ee085ee43257585e50c6441aadf54ecb2c9f
gopkg.in/mgo.v2 3f83fa5005286a7fe593b055f0d7771a7dce4655
gopkg.in/olivere/elastic.v5 3113f9b9ad37509fe5f8a0e5e91c96fdc4435e26
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
gopkg.in/olivere/elastic.v5 ee3ebceab960cf68ab9a89ee6d78c031ef5b4a4e
gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6
Godeps_windows (11 changes, new file)
@@ -0,0 +1,11 @@
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
golang.org/x/sys a646d33e2ee3172a661fc09bca23bb4889a41bc8
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
gopkg.in/fsnotify.v1 a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
Makefile (135 changes)
@@ -1,72 +1,56 @@
PREFIX := /usr/local
VERSION := $(shell git describe --exact-match --tags 2>/dev/null)
BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
COMMIT := $(shell git rev-parse --short HEAD)
VERSION := $(shell sh -c 'git describe --always --tags')
BRANCH := $(shell sh -c 'git rev-parse --abbrev-ref HEAD')
COMMIT := $(shell sh -c 'git rev-parse --short HEAD')
ifdef GOBIN
PATH := $(GOBIN):$(PATH)
else
PATH := $(subst :,/bin:,$(GOPATH))/bin:$(PATH)
endif

TELEGRAF := telegraf$(shell go tool dist env | grep -q 'GOOS=.windows.' && echo .exe)
# Standard Telegraf build
default: prepare build

LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)
ifdef VERSION
LDFLAGS += -X main.version=$(VERSION)
endif
# Windows build
windows: prepare-windows build-windows

# Only run the build (no dependency grabbing)
build:
	go install -ldflags \
		"-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" ./...

all:
	$(MAKE) deps
	$(MAKE) telegraf
build-windows:
	GOOS=windows GOARCH=amd64 go build -o telegraf.exe -ldflags \
		"-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
		./cmd/telegraf/telegraf.go

deps:
	go get github.com/sparrc/gdm
	gdm restore

telegraf:
	go build -i -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go

go-install:
	go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf

install: telegraf
	mkdir -p $(DESTDIR)$(PREFIX)/bin/
	cp $(TELEGRAF) $(DESTDIR)$(PREFIX)/bin/

test:
	go test -short ./...

test-windows:
	go test ./plugins/inputs/ping/...
	go test ./plugins/inputs/win_perf_counters/...
	go test ./plugins/inputs/win_services/...

lint:
	go vet ./...

test-all: lint
	go test ./...
build-for-docker:
	CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o telegraf -ldflags \
		"-s -X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
		./cmd/telegraf/telegraf.go

# run package script
package:
	./scripts/build.py --package --version="$(VERSION)" --platform=linux --arch=all --upload

clean:
	-rm -f telegraf
	-rm -f telegraf.exe
# Get dependencies and use gdm to checkout changesets
prepare:
	go get github.com/sparrc/gdm
	gdm restore

# Run all docker containers necessary for integration tests
# Use the windows godeps file to prepare dependencies
prepare-windows:
	go get github.com/sparrc/gdm
	gdm restore
	gdm restore -f Godeps_windows

# Run all docker containers necessary for unit tests
docker-run:
	docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
	docker run --name zookeeper -p "2181:2181" -d wurstmeister/zookeeper
	docker run --name kafka \
		--link zookeeper:zookeeper \
		-e KAFKA_ADVERTISED_HOST_NAME=localhost \
		-e KAFKA_ADVERTISED_PORT=9092 \
		-e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
		-e KAFKA_CREATE_TOPICS="test:1:1" \
		-p "9092:9092" \
		-d wurstmeister/kafka
		-e ADVERTISED_HOST=localhost \
		-e ADVERTISED_PORT=9092 \
		-p "2181:2181" -p "9092:9092" \
		-d spotify/kafka
	docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
	docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
	docker run --name memcached -p "11211:11211" -d memcached
@@ -77,41 +61,38 @@ docker-run:
	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
	docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann
	docker run --name nats -p "4222:4222" -d nats
	docker run --name openldap \
		-e SLAPD_CONFIG_ROOTDN="cn=manager,cn=config" \
		-e SLAPD_CONFIG_ROOTPW="secret" \
		-p "389:389" -p "636:636" \
		-d cobaugh/openldap-alpine

# Run docker containers necessary for integration tests; skipping services provided
# by CircleCI
# Run docker containers necessary for CircleCI unit tests
docker-run-circle:
	docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
	docker run --name zookeeper -p "2181:2181" -d wurstmeister/zookeeper
	docker run --name kafka \
		--link zookeeper:zookeeper \
		-e KAFKA_ADVERTISED_HOST_NAME=localhost \
		-e KAFKA_ADVERTISED_PORT=9092 \
		-e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
		-e KAFKA_CREATE_TOPICS="test:1:1" \
		-p "9092:9092" \
		-d wurstmeister/kafka
		-e ADVERTISED_HOST=localhost \
		-e ADVERTISED_PORT=9092 \
		-p "2181:2181" -p "9092:9092" \
		-d spotify/kafka
	docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
	docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
	docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann
	docker run --name nats -p "4222:4222" -d nats
	docker run --name openldap \
		-e SLAPD_CONFIG_ROOTDN="cn=manager,cn=config" \
		-e SLAPD_CONFIG_ROOTPW="secret" \
		-p "389:389" -p "636:636" \
		-d cobaugh/openldap-alpine

# Kill all docker containers, ignore errors
docker-kill:
	-docker kill aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
		openldap postgres rabbitmq redis riemann zookeeper
	-docker rm aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
		openldap postgres rabbitmq redis riemann zookeeper
	-docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats elasticsearch
	-docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats elasticsearch

.PHONY: deps telegraf telegraf.exe install test test-windows lint test-all \
	package clean docker-run docker-run-circle docker-kill
# Run full unit tests using docker containers (includes setup and teardown)
test: vet docker-kill docker-run
	# Sleeping for kafka leadership election, TSDB setup, etc.
	sleep 60
	# SUCCESS, running tests
	go test -race ./...

# Run "short" unit tests
test-short: vet
	go test -short ./...

vet:
	go vet ./...

.PHONY: test test-short vet build default
README.md (74 changes)
@@ -20,20 +20,20 @@ For more information on Processor and Aggregator plugins please [read this](./do
New plugins are designed to be easy to contribute,
we'll eagerly accept pull
requests and will manage the set of plugins that Telegraf supports.

## Contributing

There are many ways to contribute:
- Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new)
- [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation)
- [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls)
- Answer questions on github and on the [Community Site](https://community.influxdata.com/)
- [Contribute plugins](CONTRIBUTING.md)
See the [contributing guide](CONTRIBUTING.md) for instructions on writing
new plugins.

## Installation:

You can download the binaries directly from the [downloads](https://www.influxdata.com/downloads) page
or from the [releases](https://github.com/influxdata/telegraf/releases) section.
You can either download the binaries directly from the
[downloads](https://www.influxdata.com/downloads) page.

A few alternate installs are available here as well:

### FreeBSD tarball:

Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-VERSION_freebsd_amd64.tar.gz

### Ansible Role:

@@ -41,14 +41,13 @@ Ansible role: https://github.com/rossmcdonald/telegraf

### From Source:

Telegraf requires golang version 1.8+, the Makefile requires GNU make.

Dependencies are managed with [gdm](https://github.com/sparrc/gdm),
which is installed by the Makefile if you don't have it already.
Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
which gets installed via the Makefile
if you don't have it already. You also must build with golang version 1.8+.

1. [Install Go](https://golang.org/doc/install)
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
3. Run `go get -d github.com/influxdata/telegraf`
3. Run `go get github.com/influxdata/telegraf`
4. Run `cd $GOPATH/src/github.com/influxdata/telegraf`
5. Run `make`

@@ -57,37 +56,37 @@ which is installed by the Makefile if you don't have it already.
See usage with:

```
./telegraf --help
telegraf --help
```

#### Generate a telegraf config file:

```
./telegraf config > telegraf.conf
telegraf config > telegraf.conf
```

#### Generate config with only cpu input & influxdb output plugins defined:
#### Generate config with only cpu input & influxdb output plugins defined

```
./telegraf --input-filter cpu --output-filter influxdb config
telegraf --input-filter cpu --output-filter influxdb config
```

#### Run a single telegraf collection, outputting metrics to stdout:
#### Run a single telegraf collection, outputting metrics to stdout

```
./telegraf --config telegraf.conf --test
telegraf --config telegraf.conf -test
```

#### Run telegraf with all plugins defined in config file:
#### Run telegraf with all plugins defined in config file

```
./telegraf --config telegraf.conf
telegraf --config telegraf.conf
```

#### Run telegraf, enabling the cpu & memory input, and influxdb output plugins:
#### Run telegraf, enabling the cpu & memory input, and influxdb output plugins

```
./telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
telegraf --config telegraf.conf -input-filter cpu:mem -output-filter influxdb
```


@@ -112,16 +111,12 @@ configuration options.
* [couchbase](./plugins/inputs/couchbase)
* [couchdb](./plugins/inputs/couchdb)
* [disque](./plugins/inputs/disque)
* [dmcache](./plugins/inputs/dmcache)
* [dns query time](./plugins/inputs/dns_query)
* [docker](./plugins/inputs/docker)
* [dovecot](./plugins/inputs/dovecot)
* [elasticsearch](./plugins/inputs/elasticsearch)
* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
* [fail2ban](./plugins/inputs/fail2ban)
* [filestat](./plugins/inputs/filestat)
* [fluentd](./plugins/inputs/fluentd)
* [graylog](./plugins/inputs/graylog)
* [haproxy](./plugins/inputs/haproxy)
* [hddtemp](./plugins/inputs/hddtemp)
* [http_response](./plugins/inputs/http_response)
@@ -132,14 +127,12 @@ configuration options.
* [ipmi_sensor](./plugins/inputs/ipmi_sensor)
* [iptables](./plugins/inputs/iptables)
* [jolokia](./plugins/inputs/jolokia)
* [kapacitor](./plugins/inputs/kapacitor)
* [kubernetes](./plugins/inputs/kubernetes)
* [leofs](./plugins/inputs/leofs)
* [lustre2](./plugins/inputs/lustre2)
* [mailchimp](./plugins/inputs/mailchimp)
* [memcached](./plugins/inputs/memcached)
* [mesos](./plugins/inputs/mesos)
* [minecraft](./plugins/inputs/minecraft)
* [mongodb](./plugins/inputs/mongodb)
* [mysql](./plugins/inputs/mysql)
* [net_response](./plugins/inputs/net_response)
@@ -147,7 +140,6 @@ configuration options.
* [nsq](./plugins/inputs/nsq)
* [nstat](./plugins/inputs/nstat)
* [ntpq](./plugins/inputs/ntpq)
* [openldap](./plugins/inputs/openldap)
* [phpfpm](./plugins/inputs/phpfpm)
* [phusion passenger](./plugins/inputs/passenger)
* [ping](./plugins/inputs/ping)
@@ -155,25 +147,22 @@ configuration options.
* [postgresql_extensible](./plugins/inputs/postgresql_extensible)
* [powerdns](./plugins/inputs/powerdns)
* [procstat](./plugins/inputs/procstat)
* [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server))
* [prometheus](./plugins/inputs/prometheus)
* [puppetagent](./plugins/inputs/puppetagent)
* [rabbitmq](./plugins/inputs/rabbitmq)
* [raindrops](./plugins/inputs/raindrops)
* [redis](./plugins/inputs/redis)
* [rethinkdb](./plugins/inputs/rethinkdb)
* [riak](./plugins/inputs/riak)
* [salesforce](./plugins/inputs/salesforce)
* [sensors](./plugins/inputs/sensors)
* [snmp](./plugins/inputs/snmp)
* [snmp_legacy](./plugins/inputs/snmp_legacy)
* [sql server](./plugins/inputs/sqlserver) (microsoft)
* [tomcat](./plugins/inputs/tomcat)
* [twemproxy](./plugins/inputs/twemproxy)
* [varnish](./plugins/inputs/varnish)
* [zfs](./plugins/inputs/zfs)
* [zookeeper](./plugins/inputs/zookeeper)
* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters)
* [win_services](./plugins/inputs/win_services)
* [win_perf_counters ](./plugins/inputs/win_perf_counters) (windows performance counters)
* [sysstat](./plugins/inputs/sysstat)
* [system](./plugins/inputs/system)
    * cpu
@@ -206,8 +195,6 @@ Telegraf can also collect metrics via the following service plugins:
* [github](./plugins/inputs/webhooks/github)
* [mandrill](./plugins/inputs/webhooks/mandrill)
* [rollbar](./plugins/inputs/webhooks/rollbar)
* [papertrail](./plugins/inputs/webhooks/papertrail)
* [zipkin](./plugins/inputs/zipkin)

Telegraf is able to parse the following input data formats into metrics, these
formats may be used with input plugins supporting the `data_format` option:
@@ -226,7 +213,6 @@ formats may be used with input plugins supporting the `data_format` option:
## Aggregator Plugins

* [minmax](./plugins/aggregators/minmax)
* [histogram](./plugins/aggregators/histogram)

## Output Plugins

@@ -254,3 +240,9 @@ formats may be used with input plugins supporting the `data_format` option:
* [socket_writer](./plugins/outputs/socket_writer)
* [tcp](./plugins/outputs/socket_writer)
* [udp](./plugins/outputs/socket_writer)

## Contributing

Please see the
[contributing guide](CONTRIBUTING.md)
for details on contributing a plugin to Telegraf.
@@ -247,7 +247,7 @@ func (a *Agent) flush() {
}

// flusher monitors the metrics input channel and flushes on the minimum interval
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, aggC chan telegraf.Metric) error {
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) error {
	// Inelegant, but this sleep is to allow the Gather threads to run, so that
	// the flusher will flush after metrics are collected.
	time.Sleep(time.Millisecond * 300)
@@ -291,29 +291,6 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, ag
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case <-shutdown:
				if len(aggC) > 0 {
					// keep going until aggC is flushed
					continue
				}
				return
			case metric := <-aggC:
				metrics := []telegraf.Metric{metric}
				for _, processor := range a.Config.Processors {
					metrics = processor.Apply(metrics...)
				}
				for _, m := range metrics {
					outMetricC <- m
				}
			}
		}
	}()

	ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)
	semaphore := make(chan struct{}, 1)
	for {
@@ -362,9 +339,6 @@ func (a *Agent) Run(shutdown chan struct{}) error {

	// channel shared between all input threads for accumulating metrics
	metricC := make(chan telegraf.Metric, 100)
	aggC := make(chan telegraf.Metric, 100)

	now := time.Now()

	// Start all ServicePlugins
	for _, input := range a.Config.Inputs {
@@ -393,7 +367,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := a.flusher(shutdown, metricC, aggC); err != nil {
		if err := a.flusher(shutdown, metricC); err != nil {
			log.Printf("E! Flusher routine failed, exiting: %s\n", err.Error())
			close(shutdown)
		}
@@ -403,10 +377,10 @@ func (a *Agent) Run(shutdown chan struct{}) error {
	for _, aggregator := range a.Config.Aggregators {
		go func(agg *models.RunningAggregator) {
			defer wg.Done()
			acc := NewAccumulator(agg, aggC)
			acc := NewAccumulator(agg, metricC)
			acc.SetPrecision(a.Config.Agent.Precision.Duration,
				a.Config.Agent.Interval.Duration)
			agg.Run(acc, now, shutdown)
			agg.Run(acc, shutdown)
		}(aggregator)
	}
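The agent hunks above fold the separate `aggC` channel into `metricC` and drop the goroutine that drained it. That removed goroutine is a compact instance of a common Go shutdown idiom: on shutdown, keep consuming the channel until it is empty, then flush and return. A self-contained sketch of that idiom follows; the channel, types, and `flushLoop` name are illustrative placeholders, not telegraf's actual API.

```go
package main

import (
	"fmt"
	"time"
)

// flushLoop mirrors the shape of the flusher in the diff above: buffer
// incoming metrics, flush on every tick, and on shutdown drain whatever
// is still queued before returning.
func flushLoop(shutdown <-chan struct{}, metrics <-chan string, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	var buf []string
	flush := func() {
		if len(buf) > 0 {
			fmt.Println("flushing", len(buf), "metrics")
			buf = buf[:0]
		}
	}
	for {
		select {
		case <-shutdown:
			// Keep going until the channel is drained, like the
			// removed `if len(aggC) > 0 { continue }` loop.
			for {
				select {
				case m := <-metrics:
					buf = append(buf, m)
				default:
					flush()
					return
				}
			}
		case m := <-metrics:
			buf = append(buf, m)
		case <-ticker.C:
			flush()
		}
	}
}

func main() {
	shutdown := make(chan struct{})
	metrics := make(chan string, 10)
	metrics <- "cpu usage_idle=98"
	go func() { time.Sleep(50 * time.Millisecond); close(shutdown) }()
	flushLoop(shutdown, metrics, 20*time.Millisecond)
}
```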
appveyor.yml (32 changes)
@@ -1,32 +0,0 @@
version: "{build}"

cache:
  - C:\Cache

clone_folder: C:\gopath\src\github.com\influxdata\telegraf

environment:
  GOPATH: C:\gopath

platform: x64

install:
  - IF NOT EXIST "C:\Cache" mkdir C:\Cache
  - IF NOT EXIST "C:\Cache\go1.8.1.msi" curl -o "C:\Cache\go1.8.1.msi" https://storage.googleapis.com/golang/go1.8.1.windows-amd64.msi
  - IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip
  - IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip
  - IF EXIST "C:\Go" rmdir /S /Q C:\Go
  - msiexec.exe /i "C:\Cache\go1.8.1.msi" /quiet
  - 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y
  - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
  - go version
  - go env

build_script:
  - cmd: C:\GnuWin32\bin\make

test_script:
  - cmd: C:\GnuWin32\bin\make test-windows

artifacts:
  - path: telegraf.exe
@@ -51,18 +51,16 @@ var fAggregatorFilters = flag.String("aggregator-filter", "",
var fProcessorFilters = flag.String("processor-filter", "",
	"filter the processors to enable, separator is :")
var fUsage = flag.String("usage", "",
	"print usage for a plugin, ie, 'telegraf --usage mysql'")
	"print usage for a plugin, ie, 'telegraf -usage mysql'")
var fService = flag.String("service", "",
	"operate on the service")

// Telegraf version, populated by the linker.
// ie, -ldflags "-X main.version=`git describe --always --tags`"

var (
	nextVersion = "1.4.0"
	version     string
	commit      string
	branch      string
	version string
	commit  string
	branch  string
)

func init() {
@@ -83,8 +81,8 @@ Usage:

The commands & flags are:

	config              print out full sample configuration to stdout
	version             print the version to stdout
	config             print out full sample configuration to stdout
	version            print the version to stdout

	--config <file>     configuration file to load
	--test              gather metrics once, print them to stdout, and exit
@@ -105,7 +103,7 @@ Examples:
	telegraf --input-filter cpu --output-filter influxdb config

	# run a single telegraf collection, outputting metrics to stdout
	telegraf --config telegraf.conf --test
	telegraf --config telegraf.conf -test

	# run telegraf with all plugins defined in config file
	telegraf --config telegraf.conf
@@ -153,16 +151,6 @@ func reloadLoop(
		log.Fatalf("E! Error: no inputs found, did you provide a valid config file?")
	}

	if int64(c.Agent.Interval.Duration) <= 0 {
		log.Fatalf("E! Agent interval must be positive, found %s",
			c.Agent.Interval.Duration)
	}

	if int64(c.Agent.FlushInterval.Duration) <= 0 {
		log.Fatalf("E! Agent flush_interval must be positive; found %s",
			c.Agent.Interval.Duration)
	}

	ag, err := agent.NewAgent(c)
	if err != nil {
		log.Fatal("E! " + err.Error())
@@ -208,7 +196,7 @@ func reloadLoop(
		}
	}()

	log.Printf("I! Starting Telegraf %s\n", displayVersion())
	log.Printf("I! Starting Telegraf (version %s)\n", version)
	log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
	log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
	log.Printf("I! Tags enabled: %s", c.ListTags())
@@ -266,13 +254,6 @@ func (p *program) Stop(s service.Service) error {
	return nil
}

func displayVersion() string {
	if version == "" {
		return fmt.Sprintf("v%s~pre%s", nextVersion, commit)
	}
	return "v" + version
}

func main() {
	flag.Usage = func() { usageExit(0) }
	flag.Parse()
@@ -314,7 +295,7 @@ func main() {
	if len(args) > 0 {
		switch args[0] {
		case "version":
			fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit)
			fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
			return
		case "config":
			config.PrintSampleConfig(
@@ -342,7 +323,7 @@ func main() {
	}
	return
case *fVersion:
	fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit)
	fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
	return
case *fSampleConfig:
	config.PrintSampleConfig(
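The Makefile's `-ldflags "-X main.version=$(VERSION) ..."` lines and the `version`/`commit`/`branch` variables in the hunks above are two halves of the same mechanism: the Go linker's `-X` flag overwrites a package-level string variable at link time, so a release build carries its version without a generated source file. A minimal standalone illustration (not telegraf's actual file; the fallback message is modeled on `displayVersion()` above):

```go
package main

import "fmt"

// Deliberately left empty in source; the linker fills them in, e.g.
//   go build -ldflags "-X main.version=1.4.0 -X main.commit=abc123 -X main.branch=master"
var (
	version string
	commit  string
	branch  string
)

func main() {
	if version == "" {
		// No tag was injected: report a pre-release build instead.
		fmt.Printf("telegraf (devel) (git: %s %s)\n", branch, commit)
		return
	}
	fmt.Printf("telegraf v%s (git: %s %s)\n", version, branch, commit)
}
```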
@@ -66,13 +66,10 @@ interval. Maximum flush_interval will be flush_interval + flush_jitter
This is primarily to avoid
large write spikes for users running a large number of telegraf instances.
ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s.
* **precision**:
  By default or when set to "0s", precision will be set to the same
  timestamp order as the collection interval, with the maximum being 1s.
  Precision will NOT be used for service inputs. It is up to each individual
  service input to set the timestamp at the appropriate precision.
  Valid time units are "ns", "us" (or "µs"), "ms", "s".

* **precision**: By default, precision will be set to the same timestamp order
  as the collection interval, with the maximum being 1s. Precision will NOT
  be used for service inputs, such as logparser and statsd. Valid values are
  "ns", "us" (or "µs"), "ms", "s".
* **logfile**: Specify the log file name. The empty string means to log to stderr.
* **debug**: Run telegraf in debug mode.
* **quiet**: Run telegraf in quiet mode (error messages only).
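The precision rule in the bullets above can be sketched in a few lines of Go. This is an illustration of the documented behavior (derive a precision from the collection interval when none is configured, cap it at one second, round timestamps to it), not telegraf's actual implementation; `effectivePrecision` is a hypothetical helper name.

```go
package main

import (
	"fmt"
	"time"
)

// effectivePrecision derives a timestamp precision from the collection
// interval when none is configured, capped at one second as documented.
func effectivePrecision(configured, interval time.Duration) time.Duration {
	if configured > 0 {
		return configured
	}
	switch {
	case interval >= time.Second:
		return time.Second
	case interval >= time.Millisecond:
		return time.Millisecond
	case interval >= time.Microsecond:
		return time.Microsecond
	default:
		return time.Nanosecond
	}
}

func main() {
	p := effectivePrecision(0, 10*time.Second) // a 10s interval yields 1s precision
	fmt.Println(time.Now().Round(p).Format(time.RFC3339Nano))
}
```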
@@ -137,9 +134,8 @@ is tested on points after they have passed the `namepass` test.
  An array of glob pattern strings. Only fields whose field key matches a
  pattern in this list are emitted. Not available for outputs.
* **fielddrop**:
  The inverse of `fieldpass`. Fields with a field key matching one of the
  patterns will be discarded from the point. This is tested on points after
  they have passed the `fieldpass` test. Not available for outputs.
  The inverse of `fieldpass`. Fields with a field key matching one of the
  patterns will be discarded from the point. Not available for outputs.
* **tagpass**:
  A table mapping tag keys to arrays of glob pattern strings. Only points
  that contain a tag key in the table and a tag value matching one of its
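The `fieldpass`/`fielddrop` options in this hunk are plain glob filters: `fieldpass` is applied first, and `fielddrop` is tested on whatever passes. A dependency-free sketch of those semantics (telegraf vendors github.com/gobwas/glob for its filters, per the Godeps diff above; the standard library's `path.Match` stands in here, and `passes` is an illustrative name):

```go
package main

import (
	"fmt"
	"path"
)

// passes applies the documented order: fieldpass first, then fielddrop.
func passes(key string, fieldpass, fielddrop []string) bool {
	if len(fieldpass) > 0 {
		ok := false
		for _, p := range fieldpass {
			if m, _ := path.Match(p, key); m {
				ok = true
				break
			}
		}
		if !ok {
			return false
		}
	}
	for _, p := range fielddrop {
		if m, _ := path.Match(p, key); m {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(passes("usage_idle", []string{"usage_*"}, nil)) // true
	fmt.Println(passes("time_user", nil, []string{"time_*"}))   // false
	fmt.Println(passes("usage_time",
		[]string{"usage_*"}, []string{"*_time"})) // false: drop wins
}
```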
@@ -181,6 +177,7 @@ fields which begin with `time_`.
[[outputs.influxdb]]
    url = "http://192.168.59.103:8086" # required.
    database = "telegraf" # required.
    precision = "s"

# INPUTS
[[inputs.cpu]]
@@ -319,18 +316,21 @@ to avoid measurement collisions:
[[outputs.influxdb]]
    urls = [ "http://localhost:8086" ]
    database = "telegraf"
    precision = "s"
    # Drop all measurements that start with "aerospike"
    namedrop = ["aerospike*"]

[[outputs.influxdb]]
    urls = [ "http://localhost:8086" ]
    database = "telegraf-aerospike-data"
    precision = "s"
    # Only accept aerospike data:
    namepass = ["aerospike*"]

[[outputs.influxdb]]
    urls = [ "http://localhost:8086" ]
    database = "telegraf-cpu0-data"
    precision = "s"
    # Only store measurements where the tag "cpu" matches the value "cpu0"
    [outputs.influxdb.tagpass]
      cpu = ["cpu0"]
@@ -96,9 +96,6 @@ tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
```

Fields with string values will be skipped. Boolean fields will be converted
to 1 (true) or 0 (false).
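A small sketch of the value handling just described — skip string fields, map booleans to 1/0, pass numbers through. This is illustrative only, under the assumption stated in the docs above; `graphiteValue` is a hypothetical helper, not the serializer's real API.

```go
package main

import (
	"fmt"
	"strconv"
)

// graphiteValue renders a field value per the rule above: strings are
// skipped (ok=false), booleans become "1"/"0", numbers are formatted.
func graphiteValue(v interface{}) (s string, ok bool) {
	switch x := v.(type) {
	case string:
		return "", false // skipped
	case bool:
		if x {
			return "1", true
		}
		return "0", true
	case float64:
		return strconv.FormatFloat(x, 'f', -1, 64), true
	case int64:
		return strconv.FormatInt(x, 10), true
	}
	return "", false
}

func main() {
	for _, v := range []interface{}{98.09, true, "up", int64(3)} {
		if s, ok := graphiteValue(v); ok {
			fmt.Println(s)
		}
	}
}
```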
### Graphite Configuration:

```toml
@@ -1,102 +1,33 @@
# Licenses of dependencies

When distributed in a binary form, Telegraf may contain portions of the
following works:

- collectd.org [MIT](https://github.com/collectd/go-collectd/blob/master/LICENSE)
- github.com/aerospike/aerospike-client-go [APACHE](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE)
- github.com/amir/raidman [PUBLIC DOMAIN](https://github.com/amir/raidman/blob/master/UNLICENSE)
- github.com/armon/go-metrics [MIT](https://github.com/armon/go-metrics/blob/master/LICENSE)
- github.com/aws/aws-sdk-go [APACHE](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
- github.com/beorn7/perks [MIT](https://github.com/beorn7/perks/blob/master/LICENSE)
- github.com/boltdb/bolt [MIT](https://github.com/boltdb/bolt/blob/master/LICENSE)
- github.com/bsm/sarama-cluster [MIT](https://github.com/bsm/sarama-cluster/blob/master/LICENSE)
- github.com/cenkalti/backoff [MIT](https://github.com/cenkalti/backoff/blob/master/LICENSE)
- github.com/chuckpreslar/rcon [MIT](https://github.com/chuckpreslar/rcon#license)
- github.com/couchbase/go-couchbase [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
- github.com/couchbase/gomemcached [MIT](https://github.com/couchbase/gomemcached/blob/master/LICENSE)
- github.com/couchbase/goutils [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
- github.com/dancannon/gorethink [APACHE](https://github.com/dancannon/gorethink/blob/master/LICENSE)
- github.com/davecgh/go-spew [ISC](https://github.com/davecgh/go-spew/blob/master/LICENSE)
- github.com/docker/docker [APACHE](https://github.com/docker/docker/blob/master/LICENSE)
- github.com/docker/cli [APACHE](https://github.com/docker/cli/blob/master/LICENSE)
- github.com/eapache/go-resiliency [MIT](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
- github.com/eapache/go-xerial-snappy [MIT](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE)
- github.com/eapache/queue [MIT](https://github.com/eapache/queue/blob/master/LICENSE)
- github.com/eclipse/paho.mqtt.golang [ECLIPSE](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE)
- github.com/fsouza/go-dockerclient [BSD](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE)
- github.com/gobwas/glob [MIT](https://github.com/gobwas/glob/blob/master/LICENSE)
- github.com/google/go-cmp [BSD](https://github.com/google/go-cmp/blob/master/LICENSE)
- github.com/gogo/protobuf [BSD](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/golang/protobuf [BSD](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/go-logfmt/logfmt [MIT](https://github.com/go-logfmt/logfmt/blob/master/LICENSE)
- github.com/gorilla/mux [BSD](https://github.com/gorilla/mux/blob/master/LICENSE)
- github.com/go-ini/ini [APACHE](https://github.com/go-ini/ini/blob/master/LICENSE)
- github.com/go-ole/go-ole [MPL](http://mattn.mit-license.org/2013)
- github.com/go-sql-driver/mysql [MPL](https://github.com/go-sql-driver/mysql/blob/master/LICENSE)
- github.com/hailocab/go-hostpool [MIT](https://github.com/hailocab/go-hostpool/blob/master/LICENSE)
- github.com/hashicorp/consul [MPL](https://github.com/hashicorp/consul/blob/master/LICENSE)
- github.com/hashicorp/go-msgpack [BSD](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
- github.com/hashicorp/raft-boltdb [MPL](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
- github.com/hashicorp/raft [MPL](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/influxdata/tail [MIT](https://github.com/influxdata/tail/blob/master/LICENSE.txt)
- github.com/influxdata/toml [MIT](https://github.com/influxdata/toml/blob/master/LICENSE)
- github.com/influxdata/wlog [MIT](https://github.com/influxdata/wlog/blob/master/LICENSE)
- github.com/jackc/pgx [MIT](https://github.com/jackc/pgx/blob/master/LICENSE)
- github.com/jmespath/go-jmespath [APACHE](https://github.com/jmespath/go-jmespath/blob/master/LICENSE)
- github.com/kardianos/osext [BSD](https://github.com/kardianos/osext/blob/master/LICENSE)
- github.com/kardianos/service [ZLIB](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib)
- github.com/kballard/go-shellquote [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
- github.com/lib/pq [MIT](https://github.com/lib/pq/blob/master/LICENSE.md)
- github.com/matttproud/golang_protobuf_extensions [APACHE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/Microsoft/go-winio [MIT](https://github.com/Microsoft/go-winio/blob/master/LICENSE)
- github.com/miekg/dns [BSD](https://github.com/miekg/dns/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
- github.com/naoina/toml [MIT](https://github.com/naoina/toml/blob/master/LICENSE)
- github.com/nats-io/go-nats [MIT](https://github.com/nats-io/go-nats/blob/master/LICENSE)
- github.com/nats-io/nats [MIT](https://github.com/nats-io/nats/blob/master/LICENSE)
- github.com/nats-io/nuid [MIT](https://github.com/nats-io/nuid/blob/master/LICENSE)
- github.com/nsqio/go-nsq [MIT](https://github.com/nsqio/go-nsq/blob/master/LICENSE)
- github.com/opentracing-contrib/go-observer [APACHE](https://github.com/opentracing-contrib/go-observer/blob/master/LICENSE)
- github.com/opentracing/opentracing-go [MIT](https://github.com/opentracing/opentracing-go/blob/master/LICENSE)
- github.com/openzipkin/zipkin-go-opentracing [MIT](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE)
- github.com/pierrec/lz4 [BSD](https://github.com/pierrec/lz4/blob/master/LICENSE)
- github.com/pierrec/xxHash [BSD](https://github.com/pierrec/xxHash/blob/master/LICENSE)
- github.com/pkg/errors [BSD](https://github.com/pkg/errors/blob/master/LICENSE)
- github.com/pmezard/go-difflib [BSD](https://github.com/pmezard/go-difflib/blob/master/LICENSE)
- github.com/prometheus/client_golang [APACHE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
- github.com/prometheus/client_model [APACHE](https://github.com/prometheus/client_model/blob/master/LICENSE)
- github.com/prometheus/common [APACHE](https://github.com/prometheus/common/blob/master/LICENSE)
- github.com/prometheus/procfs [APACHE](https://github.com/prometheus/procfs/blob/master/LICENSE)
- github.com/rcrowley/go-metrics [BSD](https://github.com/rcrowley/go-metrics/blob/master/LICENSE)
- github.com/samuel/go-zookeeper [BSD](https://github.com/samuel/go-zookeeper/blob/master/LICENSE)
- github.com/satori/go.uuid [MIT](https://github.com/satori/go.uuid/blob/master/LICENSE)
- github.com/shirou/gopsutil [BSD](https://github.com/shirou/gopsutil/blob/master/LICENSE)
- github.com/shirou/w32 [BSD](https://github.com/shirou/w32/blob/master/LICENSE)
- github.com/Shopify/sarama [MIT](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE)
- github.com/Sirupsen/logrus [MIT](https://github.com/Sirupsen/logrus/blob/master/LICENSE)
- github.com/StackExchange/wmi [MIT](https://github.com/StackExchange/wmi/blob/master/LICENSE)
- github.com/stretchr/objx [MIT](https://github.com/stretchr/objx/blob/master/LICENSE.md)
- github.com/soniah/gosnmp [BSD](https://github.com/soniah/gosnmp/blob/master/LICENSE)
- github.com/streadway/amqp [BSD](https://github.com/streadway/amqp/blob/master/LICENSE)
- github.com/stretchr/objx [MIT](https://github.com/stretchr/objx/blob/master/LICENSE.md)
- github.com/stretchr/testify [MIT](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
- github.com/vjeantet/grok [APACHE](https://github.com/vjeantet/grok/blob/master/LICENSE)
- github.com/wvanbergen/kafka [MIT](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
- github.com/wvanbergen/kazoo-go [MIT](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
- github.com/yuin/gopher-lua [MIT](https://github.com/yuin/gopher-lua/blob/master/LICENSE)
- github.com/zensqlmonitor/go-mssqldb [BSD](https://github.com/zensqlmonitor/go-mssqldb/blob/master/LICENSE.txt)
- golang.org/x/crypto [BSD](https://github.com/golang/crypto/blob/master/LICENSE)
- golang.org/x/net [BSD](https://go.googlesource.com/net/+/master/LICENSE)
- golang.org/x/text [BSD](https://go.googlesource.com/text/+/master/LICENSE)
- golang.org/x/sys [BSD](https://go.googlesource.com/sys/+/master/LICENSE)
- gopkg.in/asn1-ber.v1 [MIT](https://github.com/go-asn1-ber/asn1-ber/blob/v1.2/LICENSE)
- gopkg.in/dancannon/gorethink.v1 [APACHE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/fatih/pool.v2 [MIT](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
- gopkg.in/fsnotify.v1 [BSD](https://github.com/fsnotify/fsnotify/blob/v1.4.2/LICENSE)
- gopkg.in/ldap.v2 [MIT](https://github.com/go-ldap/ldap/blob/v2.5.0/LICENSE)
- gopkg.in/mgo.v2 [BSD](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- gopkg.in/olivere/elastic.v5 [MIT](https://github.com/olivere/elastic/blob/v5.0.38/LICENSE)
- gopkg.in/tomb.v1 [BSD](https://github.com/go-tomb/tomb/blob/v1/LICENSE)
- gopkg.in/yaml.v2 [APACHE](https://github.com/go-yaml/yaml/blob/v2/LICENSE)
# List
- collectd.org [MIT LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
- github.com/Shopify/sarama [MIT LICENSE](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE)
- github.com/Sirupsen/logrus [MIT LICENSE](https://github.com/Sirupsen/logrus/blob/master/LICENSE)
- github.com/armon/go-metrics [MIT LICENSE](https://github.com/armon/go-metrics/blob/master/LICENSE)
- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
- github.com/cenkalti/backoff [MIT LICENSE](https://github.com/cenkalti/backoff/blob/master/LICENSE)
- github.com/dancannon/gorethink [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/master/LICENSE)
- github.com/eapache/go-resiliency [MIT LICENSE](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
- github.com/eapache/queue [MIT LICENSE](https://github.com/eapache/queue/blob/master/LICENSE)
- github.com/fsouza/go-dockerclient [BSD LICENSE](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE)
- github.com/go-sql-driver/mysql [MPL LICENSE](https://github.com/go-sql-driver/mysql/blob/master/LICENSE)
- github.com/gogo/protobuf [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/golang/protobuf [BSD LICENSE](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
|
||||
- github.com/hashicorp/raft-boltdb [MPL LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
|
||||
- github.com/kardianos/service [ZLIB LICENSE](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib)
|
||||
- github.com/kballard/go-shellquote [MIT LICENSE](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
|
||||
- github.com/lib/pq [MIT LICENSE](https://github.com/lib/pq/blob/master/LICENSE.md)
|
||||
- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
|
||||
- github.com/naoina/go-stringutil [MIT LICENSE](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
|
||||
- github.com/naoina/toml [MIT LICENSE](https://github.com/naoina/toml/blob/master/LICENSE)
|
||||
- github.com/prometheus/client_golang [APACHE LICENSE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
|
||||
- github.com/samuel/go-zookeeper [BSD LICENSE](https://github.com/samuel/go-zookeeper/blob/master/LICENSE)
|
||||
- github.com/stretchr/objx [MIT LICENSE](https://github.com/stretchr/objx/blob/master/LICENSE.md)
|
||||
- github.com/stretchr/testify [MIT LICENSE](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
|
||||
- github.com/wvanbergen/kafka [MIT LICENSE](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
|
||||
- github.com/wvanbergen/kazoo-go [MIT LICENSE](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
|
||||
- gopkg.in/dancannon/gorethink.v1 [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
|
||||
- gopkg.in/mgo.v2 [BSD LICENSE](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
|
||||
- golang.org/x/crypto/ [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
|
||||
|
||||
@@ -37,9 +37,3 @@ Telegraf can manage its own service through the --service flag:
|
||||
| `telegraf.exe --service start` | Start the telegraf service |
|
||||
| `telegraf.exe --service stop` | Stop the telegraf service |
|
||||
|
||||
|
||||
Troubleshooting common error #1067
|
||||
|
||||
When installing as a service on Windows, always double-check that you specify the full path of the config file; otherwise the Windows service will fail to start
|
||||
|
||||
--config C:\"Program Files"\Telegraf\telegraf.conf
|
||||
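For example, a hypothetical install invocation that passes the quoted full path up front (assuming the service is registered with the standard --service install flag):

telegraf.exe --service install --config "C:\Program Files\Telegraf\telegraf.conf"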
|
||||
@@ -118,12 +118,6 @@
|
||||
## Use SSL but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
|
||||
## HTTP Proxy Config
|
||||
# http_proxy = "http://corporate.proxy:3128"
|
||||
|
||||
## Compress each HTTP request payload using GZIP.
|
||||
# content_encoding = "gzip"
|
||||
|
||||
|
||||
# # Configuration for Amon Server to send metrics to.
|
||||
# [[outputs.amon]]
|
||||
@@ -276,13 +270,6 @@
|
||||
# template = "host.tags.measurement.field"
|
||||
# ## timeout in seconds for the write connection to graphite
|
||||
# timeout = 2
|
||||
#
|
||||
# ## Optional SSL Config
|
||||
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# # ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# # ssl_key = "/etc/telegraf/key.pem"
|
||||
# ## Use SSL but skip chain & host verification
|
||||
# # insecure_skip_verify = false
|
||||
|
||||
|
||||
# # Send telegraf metrics to graylog(s)
|
||||
@@ -511,7 +498,7 @@
|
||||
# # Configuration for the Prometheus client to spawn
|
||||
# [[outputs.prometheus_client]]
|
||||
# ## Address to listen on
|
||||
# # listen = ":9273"
|
||||
# # listen = ":9126"
|
||||
#
|
||||
# ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration
|
||||
# # expiration_interval = "60s"
|
||||
@@ -602,32 +589,6 @@
|
||||
# AGGREGATOR PLUGINS #
|
||||
###############################################################################
|
||||
|
||||
# # Create aggregate histograms.
|
||||
# [[aggregators.histogram]]
|
||||
# ## The period in which to flush the aggregator.
|
||||
# period = "30s"
|
||||
#
|
||||
# ## If true, the original metric will be dropped by the
|
||||
# ## aggregator and will not get sent to the output plugins.
|
||||
# drop_original = false
|
||||
#
|
||||
# ## Example config that aggregates all fields of the metric.
|
||||
# # [[aggregators.histogram.config]]
|
||||
# # ## The set of buckets.
|
||||
# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
|
||||
# # ## The name of metric.
|
||||
# # measurement_name = "cpu"
|
||||
#
|
||||
# ## Example config that aggregates only specific fields of the metric.
|
||||
# # [[aggregators.histogram.config]]
|
||||
# # ## The set of buckets.
|
||||
# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
|
||||
# # ## The name of metric.
|
||||
# # measurement_name = "diskio"
|
||||
# # ## The concrete fields of metric
|
||||
# # fields = ["io_time", "read_time", "write_time"]
|
||||
|
||||
|
||||
# # Keep the aggregate min/max of each metric passing through.
|
||||
# [[aggregators.minmax]]
|
||||
# ## General Aggregator Arguments:
|
||||
@@ -651,8 +612,6 @@
|
||||
totalcpu = true
|
||||
## If true, collect raw CPU time metrics.
|
||||
collect_cpu_time = false
|
||||
## If true, compute and report the sum of all non-idle CPU states.
|
||||
report_active = false
|
||||
|
||||
|
||||
# Read metrics about disk usage by mount point
|
||||
@@ -728,17 +687,15 @@
|
||||
|
||||
# # Read Apache status information (mod_status)
|
||||
# [[inputs.apache]]
|
||||
# ## An array of URLs to gather from, must be directed at the machine
|
||||
# ## readable version of the mod_status page including the auto query string.
|
||||
# ## An array of Apache status URI to gather stats.
|
||||
# ## Default is "http://localhost/server-status?auto".
|
||||
# urls = ["http://localhost/server-status?auto"]
|
||||
# ## user credentials for basic HTTP authentication
|
||||
# username = "myuser"
|
||||
# password = "mypassword"
|
||||
#
|
||||
# ## Credentials for basic HTTP authentication.
|
||||
# # username = "myuser"
|
||||
# # password = "mypassword"
|
||||
#
|
||||
# ## Maximum time to receive response.
|
||||
# # response_timeout = "5s"
|
||||
# ## Timeout for the complete connection and response time in seconds
|
||||
# response_timeout = "25s" ## defaults to 5 seconds
|
||||
#
|
||||
# ## Optional SSL Config
|
||||
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||
@@ -856,7 +813,7 @@
|
||||
#
|
||||
# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
|
||||
# # metrics are made available to the 1 minute period. Some are collected at
|
||||
# # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
|
||||
# # 3 minute and 5 minutes intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
|
||||
# # Note that if a period is configured that is smaller than the minimum for a
|
||||
# # particular metric, that metric will not be returned by the Cloudwatch API
|
||||
# # and will not be collected by Telegraf.
|
||||
@@ -968,23 +925,20 @@
|
||||
# # Query given DNS server and gives statistics
|
||||
# [[inputs.dns_query]]
|
||||
# ## servers to query
|
||||
# servers = ["8.8.8.8"]
|
||||
# servers = ["8.8.8.8"] # required
|
||||
#
|
||||
# ## Network is the network protocol name.
|
||||
# # network = "udp"
|
||||
# ## Domains or subdomains to query. "."(root) is default
|
||||
# domains = ["."] # optional
|
||||
#
|
||||
# ## Domains or subdomains to query.
|
||||
# # domains = ["."]
|
||||
#
|
||||
# ## Query record type.
|
||||
# ## Query record type. Default is "A"
|
||||
# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
|
||||
# # record_type = "A"
|
||||
# record_type = "A" # optional
|
||||
#
|
||||
# ## DNS server port.
|
||||
# # port = 53
|
||||
# ## DNS server port. 53 is the default
|
||||
# port = 53 # optional
|
||||
#
|
||||
# ## Query timeout in seconds.
|
||||
# # timeout = 2
|
||||
# ## Query timeout in seconds. Default is 2 seconds
|
||||
# timeout = 2 # optional
|
||||
|
||||
|
||||
# # Read metrics about docker containers
|
||||
@@ -993,15 +947,8 @@
|
||||
# ## To use TCP, set endpoint = "tcp://[ip]:[port]"
|
||||
# ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
|
||||
# endpoint = "unix:///var/run/docker.sock"
|
||||
#
|
||||
# ## Only collect metrics for these containers, collect all if empty
|
||||
# container_names = []
|
||||
#
|
||||
# ## Containers to include and exclude. Globs accepted.
|
||||
# ## Note that an empty array for both will include all containers
|
||||
# container_name_include = []
|
||||
# container_name_exclude = []
|
||||
#
|
||||
# ## Timeout for docker list, info, and stats commands
|
||||
# timeout = "5s"
|
||||
#
|
||||
@@ -1010,20 +957,11 @@
|
||||
# perdevice = true
|
||||
# ## Whether to report for each container total blkio and network stats or not
|
||||
# total = false
|
||||
# ## Which environment variables should we use as a tag
|
||||
# # tag_env = ["JAVA_HOME", "HEAP_SIZE"]
|
||||
#
|
||||
# ## docker labels to include and exclude as tags. Globs accepted.
|
||||
# ## Note that an empty array for both will include all labels as tags
|
||||
# docker_label_include = []
|
||||
# docker_label_exclude = []
|
||||
#
|
||||
# ## Optional SSL Config
|
||||
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# # ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# # ssl_key = "/etc/telegraf/key.pem"
|
||||
# ## Use SSL but skip chain & host verification
|
||||
# # insecure_skip_verify = false
|
||||
|
||||
|
||||
# # Read statistics from one or many dovecot servers
|
||||
@@ -1093,15 +1031,6 @@
|
||||
# data_format = "influx"
|
||||
|
||||
|
||||
# # Read metrics from fail2ban.
|
||||
# [[inputs.fail2ban]]
|
||||
# ## fail2ban-client requires root access.
|
||||
# ## Setting 'use_sudo' to true will make use of sudo to run fail2ban-client.
|
||||
# ## Users must configure sudo to allow the telegraf user to run fail2ban-client with no password.
|
||||
# ## This plugin runs only "fail2ban-client status".
|
||||
# use_sudo = false
|
||||
|
||||
|
||||
# # Read stats about given file(s)
|
||||
# [[inputs.filestat]]
|
||||
# ## Files to gather stats about.
|
||||
@@ -1118,22 +1047,6 @@
|
||||
# md5 = false
|
||||
|
||||
|
||||
# # Read metrics exposed by fluentd in_monitor plugin
|
||||
# [[inputs.fluentd]]
|
||||
# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
|
||||
# ##
|
||||
# ## Endpoint:
|
||||
# ## - only one URI is allowed
|
||||
# ## - https is not supported
|
||||
# endpoint = "http://localhost:24220/api/plugins.json"
|
||||
#
|
||||
# ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
|
||||
# exclude = [
|
||||
# "monitor_agent",
|
||||
# "dummy",
|
||||
# ]
|
||||
|
||||
|
||||
# # Read flattened metrics from one or more GrayLog HTTP endpoints
|
||||
# [[inputs.graylog]]
|
||||
# ## API endpoint, currently supported API:
|
||||
@@ -1215,26 +1128,25 @@
|
||||
# # HTTP/HTTPS request given an address a method and a timeout
|
||||
# [[inputs.http_response]]
|
||||
# ## Server address (default http://localhost)
|
||||
# # address = "http://localhost"
|
||||
#
|
||||
# address = "http://github.com"
|
||||
# ## Set response_timeout (default 5 seconds)
|
||||
# # response_timeout = "5s"
|
||||
#
|
||||
# response_timeout = "5s"
|
||||
# ## HTTP Request Method
|
||||
# # method = "GET"
|
||||
#
|
||||
# method = "GET"
|
||||
# ## Whether to follow redirects from the server (defaults to false)
|
||||
# # follow_redirects = false
|
||||
#
|
||||
# follow_redirects = true
|
||||
# ## HTTP Request Headers (all values must be strings)
|
||||
# # [inputs.http_response.headers]
|
||||
# # Host = "github.com"
|
||||
# ## Optional HTTP Request Body
|
||||
# # body = '''
|
||||
# # {'fake':'data'}
|
||||
# # '''
|
||||
#
|
||||
# ## Optional substring or regex match in body of the response
|
||||
# # response_string_match = "\"service_status\": \"up\""
|
||||
# # response_string_match = "ok"
|
||||
# # response_string_match = "\".*_status\".?:.?\"up\""
|
||||
# ## response_string_match = "\"service_status\": \"up\""
|
||||
# ## response_string_match = "ok"
|
||||
# ## response_string_match = "\".*_status\".?:.?\"up\""
|
||||
#
|
||||
# ## Optional SSL Config
|
||||
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||
@@ -1242,10 +1154,6 @@
|
||||
# # ssl_key = "/etc/telegraf/key.pem"
|
||||
# ## Use SSL but skip chain & host verification
|
||||
# # insecure_skip_verify = false
|
||||
#
|
||||
# ## HTTP Request Headers (all values must be strings)
|
||||
# # [inputs.http_response.headers]
|
||||
# # Host = "github.com"
|
||||
|
||||
|
||||
# # Read flattened metrics from one or more JSON HTTP endpoints
|
||||
@@ -1308,13 +1216,6 @@
|
||||
# "http://localhost:8086/debug/vars"
|
||||
# ]
|
||||
#
|
||||
# ## Optional SSL Config
|
||||
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# # ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# # ssl_key = "/etc/telegraf/key.pem"
|
||||
# ## Use SSL but skip chain & host verification
|
||||
# # insecure_skip_verify = false
|
||||
#
|
||||
# ## http request & header timeout
|
||||
# timeout = "5s"
|
||||
|
||||
@@ -1345,13 +1246,6 @@
|
||||
# ## if no servers are specified, local machine sensor stats will be queried
|
||||
# ##
|
||||
# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
|
||||
#
|
||||
# ## Recommended: use a metric 'interval' that is a multiple of 'timeout' to avoid
|
||||
# ## gaps or overlap in pulled data
|
||||
# interval = "30s"
|
||||
#
|
||||
# ## Timeout for the ipmitool command to complete
|
||||
# timeout = "20s"
|
||||
|
||||
|
||||
# # Gather packets and bytes throughput from iptables
|
||||
@@ -1471,9 +1365,9 @@
|
||||
|
||||
# # Read metrics from a LeoFS Server via SNMP
|
||||
# [[inputs.leofs]]
|
||||
# ## An array of URLs of the form:
|
||||
# ## "udp://" host [ ":" port]
|
||||
# servers = ["udp://127.0.0.1:4020"]
|
||||
# ## An array of URIs to gather stats about LeoFS.
|
||||
# ## Specify an ip or hostname with port, e.g. 127.0.0.1:4020
|
||||
# servers = ["127.0.0.1:4021"]
|
||||
|
||||
|
||||
# # Provides Linux sysctl fs metrics
|
||||
@@ -1548,24 +1442,14 @@
|
||||
# # ]
|
||||
|
||||
|
||||
# # Collects scores from a minecraft server's scoreboard using the RCON protocol
|
||||
# [[inputs.minecraft]]
|
||||
# ## server address for minecraft
|
||||
# # server = "localhost"
|
||||
# ## port for RCON
|
||||
# # port = "25575"
|
||||
# ## RCON password for the minecraft server
|
||||
# # password = ""
|
||||
|
||||
|
||||
# # Read metrics from one or many MongoDB servers
|
||||
# [[inputs.mongodb]]
|
||||
# ## An array of URLs of the form:
|
||||
# ## "mongodb://" [user ":" pass "@"] host [ ":" port]
|
||||
# ## For example:
|
||||
# ## An array of URIs to gather stats about. Specify an ip or hostname
|
||||
# ## with optional port and password, e.g.:
|
||||
# ## mongodb://user:auth_key@10.10.3.30:27017,
|
||||
# ## mongodb://10.10.3.33:18832,
|
||||
# servers = ["mongodb://127.0.0.1:27017"]
|
||||
# ## 10.0.0.1:10000, etc.
|
||||
# servers = ["127.0.0.1:27017"]
|
||||
# gather_perdb_stats = false
|
||||
#
|
||||
# ## Optional SSL Config
|
||||
@@ -1579,7 +1463,7 @@
|
||||
# # Read metrics from one or many mysql servers
|
||||
# [[inputs.mysql]]
|
||||
# ## specify servers via a url matching:
|
||||
# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
|
||||
# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
|
||||
# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
|
||||
# ## e.g.
|
||||
# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
|
||||
@@ -1636,11 +1520,6 @@
|
||||
# #
|
||||
# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
|
||||
# interval_slow = "30m"
|
||||
#
|
||||
# ## Optional SSL Config (will be used if tls=custom parameter specified in server uri)
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
|
||||
|
||||
# # Read metrics about network interface usage
|
||||
@@ -1682,17 +1561,8 @@
|
||||
|
||||
# # Read Nginx's basic status information (ngx_http_stub_status_module)
|
||||
# [[inputs.nginx]]
|
||||
# # An array of Nginx stub_status URI to gather stats.
|
||||
# urls = ["http://localhost/server_status"]
|
||||
#
|
||||
# # TLS/SSL configuration
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.cer"
|
||||
# ssl_key = "/etc/telegraf/key.key"
|
||||
# insecure_skip_verify = false
|
||||
#
|
||||
# # HTTP response timeout (default: 5s)
|
||||
# response_timeout = "5s"
|
||||
# ## An array of Nginx stub_status URI to gather stats.
|
||||
# urls = ["http://localhost/status"]
|
||||
|
||||
|
||||
# # Read NSQ topic and channel statistics.
|
||||
@@ -1719,27 +1589,6 @@
|
||||
# dns_lookup = true
|
||||
|
||||
|
||||
# # OpenLDAP cn=Monitor plugin
|
||||
# [[inputs.openldap]]
|
||||
# host = "localhost"
|
||||
# port = 389
|
||||
#
|
||||
# # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.
|
||||
# # note that port will likely need to be changed to 636 for ldaps
|
||||
# # valid options: "" | "starttls" | "ldaps"
|
||||
# ssl = ""
|
||||
#
|
||||
# # skip peer certificate verification. Default is false.
|
||||
# insecure_skip_verify = false
|
||||
#
|
||||
# # Path to PEM-encoded Root certificate to use to verify server certificate
|
||||
# ssl_ca = "/etc/ssl/certs.pem"
|
||||
#
|
||||
# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
|
||||
# bind_dn = ""
|
||||
# bind_password = ""
|
||||
|
||||
|
||||
# # Read metrics of passenger using passenger-status
|
||||
# [[inputs.passenger]]
|
||||
# ## Path of passenger-status.
|
||||
@@ -1933,13 +1782,10 @@
|
||||
# location = "/var/lib/puppet/state/last_run_summary.yaml"
|
||||
|
||||
|
||||
# # Reads metrics from RabbitMQ servers via the Management Plugin
|
||||
# # Read metrics from one or many RabbitMQ servers via the management API
|
||||
# [[inputs.rabbitmq]]
|
||||
# ## Management Plugin url. (default: http://localhost:15672)
|
||||
# # url = "http://localhost:15672"
|
||||
# ## Tag added to rabbitmq_overview series; deprecated: use tags
|
||||
# # name = "rmq-server-1"
|
||||
# ## Credentials
|
||||
# # name = "rmq-server-1" # optional tag
|
||||
# # username = "guest"
|
||||
# # password = "guest"
|
||||
#
|
||||
@@ -1960,13 +1806,9 @@
|
||||
# ## Includes connection time, any redirects, and reading the response body.
|
||||
# # client_timeout = "4s"
|
||||
#
|
||||
# ## A list of nodes to gather as the rabbitmq_node measurement. If not
|
||||
# ## specified, metrics for all nodes are gathered.
|
||||
# ## A list of nodes to pull metrics about. If not specified, metrics for
|
||||
# ## all nodes are gathered.
|
||||
# # nodes = ["rabbit@node1", "rabbit@node2"]
|
||||
#
|
||||
# ## A list of queues to gather as the rabbitmq_queue measurement. If not
|
||||
# ## specified, metrics for all queues are gathered.
|
||||
# # queues = ["telegraf"]
|
||||
|
||||
|
||||
# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
|
||||
@@ -1997,14 +1839,6 @@
|
||||
# ## rethinkdb://10.10.3.33:18832,
|
||||
# ## 10.0.0.1:10000, etc.
|
||||
# servers = ["127.0.0.1:28015"]
|
||||
# ##
|
||||
# ## If you use rethinkdb > 2.3.0 with username/password authorization, the
|
||||
# ## protocol has to be named "rethinkdb2" - it will use 1_0 H.
|
||||
# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
|
||||
# ##
|
||||
# ## If you use older versions of rethinkdb (<2.2) with auth_key, the protocol
|
||||
# ## has to be named "rethinkdb".
|
||||
# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]
|
||||
|
||||
|
||||
# # Read metrics one or many Riak servers
|
||||
@@ -2013,26 +1847,6 @@
|
||||
# servers = ["http://localhost:8098"]
|
||||
|
||||
|
||||
# # Read API usage and limits for a Salesforce organisation
|
||||
# [[inputs.salesforce]]
|
||||
# ## specify your credentials
|
||||
# ##
|
||||
# username = "your_username"
|
||||
# password = "your_password"
|
||||
# ##
|
||||
# ## (optional) security token
|
||||
# # security_token = "your_security_token"
|
||||
# ##
|
||||
# ## (optional) environment type (sandbox or production)
|
||||
# ## default is: production
|
||||
# ##
|
||||
# # environment = "production"
|
||||
# ##
|
||||
# ## (optional) API version (default: "39.0")
|
||||
# ##
|
||||
# # version = "39.0"
|
||||
|
||||
|
||||
# # Monitor sensors, requires lm-sensors package
|
||||
# [[inputs.sensors]]
|
||||
# ## Remove numbers from field names.
|
||||
@@ -2278,26 +2092,6 @@
|
||||
# # vg = "rootvg"
|
||||
|
||||
|
||||
# # Gather metrics from the Tomcat server status page.
|
||||
# [[inputs.tomcat]]
|
||||
# ## URL of the Tomcat server status
|
||||
# # url = "http://127.0.0.1:8080/manager/status/all?XML=true"
|
||||
#
|
||||
# ## HTTP Basic Auth Credentials
|
||||
# # username = "tomcat"
|
||||
# # password = "s3cret"
|
||||
#
|
||||
# ## Request timeout
|
||||
# # timeout = "5s"
|
||||
#
|
||||
# ## Optional SSL Config
|
||||
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# # ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# # ssl_key = "/etc/telegraf/key.pem"
|
||||
# ## Use SSL but skip chain & host verification
|
||||
# # insecure_skip_verify = false
|
||||
|
||||
|
||||
# # Inserts sine and cosine waves for demonstration purposes
|
||||
# [[inputs.trig]]
|
||||
# ## Set the amplitude
|
||||
@@ -2314,9 +2108,6 @@
|
||||
|
||||
# # A plugin to collect stats from Varnish HTTP Cache
|
||||
# [[inputs.varnish]]
|
||||
# ## If running as a restricted user you can prepend sudo for additional access:
|
||||
# # use_sudo = false
|
||||
#
|
||||
# ## The default location of the varnishstat binary can be overridden with:
|
||||
# binary = "/usr/bin/varnishstat"
|
||||
#
|
||||
@@ -2410,40 +2201,6 @@
|
||||
|
||||
# # Read metrics from Kafka topic(s)
|
||||
# [[inputs.kafka_consumer]]
|
||||
# ## kafka servers
|
||||
# brokers = ["localhost:9092"]
|
||||
# ## topic(s) to consume
|
||||
# topics = ["telegraf"]
|
||||
#
|
||||
# ## Optional SSL Config
|
||||
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# # ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# # ssl_key = "/etc/telegraf/key.pem"
|
||||
# ## Use SSL but skip chain & host verification
|
||||
# # insecure_skip_verify = false
|
||||
#
|
||||
# ## Optional SASL Config
|
||||
# # sasl_username = "kafka"
|
||||
# # sasl_password = "secret"
|
||||
#
|
||||
# ## the name of the consumer group
|
||||
# consumer_group = "telegraf_metrics_consumers"
|
||||
# ## Offset (must be either "oldest" or "newest")
|
||||
# offset = "oldest"
|
||||
#
|
||||
# ## Data format to consume.
|
||||
# ## Each data format has its own unique set of configuration options, read
|
||||
# ## more about them here:
|
||||
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||
# data_format = "influx"
|
||||
#
|
||||
# ## Maximum length of a message to consume, in bytes (default 0/unlimited);
|
||||
# ## larger messages are dropped
|
||||
# max_message_len = 65536
|
||||
|
||||
|
||||
# # Read metrics from Kafka topic(s)
|
||||
# [[inputs.kafka_consumer_legacy]]
|
||||
# ## topic(s) to consume
|
||||
# topics = ["telegraf"]
|
||||
# ## an array of Zookeeper connection strings
|
||||
@@ -2475,7 +2232,6 @@
|
||||
# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
|
||||
# ## /var/log/apache.log -> only tail the apache log file
|
||||
# files = ["/var/log/apache/access.log"]
|
||||
#
|
||||
# ## Read files that currently exist from the beginning. Files that are created
|
||||
# ## while telegraf is running (and that match the "files" globs) will always
|
||||
# ## be read from the beginning.
|
||||
@@ -2491,26 +2247,12 @@
|
||||
# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
|
||||
# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
|
||||
# patterns = ["%{COMBINED_LOG_FORMAT}"]
|
||||
#
|
||||
# ## Name of the output measurement.
|
||||
# measurement = "apache_access_log"
|
||||
#
|
||||
# ## Full path(s) to custom pattern files.
|
||||
# custom_pattern_files = []
|
||||
#
|
||||
# ## Custom patterns can also be defined here. Put one pattern per line.
|
||||
# custom_patterns = '''
|
||||
#
|
||||
# ## Timezone allows you to provide an override for timestamps that
|
||||
# ## don't already include an offset
|
||||
# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
|
||||
# ##
|
||||
# ## Default: "" which renders UTC
|
||||
# ## Options are as follows:
|
||||
# ## 1. Local -- interpret based on machine localtime
|
||||
# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
|
||||
# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
|
||||
# timezone = "Canada/Eastern"
|
||||
# '''
|
||||
|
||||
|
||||
@@ -2609,11 +2351,6 @@
|
||||
# ## 0 (default) is unlimited.
|
||||
# # max_connections = 1024
|
||||
#
|
||||
# ## Read timeout.
|
||||
# ## Only applies to stream sockets (e.g. TCP).
|
||||
# ## 0 (default) is unlimited.
|
||||
# # read_timeout = "30s"
|
||||
#
|
||||
# ## Maximum socket buffer size in bytes.
|
||||
# ## For stream sockets, once the buffer fills up, the sender will start backing up.
|
||||
# ## For datagram sockets, once the buffer fills up, metrics will start dropping.
|
||||
@@ -2633,14 +2370,8 @@
|
||||
# # data_format = "influx"
|
||||
|
||||
|
||||
# # Statsd UDP/TCP Server
|
||||
# # Statsd Server
|
||||
# [[inputs.statsd]]
|
||||
# ## Protocol, must be "tcp" or "udp" (default=udp)
|
||||
# protocol = "udp"
|
||||
#
|
||||
# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
|
||||
# max_tcp_connections = 250
|
||||
#
|
||||
# ## Address and port to host UDP listener on
|
||||
# service_address = ":8125"
|
||||
#
|
||||
@@ -2741,9 +2472,3 @@
|
||||
# [inputs.webhooks.papertrail]
|
||||
# path = "/papertrail"
|
||||
|
||||
|
||||
# # This plugin implements the Zipkin HTTP server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
|
||||
# [[inputs.zipkin]]
|
||||
# # path = "/api/v1/spans" # URL path for span data
|
||||
# # port = 9411 # Port on which Telegraf listens
|
||||
|
||||
|
||||
20
internal/config/testdata/telegraf-agent.toml
vendored
@@ -143,31 +143,19 @@
|
||||
[[inputs.diskio]]
|
||||
# no configuration
|
||||
|
||||
# read metrics from a Kafka 0.9+ topic
|
||||
# read metrics from a Kafka topic
|
||||
[[inputs.kafka_consumer]]
|
||||
## kafka brokers
|
||||
brokers = ["localhost:9092"]
|
||||
## topic(s) to consume
|
||||
topics = ["telegraf"]
|
||||
## the name of the consumer group
|
||||
consumer_group = "telegraf_metrics_consumers"
|
||||
## Offset (must be either "oldest" or "newest")
|
||||
offset = "oldest"
|
||||
|
||||
# read metrics from a Kafka legacy topic
|
||||
[[inputs.kafka_consumer_legacy]]
|
||||
## topic(s) to consume
|
||||
# topic(s) to consume
|
||||
topics = ["telegraf"]
|
||||
# an array of Zookeeper connection strings
|
||||
zookeeper_peers = ["localhost:2181"]
|
||||
## the name of the consumer group
|
||||
# the name of the consumer group
|
||||
consumer_group = "telegraf_metrics_consumers"
|
||||
# Maximum number of points to buffer between collection intervals
|
||||
point_buffer = 100000
|
||||
## Offset (must be either "oldest" or "newest")
|
||||
# Offset (must be either "oldest" or "newest")
|
||||
offset = "oldest"
|
||||
|
||||
|
||||
# Read metrics from a LeoFS Server via SNMP
|
||||
[[inputs.leofs]]
|
||||
# An array of URI to gather stats about LeoFS.
|
||||
|
||||
@@ -45,7 +45,7 @@ func (g *GlobPath) Match() map[string]os.FileInfo {
|
||||
if !g.hasMeta {
|
||||
out := make(map[string]os.FileInfo)
|
||||
info, err := os.Stat(g.path)
|
||||
if err == nil {
|
||||
if !os.IsNotExist(err) {
|
||||
out[g.path] = info
|
||||
}
|
||||
return out
|
||||
@@ -55,7 +55,7 @@ func (g *GlobPath) Match() map[string]os.FileInfo {
|
||||
files, _ := filepath.Glob(g.path)
|
||||
for _, file := range files {
|
||||
info, err := os.Stat(file)
|
||||
if err == nil {
|
||||
if !os.IsNotExist(err) {
|
||||
out[file] = info
|
||||
}
|
||||
}
|
||||
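A brief usage sketch of the API above (the pattern string is hypothetical): Compile a glob once, then Match returns an os.FileInfo for every file that currently exists and matches.

	glob, err := Compile("/var/log/*.log")
	if err == nil {
		for path, info := range glob.Match() {
			_ = path // one entry per existing match
			_ = info // its os.FileInfo from os.Stat
		}
	}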
|
||||
@@ -1,7 +1,6 @@
|
||||
package globpath
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
@@ -71,20 +70,3 @@ func getTestdataDir() string {
|
||||
_, filename, _, _ := runtime.Caller(1)
|
||||
return strings.Replace(filename, "globpath_test.go", "testdata", 1)
|
||||
}
|
||||
|
||||
func TestMatch_ErrPermission(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
expected map[string]os.FileInfo
|
||||
}{
|
||||
{"/root/foo", map[string]os.FileInfo{}},
|
||||
{"/root/f*", map[string]os.FileInfo{}},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
glob, err := Compile(test.input)
|
||||
require.NoError(t, err)
|
||||
actual := glob.Match()
|
||||
require.Equal(t, test.expected, actual)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -132,7 +132,6 @@ func (f *Filter) Apply(
|
||||
return true
|
||||
}
|
||||
|
||||
// IsActive checking if filter is active
|
||||
func (f *Filter) IsActive() bool {
|
||||
return f.isActive
|
||||
}
|
||||
@@ -140,66 +139,43 @@ func (f *Filter) IsActive() bool {
|
||||
// shouldNamePass returns true if the metric should pass, false if should drop
|
||||
// based on the drop/pass filter parameters
|
||||
func (f *Filter) shouldNamePass(key string) bool {
|
||||
|
||||
pass := func(f *Filter) bool {
|
||||
if f.namePass != nil {
|
||||
if f.namePass.Match(key) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
drop := func(f *Filter) bool {
|
||||
if f.nameDrop != nil {
|
||||
if f.nameDrop.Match(key) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
if f.namePass != nil && f.nameDrop != nil {
|
||||
return pass(f) && drop(f)
|
||||
} else if f.namePass != nil {
|
||||
return pass(f)
|
||||
} else if f.nameDrop != nil {
|
||||
return drop(f)
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// shouldFieldPass returns true if the metric should pass, false if should drop
|
||||
// based on the drop/pass filter parameters
|
||||
func (f *Filter) shouldFieldPass(key string) bool {
|
||||
|
||||
pass := func(f *Filter) bool {
|
||||
if f.fieldPass != nil {
|
||||
if f.fieldPass.Match(key) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
drop := func(f *Filter) bool {
|
||||
if f.fieldDrop != nil {
|
||||
if f.fieldDrop.Match(key) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
if f.fieldPass != nil && f.fieldDrop != nil {
|
||||
return pass(f) && drop(f)
|
||||
} else if f.fieldPass != nil {
|
||||
return pass(f)
|
||||
} else if f.fieldDrop != nil {
|
||||
return drop(f)
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// shouldTagsPass returns true if the metric should pass, false if should drop
|
||||
// based on the tagdrop/tagpass filter parameters
|
||||
func (f *Filter) shouldTagsPass(tags map[string]string) bool {
|
||||
|
||||
pass := func(f *Filter) bool {
|
||||
if f.TagPass != nil {
|
||||
for _, pat := range f.TagPass {
|
||||
if pat.filter == nil {
|
||||
continue
|
||||
@@ -213,7 +189,7 @@ func (f *Filter) shouldTagsPass(tags map[string]string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
drop := func(f *Filter) bool {
|
||||
if f.TagDrop != nil {
|
||||
for _, pat := range f.TagDrop {
|
||||
if pat.filter == nil {
|
||||
continue
|
||||
@@ -227,18 +203,6 @@ func (f *Filter) shouldTagsPass(tags map[string]string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Additional logic for the case where both parameters are set.
|
||||
// see: https://github.com/influxdata/telegraf/issues/2860
|
||||
if f.TagPass != nil && f.TagDrop != nil {
|
||||
// return true only when the tag passes and will not be dropped (true, true).
|
||||
// when the same tag would be both passed and dropped, it is dropped (true, false).
|
||||
return pass(f) && drop(f)
|
||||
} else if f.TagPass != nil {
|
||||
return pass(f)
|
||||
} else if f.TagDrop != nil {
|
||||
return drop(f)
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
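A minimal sketch of the combined semantics (hypothetical tag values; Filter and TagFilter are the same exported types the tests below use): a metric survives only when it matches tagpass and does not match tagdrop.

	f := Filter{
		TagPass: []TagFilter{{Name: "host", Filter: []string{"web*"}}},
		TagDrop: []TagFilter{{Name: "host", Filter: []string{"web3"}}},
	}
	// after f.Compile():
	//   host=web1 -> kept    (pass matches, drop does not)
	//   host=web3 -> dropped (pass matches, but drop matches too)
	//   host=db1  -> dropped (pass does not match)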
|
||||
|
||||
@@ -357,88 +357,3 @@ func TestFilter_FilterTagsMatches(t *testing.T) {
|
||||
"mytag": "foobar",
|
||||
}, pretags)
|
||||
}
|
||||
|
||||
// TestFilter_FilterNamePassAndDrop checks the case when
|
||||
// both parameters are defined
|
||||
// see: https://github.com/influxdata/telegraf/issues/2860
|
||||
func TestFilter_FilterNamePassAndDrop(t *testing.T) {
|
||||
|
||||
inputData := []string{"name1", "name2", "name3", "name4"}
|
||||
expectedResult := []bool{false, true, false, false}
|
||||
|
||||
f := Filter{
|
||||
NamePass: []string{"name1", "name2"},
|
||||
NameDrop: []string{"name1", "name3"},
|
||||
}
|
||||
|
||||
require.NoError(t, f.Compile())
|
||||
|
||||
for i, name := range inputData {
|
||||
assert.Equal(t, f.shouldNamePass(name), expectedResult[i])
|
||||
}
|
||||
}
|
||||
|
||||
// TestFilter_FilterFieldPassAndDrop checks the case when
|
||||
// both parameters are defined
|
||||
// see: https://github.com/influxdata/telegraf/issues/2860
|
||||
func TestFilter_FilterFieldPassAndDrop(t *testing.T) {
|
||||
|
||||
inputData := []string{"field1", "field2", "field3", "field4"}
|
||||
expectedResult := []bool{false, true, false, false}
|
||||
|
||||
f := Filter{
|
||||
FieldPass: []string{"field1", "field2"},
|
||||
FieldDrop: []string{"field1", "field3"},
|
||||
}
|
||||
|
||||
require.NoError(t, f.Compile())
|
||||
|
||||
for i, field := range inputData {
|
||||
assert.Equal(t, f.shouldFieldPass(field), expectedResult[i])
|
||||
}
|
||||
}
|
||||
|
||||
// TestFilter_FilterTagsPassAndDrop checks the case when
|
||||
// both parameters are defined
|
||||
// see: https://github.com/influxdata/telegraf/issues/2860
|
||||
func TestFilter_FilterTagsPassAndDrop(t *testing.T) {
|
||||
|
||||
inputData := []map[string]string{
|
||||
{"tag1": "1", "tag2": "3"},
|
||||
{"tag1": "1", "tag2": "2"},
|
||||
{"tag1": "2", "tag2": "1"},
|
||||
{"tag1": "4", "tag2": "1"},
|
||||
}
|
||||
|
||||
expectedResult := []bool{false, true, false, false}
|
||||
|
||||
filterPass := []TagFilter{
|
||||
TagFilter{
|
||||
Name: "tag1",
|
||||
Filter: []string{"1", "4"},
|
||||
},
|
||||
}
|
||||
|
||||
filterDrop := []TagFilter{
|
||||
TagFilter{
|
||||
Name: "tag1",
|
||||
Filter: []string{"4"},
|
||||
},
|
||||
TagFilter{
|
||||
Name: "tag2",
|
||||
Filter: []string{"3"},
|
||||
},
|
||||
}
|
||||
|
||||
f := Filter{
|
||||
TagDrop: filterDrop,
|
||||
TagPass: filterPass,
|
||||
}
|
||||
|
||||
require.NoError(t, f.Compile())
|
||||
|
||||
for i, tag := range inputData {
|
||||
assert.Equal(t, f.shouldTagsPass(tag), expectedResult[i])
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package models
|
||||
import (
|
||||
"log"
|
||||
"math"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
@@ -78,27 +77,7 @@ func makemetric(
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range tags {
|
||||
if strings.HasSuffix(k, `\`) {
|
||||
log.Printf("D! Measurement [%s] tag [%s] "+
|
||||
"ends with a backslash, skipping", measurement, k)
|
||||
delete(tags, k)
|
||||
continue
|
||||
} else if strings.HasSuffix(v, `\`) {
|
||||
log.Printf("D! Measurement [%s] tag [%s] has a value "+
|
||||
"ending with a backslash, skipping", measurement, k)
|
||||
delete(tags, k)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range fields {
|
||||
if strings.HasSuffix(k, `\`) {
|
||||
log.Printf("D! Measurement [%s] field [%s] "+
|
||||
"ends with a backslash, skipping", measurement, k)
|
||||
delete(fields, k)
|
||||
continue
|
||||
}
|
||||
// Validate uint64 and float64 fields
|
||||
// convert all int & uint types to int64
|
||||
switch val := v.(type) {
|
||||
@@ -149,8 +128,6 @@ func makemetric(
|
||||
delete(fields, k)
|
||||
continue
|
||||
}
|
||||
case string:
|
||||
fields[k] = v
|
||||
default:
|
||||
fields[k] = v
|
||||
}
|
||||
|
||||
@@ -114,7 +114,6 @@ func (r *RunningAggregator) reset() {
|
||||
// for period ticks to tell it when to push and reset the aggregator.
|
||||
func (r *RunningAggregator) Run(
|
||||
acc telegraf.Accumulator,
|
||||
now time.Time,
|
||||
shutdown chan struct{},
|
||||
) {
|
||||
// The start of the period is truncated to the nearest second.
|
||||
@@ -133,6 +132,7 @@ func (r *RunningAggregator) Run(
|
||||
// 2nd interval: 00:10 - 00:20.5
|
||||
// etc.
|
||||
//
|
||||
now := time.Now()
|
||||
r.periodStart = now.Truncate(time.Second)
|
||||
truncation := now.Sub(r.periodStart)
|
||||
r.periodEnd = r.periodStart.Add(r.Config.Period)
|
||||
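A numeric sketch of the schedule in the comment above (plain time arithmetic with hypothetical timestamps): with a 10s period and a start at 00:00:00.5, the stored boundaries stay on whole seconds and the first push lands at roughly 00:00:10.5, i.e. periodEnd plus the truncated fraction.

	now := time.Date(2017, time.August, 1, 0, 0, 0, int(500*time.Millisecond), time.UTC)
	periodStart := now.Truncate(time.Second)       // 00:00:00
	truncation := now.Sub(periodStart)             // 500ms
	periodEnd := periodStart.Add(10 * time.Second) // 00:00:10
	firstPush := periodEnd.Add(truncation)         // 00:00:10.5
	_ = firstPush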
|
||||
@@ -24,7 +24,7 @@ func TestAdd(t *testing.T) {
|
||||
})
|
||||
assert.NoError(t, ra.Config.Filter.Compile())
|
||||
acc := testutil.Accumulator{}
|
||||
go ra.Run(&acc, time.Now(), make(chan struct{}))
|
||||
go ra.Run(&acc, make(chan struct{}))
|
||||
|
||||
m := ra.MakeMetric(
|
||||
"RITest",
|
||||
@@ -55,7 +55,7 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) {
|
||||
})
|
||||
assert.NoError(t, ra.Config.Filter.Compile())
|
||||
acc := testutil.Accumulator{}
|
||||
go ra.Run(&acc, time.Now(), make(chan struct{}))
|
||||
go ra.Run(&acc, make(chan struct{}))
|
||||
|
||||
// metric before current period
|
||||
m := ra.MakeMetric(
|
||||
@@ -113,7 +113,7 @@ func TestAddAndPushOnePeriod(t *testing.T) {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
ra.Run(&acc, time.Now(), shutdown)
|
||||
ra.Run(&acc, shutdown)
|
||||
}()
|
||||
|
||||
m := ra.MakeMetric(
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"github.com/influxdata/telegraf"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestMakeMetricNoFields(t *testing.T) {
|
||||
@@ -333,129 +332,6 @@ func TestMakeMetricNameSuffix(t *testing.T) {
|
||||
)
|
||||
}
|
||||
|
||||
func TestMakeMetric_TrailingSlash(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
measurement string
|
||||
fields map[string]interface{}
|
||||
tags map[string]string
|
||||
expectedNil bool
|
||||
expectedMeasurement string
|
||||
expectedFields map[string]interface{}
|
||||
expectedTags map[string]string
|
||||
}{
|
||||
{
|
||||
name: "Measurement cannot have trailing slash",
|
||||
measurement: `cpu\`,
|
||||
fields: map[string]interface{}{
|
||||
"value": int64(42),
|
||||
},
|
||||
tags: map[string]string{},
|
||||
expectedNil: true,
|
||||
},
|
||||
{
|
||||
name: "Field key with trailing slash dropped",
|
||||
measurement: `cpu`,
|
||||
fields: map[string]interface{}{
|
||||
"value": int64(42),
|
||||
`bad\`: `xyzzy`,
|
||||
},
|
||||
tags: map[string]string{},
|
||||
expectedMeasurement: `cpu`,
|
||||
expectedFields: map[string]interface{}{
|
||||
"value": int64(42),
|
||||
},
|
||||
expectedTags: map[string]string{},
|
||||
},
|
||||
{
|
||||
name: "Field value with trailing slash okay",
|
||||
measurement: `cpu`,
|
||||
fields: map[string]interface{}{
|
||||
"value": int64(42),
|
||||
"ok": `xyzzy\`,
|
||||
},
|
||||
tags: map[string]string{},
|
||||
expectedMeasurement: `cpu`,
|
||||
expectedFields: map[string]interface{}{
|
||||
"value": int64(42),
|
||||
"ok": `xyzzy\`,
|
||||
},
|
||||
expectedTags: map[string]string{},
|
||||
},
|
||||
{
|
||||
name: "Must have one field after dropped",
|
||||
measurement: `cpu`,
|
||||
fields: map[string]interface{}{
|
||||
"bad": math.NaN(),
|
||||
},
|
||||
tags: map[string]string{},
|
||||
expectedNil: true,
|
||||
},
|
||||
{
|
||||
name: "Tag key with trailing slash dropped",
|
||||
measurement: `cpu`,
|
||||
fields: map[string]interface{}{
|
||||
"value": int64(42),
|
||||
},
|
||||
tags: map[string]string{
|
||||
`host\`: "localhost",
|
||||
"a": "x",
|
||||
},
|
||||
expectedMeasurement: `cpu`,
|
||||
expectedFields: map[string]interface{}{
|
||||
"value": int64(42),
|
||||
},
|
||||
expectedTags: map[string]string{
|
||||
"a": "x",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Tag value with trailing slash dropped",
|
||||
measurement: `cpu`,
|
||||
fields: map[string]interface{}{
|
||||
"value": int64(42),
|
||||
},
|
||||
tags: map[string]string{
|
||||
`host`: `localhost\`,
|
||||
"a": "x",
|
||||
},
|
||||
expectedMeasurement: `cpu`,
|
||||
expectedFields: map[string]interface{}{
|
||||
"value": int64(42),
|
||||
},
|
||||
expectedTags: map[string]string{
|
||||
"a": "x",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ri := NewRunningInput(&testInput{}, &InputConfig{
|
||||
Name: "TestRunningInput",
|
||||
})
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
m := ri.MakeMetric(
|
||||
tc.measurement,
|
||||
tc.fields,
|
||||
tc.tags,
|
||||
telegraf.Untyped,
|
||||
now)
|
||||
|
||||
if tc.expectedNil {
|
||||
require.Nil(t, m)
|
||||
} else {
|
||||
require.NotNil(t, m)
|
||||
require.Equal(t, tc.expectedMeasurement, m.Name())
|
||||
require.Equal(t, tc.expectedFields, m.Fields())
|
||||
require.Equal(t, tc.expectedTags, m.Tags())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type testInput struct{}
|
||||
|
||||
func (t *testInput) Description() string { return "" }
|
||||
|
||||
@@ -2,7 +2,6 @@ package models
|
||||
|
||||
import (
|
||||
"log"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
@@ -35,9 +34,6 @@ type RunningOutput struct {
|
||||
|
||||
metrics *buffer.Buffer
|
||||
failMetrics *buffer.Buffer
|
||||
|
||||
// Guards against concurrent calls to the Output as described in #3009
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func NewRunningOutput(
|
||||
@@ -173,8 +169,6 @@ func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
|
||||
if nMetrics == 0 {
|
||||
return nil
|
||||
}
|
||||
ro.Lock()
|
||||
defer ro.Unlock()
|
||||
start := time.Now()
|
||||
err := ro.Output.Write(metrics)
|
||||
elapsed := time.Since(start)
|
||||
|
||||
@@ -1,15 +1,11 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
)
|
||||
|
||||
type RunningProcessor struct {
|
||||
Name string
|
||||
|
||||
sync.Mutex
|
||||
Name string
|
||||
Processor telegraf.Processor
|
||||
Config *ProcessorConfig
|
||||
}
|
||||
@@ -28,9 +24,6 @@ type ProcessorConfig struct {
|
||||
}
|
||||
|
||||
func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
|
||||
rp.Lock()
|
||||
defer rp.Unlock()
|
||||
|
||||
ret := []telegraf.Metric{}
|
||||
|
||||
for _, metric := range in {
|
||||
|
||||
@@ -20,14 +20,8 @@ var (
|
||||
|
||||
// stringFieldEscaper is for escaping string field values only.
|
||||
// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
|
||||
stringFieldEscaper = strings.NewReplacer(
|
||||
`"`, `\"`,
|
||||
`\`, `\\`,
|
||||
)
|
||||
stringFieldUnEscaper = strings.NewReplacer(
|
||||
`\"`, `"`,
|
||||
`\\`, `\`,
|
||||
)
|
||||
stringFieldEscaper = strings.NewReplacer(`"`, `\"`)
|
||||
stringFieldUnEscaper = strings.NewReplacer(`\"`, `"`)
|
||||
)
|
||||
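A round-trip sketch of the replacers above (the sample string is made up): with the backslash rule removed, only double quotes are escaped in string field values, so a literal backslash passes through untouched.

	s := stringFieldEscaper.Replace(`say "hi" \o/`) // say \"hi\" \o/
	_ = stringFieldUnEscaper.Replace(s)             // say "hi" \o/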
|
||||
func escape(s string, t string) string {
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"hash/fnv"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
@@ -21,14 +20,11 @@ func New(
|
||||
t time.Time,
|
||||
mType ...telegraf.ValueType,
|
||||
) (telegraf.Metric, error) {
|
||||
if len(name) == 0 {
|
||||
return nil, fmt.Errorf("missing measurement name")
|
||||
}
|
||||
if len(fields) == 0 {
|
||||
return nil, fmt.Errorf("%s: must have one or more fields", name)
|
||||
return nil, fmt.Errorf("Metric cannot be made without any fields")
|
||||
}
|
||||
if strings.HasSuffix(name, `\`) {
|
||||
return nil, fmt.Errorf("%s: measurement name cannot end with a backslash", name)
|
||||
if len(name) == 0 {
|
||||
return nil, fmt.Errorf("Metric cannot be made with an empty name")
|
||||
}
|
||||
|
||||
var thisType telegraf.ValueType
|
||||
@@ -48,13 +44,6 @@ func New(
|
||||
// pre-allocate exact size of the tags slice
|
||||
taglen := 0
|
||||
for k, v := range tags {
|
||||
if strings.HasSuffix(k, `\`) {
|
||||
return nil, fmt.Errorf("%s: tag key cannot end with a backslash: %s", name, k)
|
||||
}
|
||||
if strings.HasSuffix(v, `\`) {
|
||||
return nil, fmt.Errorf("%s: tag value cannot end with a backslash: %s", name, v)
|
||||
}
|
||||
|
||||
if len(k) == 0 || len(v) == 0 {
|
||||
continue
|
||||
}
|
||||
@@ -78,10 +67,6 @@ func New(
|
||||
// pre-allocate capacity of the fields slice
|
||||
fieldlen := 0
|
||||
for k, _ := range fields {
|
||||
if strings.HasSuffix(k, `\`) {
|
||||
return nil, fmt.Errorf("%s: field key cannot end with a backslash: %s", name, k)
|
||||
}
|
||||
|
||||
// 10 bytes is completely arbitrary, but will at least prevent some
|
||||
// amount of allocations. There's a small possibility this will create
|
||||
// slightly more allocations for a metric that has many short fields.
|
||||
@@ -102,31 +87,8 @@ func New(
|
||||
}
|
||||
|
||||
// indexUnescapedByte finds the index of the first byte equal to b in buf that
|
||||
// is not escaped. Does not allow the escape char to be escaped. Returns -1 if
|
||||
// not found.
|
||||
// is not escaped. Returns -1 if not found.
|
||||
func indexUnescapedByte(buf []byte, b byte) int {
|
||||
var keyi int
|
||||
for {
|
||||
i := bytes.IndexByte(buf[keyi:], b)
|
||||
if i == -1 {
|
||||
return -1
|
||||
} else if i == 0 {
|
||||
break
|
||||
}
|
||||
keyi += i
|
||||
if buf[keyi-1] != '\\' {
|
||||
break
|
||||
} else {
|
||||
keyi++
|
||||
}
|
||||
}
|
||||
return keyi
|
||||
}
|
||||
|
||||
// indexUnescapedByteBackslashEscaping finds the index of the first byte equal
|
||||
// to b in buf that is not escaped. Allows for the escape char `\` to be
|
||||
// escaped. Returns -1 if not found.
|
||||
func indexUnescapedByteBackslashEscaping(buf []byte, b byte) int {
|
||||
var keyi int
|
||||
for {
|
||||
i := bytes.IndexByte(buf[keyi:], b)
|
||||
@@ -256,7 +218,7 @@ func (m *metric) SerializeTo(dst []byte) int {
|
||||
}
|
||||
|
||||
func (m *metric) Split(maxSize int) []telegraf.Metric {
|
||||
if m.Len() <= maxSize {
|
||||
if m.Len() < maxSize {
|
||||
return []telegraf.Metric{m}
|
||||
}
|
||||
var out []telegraf.Metric
|
||||
@@ -286,7 +248,7 @@ func (m *metric) Split(maxSize int) []telegraf.Metric {
|
||||
|
||||
// if true, then we need to create a metric _not_ including the currently
|
||||
// selected field
|
||||
if len(m.fields[i:j])+len(fields)+constant >= maxSize {
|
||||
if len(m.fields[i:j])+len(fields)+constant > maxSize {
|
||||
// if false, then we'll create a metric including the currently
|
||||
// selected field anyways. This means that the given maxSize is too
|
||||
// small for a single field to fit.
|
||||
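A usage sketch that mirrors the reader tests later in this diff (New is this package's constructor): Split partitions a metric's fields into line-protocol chunks no larger than maxSize where possible.

	m, _ := New("foo", map[string]string{},
		map[string]interface{}{"a": int64(1), "bb": int64(2)},
		time.Unix(1481032190, 0))
	parts := m.Split(30) // per the test comments: "foo a=1i ..." (29 bytes) and "foo bb=2i ..." (30 bytes)
	_ = parts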
@@ -324,7 +286,7 @@ func (m *metric) Fields() map[string]interface{} {
|
||||
// end index of field value
|
||||
var i3 int
|
||||
if m.fields[i:][i2] == '"' {
|
||||
i3 = indexUnescapedByteBackslashEscaping(m.fields[i:][i2+1:], '"')
|
||||
i3 = indexUnescapedByte(m.fields[i:][i2+1:], '"')
|
||||
if i3 == -1 {
|
||||
i3 = len(m.fields[i:])
|
||||
}
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"github.com/influxdata/telegraf"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNewMetric(t *testing.T) {
|
||||
@@ -250,15 +249,11 @@ func TestNewMetric_Fields(t *testing.T) {
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"float": float64(1),
|
||||
"int": int64(1),
|
||||
"bool": true,
|
||||
"false": false,
|
||||
"string": "test",
|
||||
"quote_string": `x"y`,
|
||||
"backslash_quote_string": `x\"y`,
|
||||
"backslash": `x\y`,
|
||||
"ends_with_backslash": `x\`,
|
||||
"float": float64(1),
|
||||
"int": int64(1),
|
||||
"bool": true,
|
||||
"false": false,
|
||||
"string": "test",
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
@@ -371,7 +366,7 @@ func TestIndexUnescapedByte(t *testing.T) {
|
||||
{
|
||||
in: []byte(`foo\\bar`),
|
||||
b: 'b',
|
||||
expected: -1,
|
||||
expected: 5,
|
||||
},
|
||||
{
|
||||
in: []byte(`foobar`),
|
||||
@@ -463,7 +458,7 @@ func TestSplitMetric(t *testing.T) {
|
||||
assert.Len(t, split70, 3)
|
||||
|
||||
split60 := m.Split(60)
|
||||
assert.Len(t, split60, 5)
|
||||
assert.Len(t, split60, 4)
|
||||
}
|
||||
|
||||
// test splitting metric into various max lengths
|
||||
@@ -583,42 +578,6 @@ func TestSplitMetric_OneField(t *testing.T) {
|
||||
assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
|
||||
}
|
||||
|
||||
func TestSplitMetric_ExactSize(t *testing.T) {
|
||||
now := time.Unix(0, 1480940990034083306)
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"float": float64(100001),
|
||||
"int": int64(100001),
|
||||
"bool": true,
|
||||
"false": false,
|
||||
"string": "test",
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
actual := m.Split(m.Len())
|
||||
// check that no copy was made
|
||||
require.Equal(t, &m, &actual[0])
|
||||
}
|
||||
|
||||
func TestSplitMetric_NoRoomForNewline(t *testing.T) {
|
||||
now := time.Unix(0, 1480940990034083306)
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"float": float64(100001),
|
||||
"int": int64(100001),
|
||||
"bool": true,
|
||||
"false": false,
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
actual := m.Split(m.Len() - 1)
|
||||
require.Equal(t, 2, len(actual))
|
||||
}
|
||||
|
||||
func TestNewMetricAggregate(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
@@ -689,49 +648,3 @@ func TestEmptyTagValueOrKey(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
}
|
||||
|
||||
func TestNewMetric_TrailingSlash(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
tags map[string]string
|
||||
fields map[string]interface{}
|
||||
}{
|
||||
{
|
||||
name: `cpu\`,
|
||||
fields: map[string]interface{}{
|
||||
"value": int64(42),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cpu",
|
||||
fields: map[string]interface{}{
|
||||
`value\`: "x",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cpu",
|
||||
tags: map[string]string{
|
||||
`host\`: "localhost",
|
||||
},
|
||||
fields: map[string]interface{}{
|
||||
"value": int64(42),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cpu",
|
||||
tags: map[string]string{
|
||||
"host": `localhost\`,
|
||||
},
|
||||
fields: map[string]interface{}{
|
||||
"value": int64(42),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
_, err := New(tc.name, tc.tags, tc.fields, now)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -129,7 +129,7 @@ func parseMetric(buf []byte,
|
||||
// apply precision multiplier
|
||||
var nsec int64
|
||||
multiplier := getPrecisionMultiplier(precision)
|
||||
if len(ts) > 0 && multiplier > 1 {
|
||||
if multiplier > 1 {
|
||||
tsint, err := parseIntBytes(ts, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -380,25 +380,11 @@ func TestParsePrecision(t *testing.T) {
|
||||
} {
|
||||
metrics, err := ParseWithDefaultTimePrecision(
|
||||
[]byte(tt.line+"\n"), time.Now(), tt.precision)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, err, tt)
|
||||
assert.Equal(t, tt.expected, metrics[0].UnixNano())
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePrecisionUnsetTime(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
line string
|
||||
precision string
|
||||
}{
|
||||
{"test v=42", "s"},
|
||||
{"test v=42", "ns"},
|
||||
} {
|
||||
_, err := ParseWithDefaultTimePrecision(
|
||||
[]byte(tt.line+"\n"), time.Now(), tt.precision)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseMaxKeyLength(t *testing.T) {
|
||||
key := ""
|
||||
for {
|
||||
|
||||
@@ -57,7 +57,7 @@ func (r *reader) Read(p []byte) (n int, err error) {
|
||||
// this for-loop is the sunny-day scenario, where we are given a
|
||||
// buffer that is large enough to hold at least a single metric.
|
||||
// all of the cases below it are edge-cases.
|
||||
if r.metrics[r.iM].Len() <= len(p[i:]) {
|
||||
if r.metrics[r.iM].Len() < len(p[i:]) {
|
||||
i += r.metrics[r.iM].SerializeTo(p[i:])
|
||||
} else {
|
||||
break
|
||||
@@ -76,7 +76,7 @@ func (r *reader) Read(p []byte) (n int, err error) {
|
||||
if len(tmp) > 1 {
|
||||
r.splitMetrics = tmp
|
||||
r.state = split
|
||||
if r.splitMetrics[0].Len() <= len(p) {
|
||||
if r.splitMetrics[0].Len() < len(p) {
|
||||
i += r.splitMetrics[0].SerializeTo(p)
|
||||
r.iSM = 1
|
||||
} else {
|
||||
@@ -99,7 +99,7 @@ func (r *reader) Read(p []byte) (n int, err error) {
|
||||
}
|
||||
|
||||
case split:
|
||||
if r.splitMetrics[r.iSM].Len() <= len(p) {
|
||||
if r.splitMetrics[r.iSM].Len() < len(p) {
|
||||
// write the current split metric
|
||||
i += r.splitMetrics[r.iSM].SerializeTo(p)
|
||||
r.iSM++
|
||||
@@ -131,10 +131,6 @@ func (r *reader) Read(p []byte) (n int, err error) {
|
||||
r.iSM++
|
||||
if r.iSM == len(r.splitMetrics) {
|
||||
r.iM++
|
||||
if r.iM == len(r.metrics) {
|
||||
r.state = done
|
||||
return i, io.EOF
|
||||
}
|
||||
r.state = normal
|
||||
} else {
|
||||
r.state = split
|
||||
|
||||
@@ -4,13 +4,12 @@ import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func BenchmarkMetricReader(b *testing.B) {
|
||||
@@ -117,140 +116,6 @@ func TestMetricReader_OverflowMetric(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// Regression test for when a metric is the same size as the buffer.
|
||||
//
|
||||
// Previously EOF would not be set until the next call to Read.
|
||||
func TestMetricReader_MetricSizeEqualsBufferSize(t *testing.T) {
|
||||
ts := time.Unix(1481032190, 0)
|
||||
m1, _ := New("foo", map[string]string{},
|
||||
map[string]interface{}{"a": int64(1)}, ts)
|
||||
metrics := []telegraf.Metric{m1}
|
||||
|
||||
r := NewReader(metrics)
|
||||
buf := make([]byte, m1.Len())
|
||||
|
||||
for {
|
||||
n, err := r.Read(buf)
|
||||
// Should never read 0 bytes unless at EOF or the input buffer has 0 length
|
||||
if n == 0 {
|
||||
require.Equal(t, io.EOF, err)
|
||||
break
|
||||
}
|
||||
// Lines should be terminated with a LF
|
||||
if err == io.EOF {
|
||||
require.Equal(t, uint8('\n'), buf[n-1])
|
||||
break
|
||||
}
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Regression test for when a metric must be split and one of the
|
||||
// split metrics is exactly the size of the buffer.
|
||||
//
|
||||
// Previously an empty string would be returned on the next Read without error,
|
||||
// and then the next Read call would panic.
|
||||
func TestMetricReader_SplitWithExactLengthSplit(t *testing.T) {
|
||||
ts := time.Unix(1481032190, 0)
|
||||
m1, _ := New("foo", map[string]string{},
|
||||
map[string]interface{}{"a": int64(1), "bb": int64(2)}, ts)
|
||||
metrics := []telegraf.Metric{m1}
|
||||
|
||||
r := NewReader(metrics)
|
||||
buf := make([]byte, 30)
|
||||
|
||||
// foo a=1i,bb=2i 1481032190000000000\n // len 35
|
||||
//
|
||||
// Requires this specific split order:
|
||||
// foo a=1i 1481032190000000000\n // len 29
|
||||
// foo bb=2i 1481032190000000000\n // len 30
|
||||
|
||||
for {
|
||||
n, err := r.Read(buf)
|
||||
// Should never read 0 bytes unless at EOF or the input buffer has 0 length
|
||||
if n == 0 {
|
||||
require.Equal(t, io.EOF, err)
|
||||
break
|
||||
}
|
||||
// Lines should be terminated with a LF
|
||||
if err == io.EOF {
|
||||
require.Equal(t, uint8('\n'), buf[n-1])
|
||||
break
|
||||
}
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Regression test for when a metric must be split and one of the
|
||||
// split metrics is larger than the buffer.
|
||||
//
|
||||
// Previously the metric index would be set incorrectly causing a panic.
|
||||
func TestMetricReader_SplitOverflowOversized(t *testing.T) {
|
||||
ts := time.Unix(1481032190, 0)
|
||||
m1, _ := New("foo", map[string]string{},
|
||||
map[string]interface{}{
|
||||
"a": int64(1),
|
||||
"bbb": int64(2),
|
||||
}, ts)
|
||||
metrics := []telegraf.Metric{m1}
|
||||
|
||||
r := NewReader(metrics)
|
||||
buf := make([]byte, 30)
|
||||
|
||||
// foo a=1i,bbb=2i 1481032190000000000\n // len 36
|
||||
//
|
||||
// foo a=1i 1481032190000000000\n // len 29
|
||||
// foo bbb=2i 1481032190000000000\n // len 31
|
||||
|
||||
for {
|
||||
n, err := r.Read(buf)
|
||||
// Should never read 0 bytes unless at EOF or the input buffer has 0 length
|
||||
if n == 0 {
|
||||
require.Equal(t, io.EOF, err)
|
||||
break
|
||||
}
|
||||
// Lines should be terminated with a LF
|
||||
if err == io.EOF {
|
||||
require.Equal(t, uint8('\n'), buf[n-1])
|
||||
break
|
||||
}
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Regression test for when a split metric exactly fits in the buffer.
|
||||
//
|
||||
// Previously the metric would be overflow split when not required.
|
||||
func TestMetricReader_SplitOverflowUnneeded(t *testing.T) {
|
||||
ts := time.Unix(1481032190, 0)
|
||||
m1, _ := New("foo", map[string]string{},
|
||||
map[string]interface{}{"a": int64(1), "b": int64(2)}, ts)
|
||||
metrics := []telegraf.Metric{m1}
|
||||
|
||||
r := NewReader(metrics)
|
||||
buf := make([]byte, 29)
|
||||
|
||||
// foo a=1i,b=2i 1481032190000000000\n // len 34
|
||||
//
|
||||
// foo a=1i 1481032190000000000\n // len 29
|
||||
// foo b=2i 1481032190000000000\n // len 29
|
||||
|
||||
for {
|
||||
n, err := r.Read(buf)
|
||||
// Should never read 0 bytes unless at EOF or the input buffer has 0 length
|
||||
if n == 0 {
|
||||
require.Equal(t, io.EOF, err)
|
||||
break
|
||||
}
|
||||
// Lines should be terminated with a LF
|
||||
if err == io.EOF {
|
||||
require.Equal(t, uint8('\n'), buf[n-1])
|
||||
break
|
||||
}
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetricReader_OverflowMultipleMetrics(t *testing.T) {
|
||||
ts := time.Unix(1481032190, 0)
|
||||
m, _ := New("foo", map[string]string{},
|
||||
@@ -620,94 +485,3 @@ func TestMetricReader_SplitMetricChangingBuffer2(t *testing.T) {
|
||||
assert.Equal(t, test.err, err, test.expRegex)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReader_Read(t *testing.T) {
|
||||
epoch := time.Unix(0, 0)
|
||||
|
||||
type args struct {
|
||||
name string
|
||||
tags map[string]string
|
||||
fields map[string]interface{}
|
||||
t time.Time
|
||||
mType []telegraf.ValueType
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
expected []byte
|
||||
}{
|
||||
{
|
||||
name: "escape backslashes in string field",
|
||||
args: args{
|
||||
name: "cpu",
|
||||
tags: map[string]string{},
|
||||
fields: map[string]interface{}{"value": `test\`},
|
||||
t: epoch,
|
||||
},
|
||||
expected: []byte(`cpu value="test\\" 0`),
|
||||
},
|
||||
{
|
||||
name: "escape quote in string field",
|
||||
args: args{
|
||||
name: "cpu",
|
||||
tags: map[string]string{},
|
||||
fields: map[string]interface{}{"value": `test"`},
|
||||
t: epoch,
|
||||
},
|
||||
expected: []byte(`cpu value="test\"" 0`),
|
||||
},
|
||||
{
|
||||
name: "escape quote and backslash in string field",
|
||||
args: args{
|
||||
name: "cpu",
|
||||
tags: map[string]string{},
|
||||
fields: map[string]interface{}{"value": `test\"`},
|
||||
t: epoch,
|
||||
},
|
||||
expected: []byte(`cpu value="test\\\"" 0`),
|
||||
},
|
||||
{
|
||||
name: "escape multiple backslash in string field",
|
||||
args: args{
|
||||
name: "cpu",
|
||||
tags: map[string]string{},
|
||||
fields: map[string]interface{}{"value": `test\\`},
|
||||
t: epoch,
|
||||
},
|
||||
expected: []byte(`cpu value="test\\\\" 0`),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
buf := make([]byte, 512)
|
||||
m, err := New(tt.args.name, tt.args.tags, tt.args.fields, tt.args.t, tt.args.mType...)
|
||||
require.NoError(t, err)
|
||||
|
||||
r := NewReader([]telegraf.Metric{m})
|
||||
num, err := r.Read(buf)
|
||||
if err != io.EOF {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
line := string(buf[:num])
|
||||
// This is done so that we can use raw strings in the test spec
|
||||
noeol := strings.TrimRight(line, "\n")
|
||||
require.Equal(t, string(tt.expected), noeol)
|
||||
require.Equal(t, len(tt.expected)+1, num)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetricRoundtrip(t *testing.T) {
|
||||
const lp = `nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=netstat,sr=database IpExtInBcastOctets=12570626154i,IpExtInBcastPkts=95541226i,IpExtInCEPkts=0i,IpExtInCsumErrors=0i,IpExtInECT0Pkts=55674i,IpExtInECT1Pkts=0i,IpExtInMcastOctets=5928296i,IpExtInMcastPkts=174365i,IpExtInNoECTPkts=17965863529i,IpExtInNoRoutes=20i,IpExtInOctets=3334866321815i,IpExtInTruncatedPkts=0i,IpExtOutBcastOctets=0i,IpExtOutBcastPkts=0i,IpExtOutMcastOctets=0i,IpExtOutMcastPkts=0i,IpExtOutOctets=31397892391399i,TcpExtArpFilter=0i,TcpExtBusyPollRxPackets=0i,TcpExtDelayedACKLocked=14094i,TcpExtDelayedACKLost=302083i,TcpExtDelayedACKs=55486507i,TcpExtEmbryonicRsts=11879i,TcpExtIPReversePathFilter=0i,TcpExtListenDrops=1736i,TcpExtListenOverflows=0i,TcpExtLockDroppedIcmps=0i,TcpExtOfoPruned=0i,TcpExtOutOfWindowIcmps=8i,TcpExtPAWSActive=0i,TcpExtPAWSEstab=974i,TcpExtPAWSPassive=0i,TcpExtPruneCalled=0i,TcpExtRcvPruned=0i,TcpExtSyncookiesFailed=12593i,TcpExtSyncookiesRecv=0i,TcpExtSyncookiesSent=0i,TcpExtTCPACKSkippedChallenge=0i,TcpExtTCPACKSkippedFinWait2=0i,TcpExtTCPACKSkippedPAWS=806i,TcpExtTCPACKSkippedSeq=519i,TcpExtTCPACKSkippedSynRecv=0i,TcpExtTCPACKSkippedTimeWait=0i,TcpExtTCPAbortFailed=0i,TcpExtTCPAbortOnClose=22i,TcpExtTCPAbortOnData=36593i,TcpExtTCPAbortOnLinger=0i,TcpExtTCPAbortOnMemory=0i,TcpExtTCPAbortOnTimeout=674i,TcpExtTCPAutoCorking=494253233i,TcpExtTCPBacklogDrop=0i,TcpExtTCPChallengeACK=281i,TcpExtTCPDSACKIgnoredNoUndo=93354i,TcpExtTCPDSACKIgnoredOld=336i,TcpExtTCPDSACKOfoRecv=0i,TcpExtTCPDSACKOfoSent=7i,TcpExtTCPDSACKOldSent=302073i,TcpExtTCPDSACKRecv=215884i,TcpExtTCPDSACKUndo=7633i,TcpExtTCPDeferAcceptDrop=0i,TcpExtTCPDirectCopyFromBacklog=0i,TcpExtTCPDirectCopyFromPrequeue=0i,TcpExtTCPFACKReorder=1320i,TcpExtTCPFastOpenActive=0i,TcpExtTCPFastOpenActiveFail=0i,TcpExtTCPFastOpenCookieReqd=0i,TcpExtTCPFastOpenListenOverflow=0i,TcpExtTCPFastOpenPassive=0i,TcpExtTCPFastOpenPassiveFail=0i,TcpExtTCPFastRetrans=350681i,TcpExtTCPForwardRetrans=142168i,TcpExtTCPFromZeroWindowAdv=4317i,TcpExtTCPFullUndo=29502i,TcpExtTCPHPAcks=10267073000i,TcpExtTCPHPHits=5629837098i,TcpExtTCPHPHitsToUser=0i,TcpExtTCPHystartDelayCwnd=285127i,TcpExtTCPHystartDelayDetect=12318i,TcpExtTCPHystartTrainCwnd=69160570i,TcpExtTCPHystartTrainDetect=3315799i,TcpExtTCPLossFailures=109i,TcpExtTCPLossProbeRecovery=110819i,TcpExtTCPLossProbes=233995i,TcpExtTCPLossUndo=5276i,TcpExtTCPLostRetransmit=397i,TcpExtTCPMD5NotFound=0i,TcpExtTCPMD5Unexpected=0i,TcpExtTCPMemoryPressures=0i,TcpExtTCPMinTTLDrop=0i,TcpExtTCPOFODrop=0i,TcpExtTCPOFOMerge=7i,TcpExtTCPOFOQueue=15196i,TcpExtTCPOrigDataSent=29055119435i,TcpExtTCPPartialUndo=21320i,TcpExtTCPPrequeueDropped=0i,TcpExtTCPPrequeued=0i,TcpExtTCPPureAcks=1236441827i,TcpExtTCPRcvCoalesce=225590473i,TcpExtTCPRcvCollapsed=0i,TcpExtTCPRenoFailures=0i,TcpExtTCPRenoRecovery=0i,TcpExtTCPRenoRecoveryFail=0i,TcpExtTCPRenoReorder=0i,TcpExtTCPReqQFullDoCookies=0i,TcpExtTCPReqQFullDrop=0i,TcpExtTCPRetransFail=41i,TcpExtTCPSACKDiscard=0i,TcpExtTCPSACKReneging=0i,TcpExtTCPSACKReorder=4307i,TcpExtTCPSYNChallenge=244i,TcpExtTCPSackFailures=1698i,TcpExtTCPSackMerged=184668i,TcpExtTCPSackRecovery=97369i,TcpExtTCPSackRecoveryFail=381i,TcpExtTCPSackShiftFallback=2697079i,TcpExtTCPSackShifted=760299i,TcpExtTCPSchedulerFailed=0i,TcpExtTCPSlowStartRetrans=9276i,TcpExtTCPSpuriousRTOs=959i,TcpExtTCPSpuriousRtxHostQueues=2973i,TcpExtTCPSynRetrans=200970i,TcpExtTCPTSReorder=15221i,TcpExtTCPTimeWaitOverflow=0i,TcpExtTCPTimeouts=70127i,TcpExtTCPToZeroWindowAdv=4317i,TcpExtTCPWantZeroWindowAdv=2133i,TcpExtTW=24809813i,TcpExtTWKilled=0i,TcpExtTWRecycled=0i 1496460785000000000
|
||||
nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=snmp,sr=database IcmpInAddrMaskReps=0i,IcmpInAddrMasks=90i,IcmpInCsumErrors=0i,IcmpInDestUnreachs=284401i,IcmpInEchoReps=9i,IcmpInEchos=1761912i,IcmpInErrors=407i,IcmpInMsgs=2047767i,IcmpInParmProbs=0i,IcmpInRedirects=0i,IcmpInSrcQuenchs=0i,IcmpInTimeExcds=46i,IcmpInTimestampReps=0i,IcmpInTimestamps=1309i,IcmpMsgInType0=9i,IcmpMsgInType11=46i,IcmpMsgInType13=1309i,IcmpMsgInType17=90i,IcmpMsgInType3=284401i,IcmpMsgInType8=1761912i,IcmpMsgOutType0=1761912i,IcmpMsgOutType14=1248i,IcmpMsgOutType3=108709i,IcmpMsgOutType8=9i,IcmpOutAddrMaskReps=0i,IcmpOutAddrMasks=0i,IcmpOutDestUnreachs=108709i,IcmpOutEchoReps=1761912i,IcmpOutEchos=9i,IcmpOutErrors=0i,IcmpOutMsgs=1871878i,IcmpOutParmProbs=0i,IcmpOutRedirects=0i,IcmpOutSrcQuenchs=0i,IcmpOutTimeExcds=0i,IcmpOutTimestampReps=1248i,IcmpOutTimestamps=0i,IpDefaultTTL=64i,IpForwDatagrams=0i,IpForwarding=2i,IpFragCreates=0i,IpFragFails=0i,IpFragOKs=0i,IpInAddrErrors=0i,IpInDelivers=17658795773i,IpInDiscards=0i,IpInHdrErrors=0i,IpInReceives=17659269339i,IpInUnknownProtos=0i,IpOutDiscards=236976i,IpOutNoRoutes=1009i,IpOutRequests=23466783734i,IpReasmFails=0i,IpReasmOKs=0i,IpReasmReqds=0i,IpReasmTimeout=0i,TcpActiveOpens=23308977i,TcpAttemptFails=3757543i,TcpCurrEstab=280i,TcpEstabResets=184792i,TcpInCsumErrors=0i,TcpInErrs=232i,TcpInSegs=17536573089i,TcpMaxConn=-1i,TcpOutRsts=4051451i,TcpOutSegs=29836254873i,TcpPassiveOpens=176546974i,TcpRetransSegs=878085i,TcpRtoAlgorithm=1i,TcpRtoMax=120000i,TcpRtoMin=200i,UdpInCsumErrors=0i,UdpInDatagrams=24441661i,UdpInErrors=0i,UdpLiteInCsumErrors=0i,UdpLiteInDatagrams=0i,UdpLiteInErrors=0i,UdpLiteNoPorts=0i,UdpLiteOutDatagrams=0i,UdpLiteRcvbufErrors=0i,UdpLiteSndbufErrors=0i,UdpNoPorts=17660i,UdpOutDatagrams=51807896i,UdpRcvbufErrors=0i,UdpSndbufErrors=236922i 1496460785000000000
|
||||
`
|
||||
metrics, err := Parse([]byte(lp))
|
||||
require.NoError(t, err)
|
||||
r := NewReader(metrics)
|
||||
buf := make([]byte, 128)
|
||||
_, err = r.Read(buf)
|
||||
require.NoError(t, err)
|
||||
metrics, err = Parse(buf)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
package all
|
||||
|
||||
import (
|
||||
_ "github.com/influxdata/telegraf/plugins/aggregators/histogram"
|
||||
_ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
|
||||
)
|
||||
|
||||
@@ -1,97 +0,0 @@
# Histogram Aggregator Plugin

The histogram aggregator plugin creates histograms containing the counts of
field values within a range.

Values added to a bucket are also added to the larger buckets in the
distribution. This creates a [cumulative histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg).

Like other Telegraf aggregators, the metric is emitted every `period` seconds.
Bucket counts, however, are not reset between periods and are non-strictly
increasing while Telegraf is running.

#### Design

Each metric is passed to the aggregator, which looks up the histogram buckets
configured for that metric's fields. If buckets are found, the count of the
matching bucket is incremented by one; values larger than the largest bucket
border are counted in the `+Inf` bucket. Every `period` seconds this data is
forwarded to the outputs.

The bucket hit-counting algorithm is based on the one implemented in the
Prometheus [client](https://github.com/prometheus/client_golang/blob/master/prometheus/histogram.go).

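A minimal sketch of this hit counting (it mirrors the `sort.SearchFloat64s` lookup used in the plugin's `Add` method; the `countInto` helper and the sample values are otherwise illustrative):

```go
package main

import (
	"fmt"
	"sort"
)

// countInto increments the bucket whose inclusive right border is the first
// one >= value; values above the last border land in the extra +Inf slot.
func countInto(counts []int64, borders []float64, value float64) {
	// sort.SearchFloat64s returns len(borders) when value exceeds every
	// border, which is exactly the index of the +Inf bucket.
	counts[sort.SearchFloat64s(borders, value)]++
}

func main() {
	borders := []float64{5, 10, 30, 70, 100}
	counts := make([]int64, len(borders)+1) // one extra slot for +Inf
	for _, v := range []float64{3, 10, 42, 250} {
		countInto(counts, borders, v)
	}
	fmt.Println(counts) // [1 1 0 1 0 1]
}
```

Note these are raw per-bucket hits; the cumulative counts shown in the example output below are summed only when the aggregator pushes its fields.
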
### Configuration

```toml
# Configuration for aggregate histogram metrics
[[aggregators.histogram]]
  ## The period in which to flush the aggregator.
  period = "30s"

  ## If true, the original metric will be dropped by the
  ## aggregator and will not get sent to the output plugins.
  drop_original = false

  ## Example config that aggregates all fields of the metric.
  # [[aggregators.histogram.config]]
  #   ## The set of buckets.
  #   buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
  #   ## The name of metric.
  #   measurement_name = "cpu"

  ## Example config that aggregates only specific fields of the metric.
  # [[aggregators.histogram.config]]
  #   ## The set of buckets.
  #   buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
  #   ## The name of metric.
  #   measurement_name = "diskio"
  #   ## The concrete fields of metric
  #   fields = ["io_time", "read_time", "write_time"]
```

The user is responsible for defining the bounds of the histogram buckets as
well as the measurement name and fields to aggregate.

Each histogram config section must contain a `buckets` and `measurement_name`
option. Optionally, if `fields` is set, only the fields listed will be
aggregated. If `fields` is not set, all fields are aggregated.

The `buckets` option contains a list of floats which specify the bucket
boundaries. Each float value defines the inclusive upper bound of the bucket.
The `+Inf` bucket is added automatically and does not need to be defined.

### Measurements & Fields:

The suffix `_bucket` is appended to each field key.

- measurement1
    - field1_bucket
    - field2_bucket

### Tags:

All measurements are given the `le` tag, which holds the right border of a
bucket: the metric value is less than or equal to the value of this tag. For
example, assume a metric value of 10 and the buckets [5, 10, 30, 70, 100]. The
value is counted in the bucket with right border `10`, so that count is emitted
with the tag `le=10`.

### Example Output:

```
cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=0i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=20.0 usage_idle_bucket=1i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=30.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=40.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=50.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=60.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=70.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=80.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=90.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=100.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=+Inf usage_idle_bucket=2i 1486998330000000000
```

@@ -1,315 +0,0 @@
|
||||
package histogram
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/aggregators"
|
||||
)
|
||||
|
||||
// bucketTag is the tag that holds the right bucket border
|
||||
const bucketTag = "le"
|
||||
|
||||
// bucketInf is the right bucket border for infinite values
|
||||
const bucketInf = "+Inf"
|
||||
|
||||
// HistogramAggregator is an aggregator that holds the histogram configs and the histograms for the configured metrics
|
||||
type HistogramAggregator struct {
|
||||
Configs []config `toml:"config"`
|
||||
|
||||
buckets bucketsByMetrics
|
||||
cache map[uint64]metricHistogramCollection
|
||||
}
|
||||
|
||||
// config holds the metric name, the fields to aggregate, and the histogram buckets.
|
||||
type config struct {
|
||||
Metric string `toml:"measurement_name"`
|
||||
Fields []string `toml:"fields"`
|
||||
Buckets buckets `toml:"buckets"`
|
||||
}
|
||||
|
||||
// bucketsByMetrics contains the buckets grouped by metric and field name
|
||||
type bucketsByMetrics map[string]bucketsByFields
|
||||
|
||||
// bucketsByFields contains the buckets grouped by field name
|
||||
type bucketsByFields map[string]buckets
|
||||
|
||||
// buckets contains the right borders of the buckets
|
||||
type buckets []float64
|
||||
|
||||
// metricHistogramCollection aggregates the histogram data
|
||||
type metricHistogramCollection struct {
|
||||
histogramCollection map[string]counts
|
||||
name string
|
||||
tags map[string]string
|
||||
}
|
||||
|
||||
// counts holds the number of hits per bucket
|
||||
type counts []int64
|
||||
|
||||
// groupedByCountFields holds a metric's name, tags, and its fields with their counts
|
||||
type groupedByCountFields struct {
|
||||
name string
|
||||
tags map[string]string
|
||||
fieldsWithCount map[string]int64
|
||||
}
|
||||
|
||||
// NewHistogramAggregator creates a new histogram aggregator
|
||||
func NewHistogramAggregator() telegraf.Aggregator {
|
||||
h := &HistogramAggregator{}
|
||||
h.buckets = make(bucketsByMetrics)
|
||||
h.resetCache()
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
## The period in which to flush the aggregator.
|
||||
period = "30s"
|
||||
|
||||
## If true, the original metric will be dropped by the
|
||||
## aggregator and will not get sent to the output plugins.
|
||||
drop_original = false
|
||||
|
||||
## Example config that aggregates all fields of the metric.
|
||||
# [[aggregators.histogram.config]]
|
||||
# ## The set of buckets.
|
||||
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
|
||||
# ## The name of metric.
|
||||
# measurement_name = "cpu"
|
||||
|
||||
## Example config that aggregates only specific fields of the metric.
|
||||
# [[aggregators.histogram.config]]
|
||||
# ## The set of buckets.
|
||||
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
|
||||
# ## The name of metric.
|
||||
# measurement_name = "diskio"
|
||||
# ## The concrete fields of metric
|
||||
# fields = ["io_time", "read_time", "write_time"]
|
||||
`
|
||||
|
||||
// SampleConfig returns sample of config
|
||||
func (h *HistogramAggregator) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
// Description returns description of aggregator plugin
|
||||
func (h *HistogramAggregator) Description() string {
|
||||
return "Create aggregate histograms."
|
||||
}
|
||||
|
||||
// Add adds a new hit to the buckets
|
||||
func (h *HistogramAggregator) Add(in telegraf.Metric) {
|
||||
bucketsByField := make(map[string][]float64)
|
||||
for field := range in.Fields() {
|
||||
buckets := h.getBuckets(in.Name(), field)
|
||||
if buckets != nil {
|
||||
bucketsByField[field] = buckets
|
||||
}
|
||||
}
|
||||
|
||||
if len(bucketsByField) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
id := in.HashID()
|
||||
agr, ok := h.cache[id]
|
||||
if !ok {
|
||||
agr = metricHistogramCollection{
|
||||
name: in.Name(),
|
||||
tags: in.Tags(),
|
||||
histogramCollection: make(map[string]counts),
|
||||
}
|
||||
}
|
||||
|
||||
for field, value := range in.Fields() {
|
||||
if buckets, ok := bucketsByField[field]; ok {
|
||||
if agr.histogramCollection[field] == nil {
|
||||
agr.histogramCollection[field] = make(counts, len(buckets)+1)
|
||||
}
|
||||
|
||||
if value, ok := convert(value); ok {
|
||||
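// sort.SearchFloat64s returns the index of the first border >= value;
// an index equal to len(buckets) selects the extra +Inf slot.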
index := sort.SearchFloat64s(buckets, value)
|
||||
agr.histogramCollection[field][index]++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
h.cache[id] = agr
|
||||
}
|
||||
|
||||
// Push emits the histogram values for the cached metrics
|
||||
func (h *HistogramAggregator) Push(acc telegraf.Accumulator) {
|
||||
metricsWithGroupedFields := []groupedByCountFields{}
|
||||
|
||||
for _, aggregate := range h.cache {
|
||||
for field, counts := range aggregate.histogramCollection {
|
||||
h.groupFieldsByBuckets(&metricsWithGroupedFields, aggregate.name, field, copyTags(aggregate.tags), counts)
|
||||
}
|
||||
}
|
||||
|
||||
for _, metric := range metricsWithGroupedFields {
|
||||
acc.AddFields(metric.name, makeFieldsWithCount(metric.fieldsWithCount), metric.tags)
|
||||
}
|
||||
}
|
||||
|
||||
// groupFieldsByBuckets groups fields by metric buckets which are represented as tags
|
||||
func (h *HistogramAggregator) groupFieldsByBuckets(
|
||||
metricsWithGroupedFields *[]groupedByCountFields,
|
||||
name string,
|
||||
field string,
|
||||
tags map[string]string,
|
||||
counts []int64,
|
||||
) {
|
||||
count := int64(0)
|
||||
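// Emit cumulatively: each bucket's count includes the hits of all
// smaller buckets.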
for index, bucket := range h.getBuckets(name, field) {
|
||||
count += counts[index]
|
||||
|
||||
tags[bucketTag] = strconv.FormatFloat(bucket, 'f', -1, 64)
|
||||
h.groupField(metricsWithGroupedFields, name, field, count, copyTags(tags))
|
||||
}
|
||||
|
||||
count += counts[len(counts)-1]
|
||||
tags[bucketTag] = bucketInf
|
||||
|
||||
h.groupField(metricsWithGroupedFields, name, field, count, tags)
|
||||
}
|
||||
|
||||
// groupField groups field by count value
|
||||
func (h *HistogramAggregator) groupField(
|
||||
metricsWithGroupedFields *[]groupedByCountFields,
|
||||
name string,
|
||||
field string,
|
||||
count int64,
|
||||
tags map[string]string,
|
||||
) {
|
||||
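// Merge into an existing group with the same metric name and tags if
// present; otherwise append a new group below.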
for key, metric := range *metricsWithGroupedFields {
|
||||
if name == metric.name && isTagsIdentical(tags, metric.tags) {
|
||||
(*metricsWithGroupedFields)[key].fieldsWithCount[field] = count
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
fieldsWithCount := map[string]int64{
|
||||
field: count,
|
||||
}
|
||||
|
||||
*metricsWithGroupedFields = append(
|
||||
*metricsWithGroupedFields,
|
||||
groupedByCountFields{name: name, tags: tags, fieldsWithCount: fieldsWithCount},
|
||||
)
|
||||
}
|
||||
|
||||
// Reset does nothing; counts must accumulate across periods, otherwise a
// short period would produce histograms covering only a small part of the distribution.
|
||||
func (h *HistogramAggregator) Reset() {}
|
||||
|
||||
// resetCache resets cached counts(hits) in the buckets
|
||||
func (h *HistogramAggregator) resetCache() {
|
||||
h.cache = make(map[uint64]metricHistogramCollection)
|
||||
}
|
||||
|
||||
// getBuckets finds buckets and returns them
|
||||
func (h *HistogramAggregator) getBuckets(metric string, field string) []float64 {
|
||||
if buckets, ok := h.buckets[metric][field]; ok {
|
||||
return buckets
|
||||
}
|
||||
|
||||
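// Lazily fill the bucket cache from the configured sections on first lookup.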
for _, config := range h.Configs {
|
||||
if config.Metric == metric {
|
||||
if !isBucketExists(field, config) {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := h.buckets[metric]; !ok {
|
||||
h.buckets[metric] = make(bucketsByFields)
|
||||
}
|
||||
|
||||
h.buckets[metric][field] = sortBuckets(config.Buckets)
|
||||
}
|
||||
}
|
||||
|
||||
return h.buckets[metric][field]
|
||||
}
|
||||
|
||||
// isBucketExists reports whether buckets exist for the passed field
|
||||
func isBucketExists(field string, cfg config) bool {
|
||||
if len(cfg.Fields) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
for _, fl := range cfg.Fields {
|
||||
if fl == field {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// sortBuckets sorts the buckets if needed
|
||||
func sortBuckets(buckets []float64) []float64 {
|
||||
for i, bucket := range buckets {
|
||||
if i < len(buckets)-1 && bucket >= buckets[i+1] {
|
||||
sort.Float64s(buckets)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return buckets
|
||||
}
|
||||
|
||||
// convert converts an interface value to a float64
|
||||
func convert(in interface{}) (float64, bool) {
|
||||
switch v := in.(type) {
|
||||
case float64:
|
||||
return v, true
|
||||
case int64:
|
||||
return float64(v), true
|
||||
default:
|
||||
return 0, false
|
||||
}
|
||||
}
|
||||
|
||||
// copyTags copies tags
|
||||
func copyTags(tags map[string]string) map[string]string {
|
||||
copiedTags := map[string]string{}
|
||||
for key, val := range tags {
|
||||
copiedTags[key] = val
|
||||
}
|
||||
|
||||
return copiedTags
|
||||
}
|
||||
|
||||
// isTagsIdentical reports whether two tag maps are identical
|
||||
func isTagsIdentical(originalTags, checkedTags map[string]string) bool {
|
||||
if len(originalTags) != len(checkedTags) {
|
||||
return false
|
||||
}
|
||||
|
||||
for tagName, tagValue := range originalTags {
|
||||
if tagValue != checkedTags[tagName] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// makeFieldsWithCount assigns count value to all metric fields
|
||||
func makeFieldsWithCount(fieldsWithCountIn map[string]int64) map[string]interface{} {
|
||||
fieldsWithCountOut := map[string]interface{}{}
|
||||
for field, count := range fieldsWithCountIn {
|
||||
fieldsWithCountOut[field+"_bucket"] = count
|
||||
}
|
||||
|
||||
return fieldsWithCountOut
|
||||
}
|
||||
|
||||
// init initializes histogram aggregator plugin
|
||||
func init() {
|
||||
aggregators.Add("histogram", func() telegraf.Aggregator {
|
||||
return NewHistogramAggregator()
|
||||
})
|
||||
}
|
||||
@@ -1,210 +0,0 @@
|
||||
package histogram
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/metric"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// NewTestHistogram creates a new test histogram aggregator with the specified config
|
||||
func NewTestHistogram(cfg []config) telegraf.Aggregator {
|
||||
htm := &HistogramAggregator{Configs: cfg}
|
||||
htm.buckets = make(bucketsByMetrics)
|
||||
htm.resetCache()
|
||||
|
||||
return htm
|
||||
}
|
||||
|
||||
// firstMetric1 is the first test metric
|
||||
var firstMetric1, _ = metric.New(
|
||||
"first_metric_name",
|
||||
map[string]string{"tag_name": "tag_value"},
|
||||
map[string]interface{}{
|
||||
"a": float64(15.3),
|
||||
"b": float64(40),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
|
||||
// firstMetric2 is the first test metric with different values
|
||||
var firstMetric2, _ = metric.New(
|
||||
"first_metric_name",
|
||||
map[string]string{"tag_name": "tag_value"},
|
||||
map[string]interface{}{
|
||||
"a": float64(15.9),
|
||||
"c": float64(40),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
|
||||
// secondMetric is the second metric
|
||||
var secondMetric, _ = metric.New(
|
||||
"second_metric_name",
|
||||
map[string]string{"tag_name": "tag_value"},
|
||||
map[string]interface{}{
|
||||
"a": float64(105),
|
||||
"ignoreme": "string",
|
||||
"andme": true,
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
|
||||
// BenchmarkApply runs benchmarks
|
||||
func BenchmarkApply(b *testing.B) {
|
||||
histogram := NewHistogramAggregator()
|
||||
|
||||
for n := 0; n < b.N; n++ {
|
||||
histogram.Add(firstMetric1)
|
||||
histogram.Add(firstMetric2)
|
||||
histogram.Add(secondMetric)
|
||||
}
|
||||
}
|
||||
|
||||
// TestHistogramWithPeriodAndOneField tests metrics for one period and for one field
|
||||
func TestHistogramWithPeriodAndOneField(t *testing.T) {
|
||||
var cfg []config
|
||||
cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
|
||||
histogram := NewTestHistogram(cfg)
|
||||
|
||||
acc := &testutil.Accumulator{}
|
||||
|
||||
histogram.Add(firstMetric1)
|
||||
histogram.Add(firstMetric2)
|
||||
histogram.Push(acc)
|
||||
|
||||
if len(acc.Metrics) != 6 {
|
||||
assert.Fail(t, "Incorrect number of metrics")
|
||||
}
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "0")
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "10")
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "20")
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "30")
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "40")
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, bucketInf)
|
||||
}
|
||||
|
||||
// TestHistogramWithPeriodAndAllFields tests two metrics for one period and for all fields
|
||||
func TestHistogramWithPeriodAndAllFields(t *testing.T) {
|
||||
var cfg []config
|
||||
cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 15.5, 20.0, 30.0, 40.0}})
|
||||
cfg = append(cfg, config{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}})
|
||||
histogram := NewTestHistogram(cfg)
|
||||
|
||||
acc := &testutil.Accumulator{}
|
||||
|
||||
histogram.Add(firstMetric1)
|
||||
histogram.Add(firstMetric2)
|
||||
histogram.Add(secondMetric)
|
||||
histogram.Push(acc)
|
||||
|
||||
if len(acc.Metrics) != 12 {
|
||||
assert.Fail(t, "Incorrect number of metrics")
|
||||
}
|
||||
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "0")
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, "15.5")
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "20")
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "30")
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, "40")
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, bucketInf)
|
||||
|
||||
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "0")
|
||||
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "4")
|
||||
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "10")
|
||||
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "23")
|
||||
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "30")
|
||||
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(1), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, bucketInf)
|
||||
}
|
||||
|
||||
// TestHistogramDifferentPeriodsAndAllFields tests two metrics getting added with a push/reset in between (simulates
|
||||
// getting added in different periods) for all fields
|
||||
func TestHistogramDifferentPeriodsAndAllFields(t *testing.T) {
|
||||
|
||||
var cfg []config
|
||||
cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
|
||||
histogram := NewTestHistogram(cfg)
|
||||
|
||||
acc := &testutil.Accumulator{}
|
||||
histogram.Add(firstMetric1)
|
||||
histogram.Push(acc)
|
||||
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0)}, "0")
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0)}, "10")
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0)}, "20")
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0)}, "30")
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(1)}, "40")
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(1)}, bucketInf)
|
||||
|
||||
acc.ClearMetrics()
|
||||
histogram.Add(firstMetric2)
|
||||
histogram.Push(acc)
|
||||
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "0")
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "10")
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "20")
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "30")
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, "40")
|
||||
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, bucketInf)
|
||||
}
|
||||
|
||||
// TestWrongBucketsOrder tests that a panic is raised when buckets are in the wrong order
|
||||
func TestWrongBucketsOrder(t *testing.T) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
assert.Equal(
|
||||
t,
|
||||
"histogram buckets must be in increasing order: 90.00 >= 20.00, metrics: first_metric_name, field: a",
|
||||
fmt.Sprint(r),
|
||||
)
|
||||
}
|
||||
}()
|
||||
|
||||
var cfg []config
|
||||
cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 90.0, 20.0, 30.0, 40.0}})
|
||||
histogram := NewTestHistogram(cfg)
|
||||
histogram.Add(firstMetric2)
|
||||
}
|
||||
|
||||
// assertContainsTaggedField is a helper function for checking histogram data
|
||||
func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricName string, fields map[string]interface{}, le string) {
|
||||
acc.Lock()
|
||||
defer acc.Unlock()
|
||||
|
||||
for _, checkedMetric := range acc.Metrics {
|
||||
// check metric name
|
||||
if checkedMetric.Measurement != metricName {
|
||||
continue
|
||||
}
|
||||
|
||||
// check "le" tag
|
||||
if checkedMetric.Tags[bucketTag] != le {
|
||||
continue
|
||||
}
|
||||
|
||||
// check fields
|
||||
isFieldsIdentical := true
|
||||
for field := range fields {
|
||||
if _, ok := checkedMetric.Fields[field]; !ok {
|
||||
isFieldsIdentical = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if !isFieldsIdentical {
|
||||
continue
|
||||
}
|
||||
|
||||
// check fields with their counts
|
||||
if assert.Equal(t, fields, checkedMetric.Fields) {
|
||||
return
|
||||
}
|
||||
|
||||
assert.Fail(t, fmt.Sprintf("incorrect fields %v of metric %s", fields, metricName))
|
||||
}
|
||||
|
||||
assert.Fail(t, fmt.Sprintf("unknown measurement '%s' with tags: %v, fields: %v", metricName, map[string]string{"le": le}, fields))
|
||||
}
|
||||
@@ -45,6 +45,7 @@ SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar A
|
||||
### Example Output:
|
||||
|
||||
```
|
||||
$ telegraf -input-filter example -test
|
||||
measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455
|
||||
measurement2,tag1=foo,tag2=bar,tag3=baz field3=1i 1453831884664956455
|
||||
```
|
||||
|
||||
@@ -73,9 +73,10 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
|
||||
for _, n := range nodes {
|
||||
tags := map[string]string{
|
||||
"aerospike_host": hostport,
|
||||
"node_name": n.GetName(),
|
||||
}
|
||||
fields := make(map[string]interface{})
|
||||
fields := map[string]interface{}{
|
||||
"node_name": n.GetName(),
|
||||
}
|
||||
stats, err := as.RequestNodeStats(n)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -85,7 +86,7 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
|
||||
if err == nil {
|
||||
fields[strings.Replace(k, "-", "_", -1)] = val
|
||||
} else {
|
||||
log.Printf("I! skipping aerospike field %v with int64 overflow: %q", k, v)
|
||||
log.Printf("I! skipping aerospike field %v with int64 overflow", k)
|
||||
}
|
||||
}
|
||||
acc.AddFields("aerospike_node", fields, tags, time.Now())
|
||||
@@ -99,10 +100,11 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
|
||||
for _, namespace := range namespaces {
|
||||
nTags := map[string]string{
|
||||
"aerospike_host": hostport,
|
||||
"node_name": n.GetName(),
|
||||
}
|
||||
nTags["namespace"] = namespace
|
||||
nFields := make(map[string]interface{})
|
||||
nFields := map[string]interface{}{
|
||||
"node_name": n.GetName(),
|
||||
}
|
||||
info, err := as.RequestNodeInfo(n, "namespace/"+namespace)
|
||||
if err != nil {
|
||||
continue
|
||||
@@ -117,7 +119,7 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
|
||||
if err == nil {
|
||||
nFields[strings.Replace(parts[0], "-", "_", -1)] = val
|
||||
} else {
|
||||
log.Printf("I! skipping aerospike field %v with int64 overflow: %q", parts[0], parts[1])
|
||||
log.Printf("I! skipping aerospike field %v with int64 overflow", parts[0])
|
||||
}
|
||||
}
|
||||
acc.AddFields("aerospike_namespace", nFields, nTags, time.Now())
|
||||
|
||||
@@ -23,10 +23,8 @@ func TestAerospikeStatistics(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.True(t, acc.HasMeasurement("aerospike_node"))
|
||||
assert.True(t, acc.HasTag("aerospike_node", "node_name"))
|
||||
assert.True(t, acc.HasMeasurement("aerospike_namespace"))
|
||||
assert.True(t, acc.HasTag("aerospike_namespace", "node_name"))
|
||||
assert.True(t, acc.HasInt64Field("aerospike_node", "batch_error"))
|
||||
assert.True(t, acc.HasIntField("aerospike_node", "batch_error"))
|
||||
}
|
||||
|
||||
func TestAerospikeStatisticsPartialErr(t *testing.T) {
|
||||
@@ -47,7 +45,7 @@ func TestAerospikeStatisticsPartialErr(t *testing.T) {
|
||||
|
||||
assert.True(t, acc.HasMeasurement("aerospike_node"))
|
||||
assert.True(t, acc.HasMeasurement("aerospike_namespace"))
|
||||
assert.True(t, acc.HasInt64Field("aerospike_node", "batch_error"))
|
||||
assert.True(t, acc.HasIntField("aerospike_node", "batch_error"))
|
||||
}
|
||||
|
||||
func TestAerospikeParseValue(t *testing.T) {
|
||||
|
||||
@@ -21,9 +21,7 @@ import (
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/dovecot"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/exec"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/fail2ban"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/filestat"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/fluentd"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/graylog"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/hddtemp"
|
||||
@@ -37,7 +35,6 @@ import (
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/iptables"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer_legacy"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/kapacitor"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/kubernetes"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/leofs"
|
||||
@@ -46,7 +43,6 @@ import (
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/mesos"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/minecraft"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/mongodb"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/mysql"
|
||||
@@ -57,7 +53,6 @@ import (
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/nstat"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/openldap"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/ping"
|
||||
@@ -72,7 +67,6 @@ import (
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/redis"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/riak"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/salesforce"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/sensors"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/snmp"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy"
|
||||
@@ -83,15 +77,12 @@ import (
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/system"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/tail"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/tomcat"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/trig"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/varnish"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/webhooks"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/win_services"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/zfs"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/zipkin"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"
|
||||
)
|
||||
|
||||
@@ -39,9 +39,9 @@ The following defaults are known to work with RabbitMQ:
|
||||
## Use SSL but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
|
||||
## Data format to consume.
|
||||
## Data format to output.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
||||
data_format = "influx"
|
||||
```
|
||||
|
||||
@@ -85,10 +85,10 @@ func (a *AMQPConsumer) SampleConfig() string {
|
||||
## Use SSL but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
|
||||
## Data format to consume.
|
||||
## Data format to output.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
||||
data_format = "influx"
|
||||
`
|
||||
}
|
||||
|
||||
@@ -1,84 +1,55 @@
|
||||
# Apache Input Plugin
|
||||
# Telegraf plugin: Apache
|
||||
|
||||
The Apache plugin collects server performance information using the [`mod_status`](https://httpd.apache.org/docs/2.4/mod/mod_status.html) module of the [Apache HTTP Server](https://httpd.apache.org/).
|
||||
#### Plugin arguments:
|
||||
- **urls** []string: List of apache-status URLs to collect from. Default is "http://localhost/server-status?auto".
|
||||
- **username** string: Username for HTTP basic authentication
|
||||
- **password** string: Password for HTTP basic authentication
|
||||
- **timeout** duration: time that the HTTP connection will remain waiting for response. Default 4 seconds ("4s")
|
||||
|
||||
Typically, the `mod_status` module is configured to expose a page at the `/server-status?auto` location of the Apache server. The [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) option must be enabled in order to collect all available fields. For information about how to configure your server, refer to the [module documentation](https://httpd.apache.org/docs/2.4/mod/mod_status.html#enable).
|
||||
##### Optional SSL Config
|
||||
|
||||
### Configuration:
|
||||
- **ssl_ca** string: the full path for the SSL CA certificate
|
||||
- **ssl_cert** string: the full path for the SSL certificate
|
||||
- **ssl_key** string: the full path for the key file
|
||||
- **insecure_skip_verify** bool: if true HTTP client will skip all SSL verifications related to peer and host. Default to false
|
||||
|
||||
```toml
|
||||
# Read Apache status information (mod_status)
|
||||
[[inputs.apache]]
|
||||
## An array of URLs to gather from, must be directed at the machine
|
||||
## readable version of the mod_status page including the auto query string.
|
||||
## Default is "http://localhost/server-status?auto".
|
||||
urls = ["http://localhost/server-status?auto"]
|
||||
#### Description
|
||||
|
||||
## Credentials for basic HTTP authentication.
|
||||
# username = "myuser"
|
||||
# password = "mypassword"
|
||||
The Apache plugin collects from the /server-status?auto URL. See
|
||||
[apache.org/server-status?auto](http://www.apache.org/server-status?auto) for an
|
||||
example. And
|
||||
[here](http://httpd.apache.org/docs/2.2/mod/mod_status.html) for the apache
|
||||
mod_status documentation.
|
||||
|
||||
## Maximum time to receive response.
|
||||
# response_timeout = "5s"
|
||||
# Measurements:
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
```
|
||||
Meta:
|
||||
- tags: `port=<port>`, `server=url`
|
||||
|
||||
### Measurements & Fields:
|
||||
- apache_TotalAccesses
|
||||
- apache_TotalkBytes
|
||||
- apache_CPULoad
|
||||
- apache_Uptime
|
||||
- apache_ReqPerSec
|
||||
- apache_BytesPerSec
|
||||
- apache_BytesPerReq
|
||||
- apache_BusyWorkers
|
||||
- apache_IdleWorkers
|
||||
- apache_ConnsTotal
|
||||
- apache_ConnsAsyncWriting
|
||||
- apache_ConnsAsyncKeepAlive
|
||||
- apache_ConnsAsyncClosing
|
||||
|
||||
- apache
|
||||
- BusyWorkers (float)
|
||||
- BytesPerReq (float)
|
||||
- BytesPerSec (float)
|
||||
- ConnsAsyncClosing (float)
|
||||
- ConnsAsyncKeepAlive (float)
|
||||
- ConnsAsyncWriting (float)
|
||||
- ConnsTotal (float)
|
||||
- CPUChildrenSystem (float)
|
||||
- CPUChildrenUser (float)
|
||||
- CPULoad (float)
|
||||
- CPUSystem (float)
|
||||
- CPUUser (float)
|
||||
- IdleWorkers (float)
|
||||
- Load1 (float)
|
||||
- Load5 (float)
|
||||
- Load15 (float)
|
||||
- ParentServerConfigGeneration (float)
|
||||
- ParentServerMPMGeneration (float)
|
||||
- ReqPerSec (float)
|
||||
- ServerUptimeSeconds (float)
|
||||
- TotalAccesses (float)
|
||||
- TotalkBytes (float)
|
||||
- Uptime (float)
|
||||
### Scoreboard measurements
|
||||
|
||||
The following fields are collected from the `Scoreboard`, and represent the number of requests in the given state:
|
||||
|
||||
- apache
|
||||
- scboard_closing (float)
|
||||
- scboard_dnslookup (float)
|
||||
- scboard_finishing (float)
|
||||
- scboard_idle_cleanup (float)
|
||||
- scboard_keepalive (float)
|
||||
- scboard_logging (float)
|
||||
- scboard_open (float)
|
||||
- scboard_reading (float)
|
||||
- scboard_sending (float)
|
||||
- scboard_starting (float)
|
||||
- scboard_waiting (float)
|
||||
|
||||
### Tags:
|
||||
|
||||
- All measurements have the following tags:
|
||||
- port
|
||||
- server
|
||||
|
||||
### Example Output:
|
||||
|
||||
```
|
||||
apache,port=80,server=debian-stretch-apache BusyWorkers=1,BytesPerReq=0,BytesPerSec=0,CPUChildrenSystem=0,CPUChildrenUser=0,CPULoad=0.00995025,CPUSystem=0.01,CPUUser=0.01,ConnsAsyncClosing=0,ConnsAsyncKeepAlive=0,ConnsAsyncWriting=0,ConnsTotal=0,IdleWorkers=49,Load1=0.01,Load15=0,Load5=0,ParentServerConfigGeneration=3,ParentServerMPMGeneration=2,ReqPerSec=0.00497512,ServerUptimeSeconds=201,TotalAccesses=1,TotalkBytes=0,Uptime=201,scboard_closing=0,scboard_dnslookup=0,scboard_finishing=0,scboard_idle_cleanup=0,scboard_keepalive=0,scboard_logging=0,scboard_open=100,scboard_reading=0,scboard_sending=1,scboard_starting=0,scboard_waiting=49 1502489900000000000
|
||||
```
|
||||
- apache_scboard_waiting
|
||||
- apache_scboard_starting
|
||||
- apache_scboard_reading
|
||||
- apache_scboard_sending
|
||||
- apache_scboard_keepalive
|
||||
- apache_scboard_dnslookup
|
||||
- apache_scboard_closing
|
||||
- apache_scboard_logging
|
||||
- apache_scboard_finishing
|
||||
- apache_scboard_idle_cleanup
|
||||
- apache_scboard_open
|
||||
|
||||
@@ -29,22 +29,18 @@ type Apache struct {
|
||||
SSLKey string `toml:"ssl_key"`
|
||||
// Use SSL but skip chain & host verification
|
||||
InsecureSkipVerify bool
|
||||
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
## An array of URLs to gather from, must be directed at the machine
|
||||
## readable version of the mod_status page including the auto query string.
|
||||
## An array of Apache status URI to gather stats.
|
||||
## Default is "http://localhost/server-status?auto".
|
||||
urls = ["http://localhost/server-status?auto"]
|
||||
## user credentials for basic HTTP authentication
|
||||
username = "myuser"
|
||||
password = "mypassword"
|
||||
|
||||
## Credentials for basic HTTP authentication.
|
||||
# username = "myuser"
|
||||
# password = "mypassword"
|
||||
|
||||
## Maximum time to receive response.
|
||||
# response_timeout = "5s"
|
||||
## Timeout for the complete connection and response time in seconds
|
||||
response_timeout = "25s" ## default to 5 seconds
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
@@ -70,14 +66,6 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
|
||||
n.ResponseTimeout.Duration = time.Second * 5
|
||||
}
|
||||
|
||||
if n.client == nil {
|
||||
client, err := n.createHttpClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n.client = client
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(n.Urls))
|
||||
for _, u := range n.Urls {
|
||||
@@ -97,24 +85,31 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *Apache) createHttpClient() (*http.Client, error) {
|
||||
tlsCfg, err := internal.GetTLSConfig(
|
||||
n.SSLCert, n.SSLKey, n.SSLCA, n.InsecureSkipVerify)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
|
||||
|
||||
var tr *http.Transport
|
||||
|
||||
if addr.Scheme == "https" {
|
||||
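// Build a TLS config only for https URLs; plain http uses a bare transport.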
tlsCfg, err := internal.GetTLSConfig(
|
||||
n.SSLCert, n.SSLKey, n.SSLCA, n.InsecureSkipVerify)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tr = &http.Transport{
|
||||
ResponseHeaderTimeout: time.Duration(3 * time.Second),
|
||||
TLSClientConfig: tlsCfg,
|
||||
}
|
||||
} else {
|
||||
tr = &http.Transport{
|
||||
ResponseHeaderTimeout: time.Duration(3 * time.Second),
|
||||
}
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: tlsCfg,
|
||||
},
|
||||
Timeout: n.ResponseTimeout.Duration,
|
||||
Transport: tr,
|
||||
Timeout: n.ResponseTimeout.Duration,
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
	req, err := http.NewRequest("GET", addr.String(), nil)
	if err != nil {
		return fmt.Errorf("error on new request to %s : %s\n", addr.String(), err)
@@ -124,7 +119,7 @@ func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
		req.SetBasicAuth(n.Username, n.Password)
	}

	resp, err := n.client.Do(req)
	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("error on request to %s : %s\n", addr.String(), err)
	}

@@ -70,7 +70,7 @@ Using this configuration:
When run with:

```
./telegraf --config telegraf.conf --input-filter bcache --test
./telegraf -config telegraf.conf -input-filter bcache -test
```

It produces:

@@ -296,7 +296,7 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
			continue
		}
		if out["status"] != 200.0 {
			acc.AddError(fmt.Errorf("URL returned with status %v - %s\n", out["status"], requestUrl))
			acc.AddError(fmt.Errorf("URL returned with status %v\n", out["status"]))
			continue
		}
		m.addTagsFields(out)

@@ -200,7 +200,7 @@ All measurements will have the following tags:
*Admin Socket Stats*

<pre>
telegraf --config /etc/telegraf/telegraf.conf --config-directory /etc/telegraf/telegraf.d --input-filter ceph --test
telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d -input-filter ceph
* Plugin: ceph, Collection 1
> ceph,collection=paxos, id=node-2,role=openstack,type=mon accept_timeout=0,begin=14931264,begin_bytes.avgcount=14931264,begin_bytes.sum=180309683362,begin_keys.avgcount=0,begin_keys.sum=0,begin_latency.avgcount=14931264,begin_latency.sum=9293.29589,collect=1,collect_bytes.avgcount=1,collect_bytes.sum=24,collect_keys.avgcount=1,collect_keys.sum=1,collect_latency.avgcount=1,collect_latency.sum=0.00028,collect_timeout=0,collect_uncommitted=0,commit=14931264,commit_bytes.avgcount=0,commit_bytes.sum=0,commit_keys.avgcount=0,commit_keys.sum=0,commit_latency.avgcount=0,commit_latency.sum=0,lease_ack_timeout=0,lease_timeout=0,new_pn=0,new_pn_latency.avgcount=0,new_pn_latency.sum=0,refresh=14931264,refresh_latency.avgcount=14931264,refresh_latency.sum=8706.98498,restart=4,share_state=0,share_state_bytes.avgcount=0,share_state_bytes.sum=0,share_state_keys.avgcount=0,share_state_keys.sum=0,start_leader=0,start_peon=1,store_state=14931264,store_state_bytes.avgcount=14931264,store_state_bytes.sum=353119959211,store_state_keys.avgcount=14931264,store_state_keys.sum=289807523,store_state_latency.avgcount=14931264,store_state_latency.sum=10952.835724 1462821234814535148
> ceph,collection=throttle-mon_client_bytes,id=node-2,type=mon get=1413017,get_or_fail_fail=0,get_or_fail_success=0,get_sum=71211705,max=104857600,put=1413013,put_sum=71211459,take=0,take_sum=0,val=246,wait.avgcount=0,wait.sum=0 1462821234814737219
</pre>

@@ -63,7 +63,6 @@ Delete second or Not synchronised.
### Measurements & Fields:

- chrony
    - system_time (float, seconds)
    - last_offset (float, seconds)
    - rms_offset (float, seconds)
    - frequency (float, ppm)
@@ -83,9 +82,9 @@ Delete second or Not synchronised.
### Example Output:

```
$ telegraf --config telegraf.conf --input-filter chrony --test
$ telegraf -config telegraf.conf -input-filter chrony -test
* Plugin: chrony, Collection 1
> chrony,leap_status=normal,reference_id=192.168.1.1,stratum=3 frequency=-35.657,system_time=0.000027073,last_offset=-0.000013616,residual_freq=-0,rms_offset=0.000027073,root_delay=0.000644,root_dispersion=0.003444,skew=0.001,update_interval=1031.2 1463750789687639161
> chrony,leap_status=normal,reference_id=192.168.1.1,stratum=3 frequency=-35.657,last_offset=-0.000013616,residual_freq=-0,rms_offset=0.000027073,root_delay=0.000644,root_dispersion=0.003444,skew=0.001,update_interval=1031.2 1463750789687639161
```

@@ -1,3 +1,5 @@
// +build linux

package chrony

import (
@@ -90,7 +92,7 @@ func processChronycOutput(out string) (map[string]interface{}, map[string]string
	}
	name := strings.ToLower(strings.Replace(strings.TrimSpace(stats[0]), " ", "_", -1))
	// ignore reference time
	if strings.Contains(name, "ref_time") {
	if strings.Contains(name, "time") {
		continue
	}
	valueFields := strings.Fields(stats[1])

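The normalization above lowercases each chronyc field name and replaces inner spaces with underscores, which is how names like `Last offset` become the `last_offset` field shown in the example output. A quick standalone check of that one line (the `main` scaffolding is illustrative only):

```
package main

import (
	"fmt"
	"strings"
)

func main() {
	// "  Last offset " -> "last_offset", matching the field names
	// in the chrony example output above.
	raw := "  Last offset "
	name := strings.ToLower(strings.Replace(strings.TrimSpace(raw), " ", "_", -1))
	fmt.Println(name) // last_offset
}
```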
3
plugins/inputs/chrony/chrony_notlinux.go
Normal file
@@ -0,0 +1,3 @@
// +build !linux

package chrony
@@ -1,3 +1,5 @@
// +build linux

package chrony

import (
@@ -29,7 +31,6 @@ func TestGather(t *testing.T) {
		"stratum": "3",
	}
	fields := map[string]interface{}{
		"system_time": 0.000020390,
		"last_offset": 0.000012651,
		"rms_offset":  0.000025577,
		"frequency":   -16.001,

@@ -9,8 +9,8 @@ API endpoint. In the following order the plugin will attempt to authenticate.
1. Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules)
2. Explicit credentials from `access_key`, `secret_key`, and `token` attributes
3. Shared profile from `profile` attribute
4. [Environment Variables](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#environment-variables)
5. [Shared Credentials](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#shared-credentials-file)
4. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables)
5. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file)
6. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)

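For illustration, the ordering above maps naturally onto aws-sdk-go's chained credential providers. A hedged sketch of that chain (this is not the plugin's actual code; the provider list and the placeholder key values are assumptions based on the SDK's documented API):

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Providers are tried in order until one succeeds, mirroring the
	// documented ordering: static keys, then environment variables,
	// then the shared credentials file.
	chain := credentials.NewChainCredentials([]credentials.Provider{
		&credentials.StaticProvider{Value: credentials.Value{
			AccessKeyID:     "AKID",   // hypothetical key
			SecretAccessKey: "SECRET", // hypothetical secret
		}},
		&credentials.EnvProvider{},
		&credentials.SharedCredentialsProvider{},
	})
	creds, err := chain.Get()
	if err != nil {
		fmt.Println("no credentials resolved:", err)
		return
	}
	fmt.Println("resolved provider:", creds.ProviderName)
}
```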
### Configuration:

@@ -20,24 +20,9 @@ API endpoint. In the following order the plugin will attempt to authenticate.
  ## Amazon Region (required)
  region = "us-east-1"

  ## Amazon Credentials
  ## Credentials are loaded in the following order
  ## 1) Assumed credentials via STS if role_arn is specified
  ## 2) explicit credentials from 'access_key' and 'secret_key'
  ## 3) shared profile from 'profile'
  ## 4) environment variables
  ## 5) shared credentials file
  ## 6) EC2 Instance Profile
  #access_key = ""
  #secret_key = ""
  #token = ""
  #role_arn = ""
  #profile = ""
  #shared_credential_file = ""

  # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
  # metrics are made available to the 1 minute period. Some are collected at
  # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
  # 3 minute and 5 minutes intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
  # Note that if a period is configured that is smaller than the minimum for a
  # particular metric, that metric will not be returned by the Cloudwatch API
  # and will not be collected by Telegraf.
@@ -72,6 +57,10 @@ API endpoint. In the following order the plugin will attempt to authenticate.
    [[inputs.cloudwatch.metrics.dimensions]]
      name = "LoadBalancerName"
      value = "p-example"

    [[inputs.cloudwatch.metrics.dimensions]]
      name = "AvailabilityZone"
      value = "*"
```
#### Requirements and Terminology

@@ -145,6 +134,6 @@ Tag Dimension names are represented in [snake case](https://en.wikipedia.org/wik
### Example Output:

```
$ ./telegraf --config telegraf.conf --input-filter cloudwatch --test
$ ./telegraf -config telegraf.conf -input-filter cloudwatch -test
> cloudwatch_aws_elb,load_balancer_name=p-example,region=us-east-1,unit=seconds latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875,latency_sample_count=4029,latency_sum=19.382705211639404 1459542420000000000
```

@@ -81,7 +81,7 @@ func (c *CloudWatch) SampleConfig() string {

  # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
  # metrics are made available to the 1 minute period. Some are collected at
  # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
  # 3 minute and 5 minutes intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
  # Note that if a period is configured that is smaller than the minimum for a
  # particular metric, that metric will not be returned by the Cloudwatch API
  # and will not be collected by Telegraf.

@@ -51,6 +51,6 @@ This input does not use tags.
### Example Output:

```
$ ./telegraf --config telegraf.conf --input-filter conntrack --test
$ ./telegraf -config telegraf.conf -input-filter conntrack -test
conntrack,host=myhost ip_conntrack_count=2,ip_conntrack_max=262144 1461620427667995735
```

@@ -1,6 +1,6 @@
# Telegraf Input Plugin: Consul

This plugin will collect statistics about all health checks registered in the Consul. It uses [Consul API](https://www.consul.io/docs/agent/http/health.html#health_state)
This plugin will collect statistics about all helath checks registered in the Consul. It uses [Consul API](https://www.consul.io/docs/agent/http/health.html#health_state)
to query the data. It will not report the [telemetry](https://www.consul.io/docs/agent/telemetry.html) but Consul can report those stats already using StatsD protocol if needed.

## Configuration:
@@ -46,7 +46,7 @@ the health check at this sample.
## Example output

```
$ telegraf --config ./telegraf.conf --input-filter consul --test
$ telegraf --config ./telegraf.conf -input-filter consul -test
* Plugin: consul, Collection 1
> consul_health_checks,host=wolfpit,node=consul-server-node,check_id="serfHealth" check_name="Serf Health Status",service_id="",status="passing",passing=1i,critical=0i,warning=0i 1464698464486439902
> consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com,check_id="service:www-example-com.test01" check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical",passing=0i,critical=1i,warning=0i 1464698464486519036
```

@@ -22,7 +22,7 @@
### couchbase_node

Tags:
- cluster: sanitized string from `servers` configuration field e.g.: `http://user:password@couchbase-0.example.com:8091/endpoint` -> `http://couchbase-0.example.com:8091/endpoint`
- cluster: whatever you called it in `servers` in the configuration, e.g.: `http://couchbase-0.example.com/`
- hostname: Couchbase's name for the node and port, e.g., `172.16.10.187:8091`

Fields:
@@ -48,7 +48,7 @@ Fields:
## Example output

```
$ telegraf --config telegraf.conf --input-filter couchbase --test
$ telegraf -config telegraf.conf -input-filter couchbase -test
* Plugin: couchbase, Collection 1
> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.10.187:8091 memory_free=22927384576,memory_total=64424656896 1458381183695864929
> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.10.65:8091 memory_free=23520161792,memory_total=64424656896 1458381183695972112
```

@@ -1,12 +1,10 @@
package couchbase

import (
	"regexp"
	"sync"

	couchbase "github.com/couchbase/go-couchbase"
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
	"sync"
)

type Couchbase struct {
@@ -26,8 +24,6 @@ var sampleConfig = `
  servers = ["http://localhost:8091"]
`

var regexpURI = regexp.MustCompile(`(\S+://)?(\S+\:\S+@)`)

func (r *Couchbase) SampleConfig() string {
	return sampleConfig
}
@@ -75,17 +71,15 @@ func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *co
	}
	pool = &p
	}

	for i := 0; i < len(pool.Nodes); i++ {
		node := pool.Nodes[i]
		tags := map[string]string{"cluster": regexpURI.ReplaceAllString(addr, "${1}"), "hostname": node.Hostname}
		tags := map[string]string{"cluster": addr, "hostname": node.Hostname}
		fields := make(map[string]interface{})
		fields["memory_free"] = node.MemoryFree
		fields["memory_total"] = node.MemoryTotal
		acc.AddFields("couchbase_node", fields, tags)
	}

	for bucketName := range pool.BucketMap {
	for bucketName, _ := range pool.BucketMap {
		tags := map[string]string{"cluster": addr, "bucket": bucketName}
		bs := pool.BucketMap[bucketName].BasicStats
		fields := make(map[string]interface{})

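The new `regexpURI` above strips embedded credentials from the configured server URL before it becomes the `cluster` tag, which is what the README change describes. A standalone check of that exact regex and replacement (only the `main` scaffolding is added here):

```
package main

import (
	"fmt"
	"regexp"
)

var regexpURI = regexp.MustCompile(`(\S+://)?(\S+\:\S+@)`)

func main() {
	addr := "http://user:password@couchbase-0.example.com:8091/endpoint"
	// Keep the scheme (group 1), drop the user:password@ part (group 2).
	fmt.Println(regexpURI.ReplaceAllString(addr, "${1}"))
	// Output: http://couchbase-0.example.com:8091/endpoint
}
```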
File diff suppressed because one or more lines are too long
@@ -63,7 +63,7 @@ httpd statistics:
### Example output:

```
➜  telegraf git:(master) ✗ ./telegraf --config ./config.conf --input-filter couchdb --test
➜  telegraf git:(master) ✗ ./telegraf -config ./config.conf -input-filter couchdb -test
* Plugin: couchdb,
Collection 1
> couchdb,server=http://localhost:5984/_stats couchdb_auth_cache_hits_current=0,
```

@@ -1,5 +1,3 @@
// +build linux

package dmcache

import (
@@ -8,23 +8,19 @@ The DNS plugin gathers dns query times in milliseconds - like [Dig](https://en.wi
# Sample Config:
[[inputs.dns_query]]
  ## servers to query
  servers = ["8.8.8.8"]
  servers = ["8.8.8.8"] # required

  ## Network is the network protocol name.
  # network = "udp"
  ## Domains or subdomains to query. "." (root) is default
  domains = ["."] # optional

  ## Domains or subdomains to query.
  # domains = ["."]
  ## Query record type. Possible values: A, AAAA, ANY, CNAME, MX, NS, PTR, SOA, SPF, SRV, TXT. Default is "NS"
  record_type = "A" # optional

  ## Query record type.
  ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
  # record_type = "A"
  ## Dns server port. 53 is default
  port = 53 # optional

  ## Dns server port.
  # port = 53

  ## Query timeout in seconds.
  # timeout = 2
  ## Query timeout in seconds. Default is 2 seconds
  timeout = 2 # optional
```

For querying more than one record type:
@@ -50,6 +46,6 @@ For querying more than one record type:
### Example output:

```
telegraf --input-filter dns_query --test
./telegraf -config telegraf.conf -test -input-filter dns_query -test
> dns_query,domain=mjasion.pl,record_type=A,server=8.8.8.8 query_time_ms=67.189842 1456082743585760680
```

@@ -3,12 +3,11 @@ package dns_query

import (
	"errors"
	"fmt"
	"github.com/miekg/dns"
	"net"
	"strconv"
	"time"

	"github.com/miekg/dns"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -17,9 +16,6 @@ type DnsQuery struct {
	// Domains or subdomains to query
	Domains []string

	// Network protocol name
	Network string

	// Server to query
	Servers []string

@@ -35,23 +31,20 @@ type DnsQuery struct {

var sampleConfig = `
  ## servers to query
  servers = ["8.8.8.8"]
  servers = ["8.8.8.8"] # required

  ## Network is the network protocol name.
  # network = "udp"
  ## Domains or subdomains to query. "."(root) is default
  domains = ["."] # optional

  ## Domains or subdomains to query.
  # domains = ["."]

  ## Query record type.
  ## Query record type. Default is "A"
  ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
  # record_type = "A"
  record_type = "A" # optional

  ## Dns server port.
  # port = 53
  ## Dns server port. 53 is default
  port = 53 # optional

  ## Query timeout in seconds.
  # timeout = 2
  ## Query timeout in seconds. Default is 2 seconds
  timeout = 2 # optional
`

func (d *DnsQuery) SampleConfig() string {
@@ -83,10 +76,6 @@ func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
}

func (d *DnsQuery) setDefaultValues() {
	if d.Network == "" {
		d.Network = "udp"
	}

	if len(d.RecordType) == 0 {
		d.RecordType = "NS"
	}
@@ -110,7 +99,6 @@ func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, error

	c := new(dns.Client)
	c.ReadTimeout = time.Duration(d.Timeout) * time.Second
	c.Net = d.Network

	m := new(dns.Msg)
	recordType, err := d.parseRecordType()

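The timing logic above boils down to a single `dns.Client.Exchange` call, which returns the round-trip time directly. A minimal standalone sketch of measuring a query with miekg/dns (the server, domain, and port values are examples, not plugin defaults):

```
package main

import (
	"fmt"
	"net"
	"strconv"
	"time"

	"github.com/miekg/dns"
)

func main() {
	c := new(dns.Client)
	c.ReadTimeout = 2 * time.Second

	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.com"), dns.TypeA)

	// Exchange returns the response, the round-trip time, and any error.
	_, rtt, err := c.Exchange(m, net.JoinHostPort("8.8.8.8", strconv.Itoa(53)))
	if err != nil {
		fmt.Println("query failed:", err)
		return
	}
	fmt.Printf("query_time_ms=%f\n", float64(rtt)/float64(time.Millisecond))
}
```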
@@ -1,11 +1,15 @@
# Docker Input Plugin

The docker plugin uses the Docker Engine API to gather metrics on running
docker containers.
The docker plugin uses the docker remote API to gather metrics on running
docker containers. You can read Docker's documentation for their remote API
[here](https://docs.docker.com/engine/reference/api/docker_remote_api_v1.20/#get-container-stats-based-on-resource-usage)

The docker plugin uses the [Official Docker Client](https://github.com/moby/moby/tree/master/client)
to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/).
[Library Documentation](https://godoc.org/github.com/moby/moby/client)
The docker plugin uses the excellent
[docker engine-api](https://github.com/docker/engine-api) library to
gather stats. Documentation for the library can be found
[here](https://godoc.org/github.com/docker/engine-api) and documentation
for the stat structure can be found
[here](https://godoc.org/github.com/docker/engine-api/types#Stats)

### Configuration:

@@ -16,47 +20,24 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/)
  ## To use TCP, set endpoint = "tcp://[ip]:[port]"
  ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
  endpoint = "unix:///var/run/docker.sock"

  ## Only collect metrics for these containers. Values will be appended to
  ## container_name_include.
  ## Deprecated (1.4.0), use container_name_include
  ## Only collect metrics for these containers, collect all if empty
  container_names = []

  ## Containers to include and exclude. Collect all if empty. Globs accepted.
  container_name_include = []
  container_name_exclude = []

  ## Timeout for docker list, info, and stats commands
  timeout = "5s"

  ## Whether to report for each container per-device blkio (8:0, 8:1...) and
  ## network (eth0, eth1, ...) stats or not
  perdevice = true

  ## Whether to report for each container total blkio and network stats or not
  total = false

  ## docker labels to include and exclude as tags. Globs accepted.
  ## Note that an empty array for both will include all labels as tags
  docker_label_include = []
  docker_label_exclude = []

  ## Which environment variables should we use as a tag
  tag_env = ["JAVA_HOME", "HEAP_SIZE"]

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
```

#### Environment Configuration

When using the `"ENV"` endpoint, the connection is configured using the
[cli Docker environment variables](https://godoc.org/github.com/moby/moby/client#NewEnvClient).

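A hedged sketch of what the `"ENV"` path amounts to, using the moby client the README links to (`DOCKER_HOST`, `DOCKER_TLS_VERIFY`, and `DOCKER_CERT_PATH` drive the connection; the `main` scaffolding is illustrative only, not plugin code):

```
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// NewEnvClient reads DOCKER_HOST and friends from the environment,
	// which is what endpoint = "ENV" selects in the config above.
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	info, err := cli.Info(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("containers:", info.Containers)
}
```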
### Measurements & Fields:

Every effort was made to preserve the names based on the JSON response from the
@@ -186,7 +167,7 @@ based on the availability of per-cpu stats on your system.
### Example Output:

```
% ./telegraf --config ~/ws/telegraf.conf --input-filter docker --test
% ./telegraf -config ~/ws/telegraf.conf -input-filter docker -test
* Plugin: docker, Collection 1
> docker n_cpus=8i 1456926671065383978
> docker n_used_file_descriptors=15i 1456926671065383978
```

@@ -1,67 +0,0 @@
package docker

import (
	"context"
	"crypto/tls"
	"net/http"

	"github.com/docker/docker/api/types"
	docker "github.com/docker/docker/client"
	"github.com/docker/go-connections/sockets"
)

var (
	version        string
	defaultHeaders = map[string]string{"User-Agent": "engine-api-cli-1.0"}
)

type Client interface {
	Info(ctx context.Context) (types.Info, error)
	ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
	ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error)
	ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error)
}

func NewEnvClient() (Client, error) {
	client, err := docker.NewEnvClient()
	if err != nil {
		return nil, err
	}
	return &SocketClient{client}, nil
}

func NewClient(host string, tlsConfig *tls.Config) (Client, error) {
	proto, addr, _, err := docker.ParseHost(host)
	if err != nil {
		return nil, err
	}

	transport := &http.Transport{
		TLSClientConfig: tlsConfig,
	}
	sockets.ConfigureTransport(transport, proto, addr)
	httpClient := &http.Client{Transport: transport}

	client, err := docker.NewClient(host, version, httpClient, defaultHeaders)
	if err != nil {
		return nil, err
	}
	return &SocketClient{client}, nil
}

type SocketClient struct {
	client *docker.Client
}

func (c *SocketClient) Info(ctx context.Context) (types.Info, error) {
	return c.client.Info(ctx)
}
func (c *SocketClient) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
	return c.client.ContainerList(ctx, options)
}
func (c *SocketClient) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
	return c.client.ContainerStats(ctx, containerID, stream)
}
func (c *SocketClient) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
	return c.client.ContainerInspect(ctx, containerID)
}
@@ -2,11 +2,9 @@ package docker

import (
	"context"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"regexp"
	"strconv"
	"strings"
@@ -14,6 +12,7 @@ import (
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/filter"
	"github.com/influxdata/telegraf/internal"
@@ -25,40 +24,59 @@ type DockerLabelFilter struct {
	labelExclude filter.Filter
}

type DockerContainerFilter struct {
	containerInclude filter.Filter
	containerExclude filter.Filter
}

// Docker object
type Docker struct {
	Endpoint       string
	ContainerNames []string

	Timeout        internal.Duration
	PerDevice      bool     `toml:"perdevice"`
	Total          bool     `toml:"total"`
	TagEnvironment []string `toml:"tag_env"`
	LabelInclude   []string `toml:"docker_label_include"`
	LabelExclude   []string `toml:"docker_label_exclude"`
	LabelFilter    DockerLabelFilter

	ContainerInclude []string `toml:"container_name_include"`
	ContainerExclude []string `toml:"container_name_exclude"`
	ContainerFilter  DockerContainerFilter
	LabelFilter      DockerLabelFilter

	SSLCA              string `toml:"ssl_ca"`
	SSLCert            string `toml:"ssl_cert"`
	SSLKey             string `toml:"ssl_key"`
	InsecureSkipVerify bool
	client             *client.Client
	engine_host        string

	newEnvClient func() (Client, error)
	newClient    func(string, *tls.Config) (Client, error)
	testing             bool
	labelFiltersCreated bool
}

	client         Client
	httpClient     *http.Client
	engine_host    string
	filtersCreated bool
// infoWrapper wraps client.Client.List for testing.
func infoWrapper(c *client.Client, ctx context.Context) (types.Info, error) {
	if c != nil {
		return c.Info(ctx)
	}
	fc := FakeDockerClient{}
	return fc.Info(ctx)
}

// listWrapper wraps client.Client.ContainerList for testing.
func listWrapper(
	c *client.Client,
	ctx context.Context,
	options types.ContainerListOptions,
) ([]types.Container, error) {
	if c != nil {
		return c.ContainerList(ctx, options)
	}
	fc := FakeDockerClient{}
	return fc.ContainerList(ctx, options)
}

// statsWrapper wraps client.Client.ContainerStats for testing.
func statsWrapper(
	c *client.Client,
	ctx context.Context,
	containerID string,
	stream bool,
) (types.ContainerStats, error) {
	if c != nil {
		return c.ContainerStats(ctx, containerID, stream)
	}
	fc := FakeDockerClient{}
	return fc.ContainerStats(ctx, containerID, stream)
}

// KB, MB, GB, TB, PB...human friendly
@@ -68,8 +86,6 @@ const (
	GB = 1000 * MB
	TB = 1000 * GB
	PB = 1000 * TB

	defaultEndpoint = "unix:///var/run/docker.sock"
)

var (
@@ -81,15 +97,8 @@ var sampleConfig = `
  ## To use TCP, set endpoint = "tcp://[ip]:[port]"
  ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
  endpoint = "unix:///var/run/docker.sock"

  ## Only collect metrics for these containers, collect all if empty
  container_names = []

  ## Containers to include and exclude. Globs accepted.
  ## Note that an empty array for both will include all containers
  container_name_include = []
  container_name_exclude = []

  ## Timeout for docker list, info, and stats commands
  timeout = "5s"

@@ -98,60 +107,52 @@ var sampleConfig = `
  perdevice = true
  ## Whether to report for each container total blkio and network stats or not
  total = false
  ## Which environment variables should we use as a tag
  ##tag_env = ["JAVA_HOME", "HEAP_SIZE"]

  ## docker labels to include and exclude as tags. Globs accepted.
  ## Note that an empty array for both will include all labels as tags
  docker_label_include = []
  docker_label_exclude = []

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
`

// Description returns input description
func (d *Docker) Description() string {
	return "Read metrics about docker containers"
}

// SampleConfig prints sampleConfig
func (d *Docker) SampleConfig() string { return sampleConfig }

// Gather starts stats collection
func (d *Docker) Gather(acc telegraf.Accumulator) error {
	if d.client == nil {
		var c Client
	if d.client == nil && !d.testing {
		var c *client.Client
		var err error
		defaultHeaders := map[string]string{"User-Agent": "engine-api-cli-1.0"}
		if d.Endpoint == "ENV" {
			c, err = d.newEnvClient()
		} else {
			tlsConfig, err := internal.GetTLSConfig(
				d.SSLCert, d.SSLKey, d.SSLCA, d.InsecureSkipVerify)
			c, err = client.NewEnvClient()
			if err != nil {
				return err
			}
		} else if d.Endpoint == "" {
			c, err = client.NewClient("unix:///var/run/docker.sock", "", nil, defaultHeaders)
			if err != nil {
				return err
			}
		} else {
			c, err = client.NewClient(d.Endpoint, "", nil, defaultHeaders)
			if err != nil {
				return err
			}

			c, err = d.newClient(d.Endpoint, tlsConfig)
		}
		if err != nil {
			return err
		}
		d.client = c
	}

	// Create label filters if not already created
	if !d.filtersCreated {
	if !d.labelFiltersCreated {
		err := d.createLabelFilters()
		if err != nil {
			return err
		}
		err = d.createContainerFilters()
		if err != nil {
			return err
		}
		d.filtersCreated = true
		d.labelFiltersCreated = true
	}

	// Get daemon info
@@ -164,7 +165,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
	opts := types.ContainerListOptions{}
	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
	defer cancel()
	containers, err := d.client.ContainerList(ctx, opts)
	containers, err := listWrapper(d.client, ctx, opts)
	if err != nil {
		return err
	}
@@ -195,7 +196,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
	// Get info from docker daemon
	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
	defer cancel()
	info, err := d.client.Info(ctx)
	info, err := infoWrapper(d.client, ctx)
	if err != nil {
		return err
	}
@@ -290,18 +291,15 @@ func (d *Docker) gatherContainer(
		"container_image":   imageName,
		"container_version": imageVersion,
	}

	if len(d.ContainerInclude) > 0 || len(d.ContainerExclude) > 0 {
		if len(d.ContainerInclude) == 0 || !d.ContainerFilter.containerInclude.Match(cname) {
			if len(d.ContainerExclude) == 0 || d.ContainerFilter.containerExclude.Match(cname) {
				return nil
			}
	if len(d.ContainerNames) > 0 {
		if !sliceContains(cname, d.ContainerNames) {
			return nil
		}
	}

	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
	defer cancel()
	r, err := d.client.ContainerStats(ctx, container.ID, false)
	r, err := statsWrapper(d.client, ctx, container.ID, false)
	if err != nil {
		return fmt.Errorf("Error getting docker stats: %s", err.Error())
	}
@@ -313,7 +311,6 @@ func (d *Docker) gatherContainer(
	}
		return fmt.Errorf("Error decoding: %s", err.Error())
	}
	daemonOSType := r.OSType

	// Add labels to tags
	for k, label := range container.Labels {
@@ -324,24 +321,7 @@ func (d *Docker) gatherContainer(
		}
	}

	// Add whitelisted environment variables to tags
	if len(d.TagEnvironment) > 0 {
		info, err := d.client.ContainerInspect(ctx, container.ID)
		if err != nil {
			return fmt.Errorf("Error inspecting docker container: %s", err.Error())
		}
		for _, envvar := range info.Config.Env {
			for _, configvar := range d.TagEnvironment {
				dock_env := strings.SplitN(envvar, "=", 2)
				// check for presence of tag in whitelist
				if len(dock_env) == 2 && len(strings.TrimSpace(dock_env[1])) != 0 && configvar == dock_env[0] {
					tags[dock_env[0]] = dock_env[1]
				}
			}
		}
	}

	gatherContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total, daemonOSType)
	gatherContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total)

	return nil
}

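The whitelist loop above splits each `NAME=value` pair once and tags the container only when the name is whitelisted and the value is non-blank. A standalone illustration of that matching rule (the env list and whitelist here are made-up data, not plugin defaults):

```
package main

import (
	"fmt"
	"strings"
)

func main() {
	env := []string{"JAVA_HOME=/usr/lib/jvm", "HEAP_SIZE=", "PATH=/usr/bin"}
	whitelist := []string{"JAVA_HOME", "HEAP_SIZE"}

	tags := map[string]string{}
	for _, envvar := range env {
		for _, configvar := range whitelist {
			kv := strings.SplitN(envvar, "=", 2)
			// Tag only whitelisted names with non-blank values,
			// so HEAP_SIZE= is skipped here.
			if len(kv) == 2 && len(strings.TrimSpace(kv[1])) != 0 && configvar == kv[0] {
				tags[kv[0]] = kv[1]
			}
		}
	}
	fmt.Println(tags) // map[JAVA_HOME:/usr/lib/jvm]
}
```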
@@ -353,68 +333,46 @@ func gatherContainerStats(
	id string,
	perDevice bool,
	total bool,
	daemonOSType string,
) {
	now := stat.Read

	memfields := map[string]interface{}{
		"container_id": id,
		"max_usage":    stat.MemoryStats.MaxUsage,
		"usage":        stat.MemoryStats.Usage,
		"fail_count":   stat.MemoryStats.Failcnt,
		"limit":        stat.MemoryStats.Limit,
		"total_pgmafault":           stat.MemoryStats.Stats["total_pgmajfault"],
		"cache":                     stat.MemoryStats.Stats["cache"],
		"mapped_file":               stat.MemoryStats.Stats["mapped_file"],
		"total_inactive_file":       stat.MemoryStats.Stats["total_inactive_file"],
		"pgpgout":                   stat.MemoryStats.Stats["pagpgout"],
		"rss":                       stat.MemoryStats.Stats["rss"],
		"total_mapped_file":         stat.MemoryStats.Stats["total_mapped_file"],
		"writeback":                 stat.MemoryStats.Stats["writeback"],
		"unevictable":               stat.MemoryStats.Stats["unevictable"],
		"pgpgin":                    stat.MemoryStats.Stats["pgpgin"],
		"total_unevictable":         stat.MemoryStats.Stats["total_unevictable"],
		"pgmajfault":                stat.MemoryStats.Stats["pgmajfault"],
		"total_rss":                 stat.MemoryStats.Stats["total_rss"],
		"total_rss_huge":            stat.MemoryStats.Stats["total_rss_huge"],
		"total_writeback":           stat.MemoryStats.Stats["total_write_back"],
		"total_inactive_anon":       stat.MemoryStats.Stats["total_inactive_anon"],
		"rss_huge":                  stat.MemoryStats.Stats["rss_huge"],
		"hierarchical_memory_limit": stat.MemoryStats.Stats["hierarchical_memory_limit"],
		"total_pgfault":             stat.MemoryStats.Stats["total_pgfault"],
		"total_active_file":         stat.MemoryStats.Stats["total_active_file"],
		"active_anon":               stat.MemoryStats.Stats["active_anon"],
		"total_active_anon":         stat.MemoryStats.Stats["total_active_anon"],
		"total_pgpgout":             stat.MemoryStats.Stats["total_pgpgout"],
		"total_cache":               stat.MemoryStats.Stats["total_cache"],
		"inactive_anon":             stat.MemoryStats.Stats["inactive_anon"],
		"active_file":               stat.MemoryStats.Stats["active_file"],
		"pgfault":                   stat.MemoryStats.Stats["pgfault"],
		"inactive_file":             stat.MemoryStats.Stats["inactive_file"],
		"total_pgpgin":              stat.MemoryStats.Stats["total_pgpgin"],
		"usage_percent":             calculateMemPercent(stat),
		"container_id":              id,
	}

	memstats := []string{
		"active_anon",
		"active_file",
		"cache",
		"hierarchical_memory_limit",
		"inactive_anon",
		"inactive_file",
		"mapped_file",
		"pgfault",
		"pgmajfault",
		"pgpgin",
		"pgpgout",
		"rss",
		"rss_huge",
		"total_active_anon",
		"total_active_file",
		"total_cache",
		"total_inactive_anon",
		"total_inactive_file",
		"total_mapped_file",
		"total_pgfault",
		"total_pgmajfault",
		"total_pgpgin",
		"total_pgpgout",
		"total_rss",
		"total_rss_huge",
		"total_unevictable",
		"total_writeback",
		"unevictable",
		"writeback",
	}
	for _, field := range memstats {
		if value, ok := stat.MemoryStats.Stats[field]; ok {
			memfields[field] = value
		}
	}
	if stat.MemoryStats.Failcnt != 0 {
		memfields["fail_count"] = stat.MemoryStats.Failcnt
	}

	if daemonOSType != "windows" {
		memfields["limit"] = stat.MemoryStats.Limit
		memfields["usage"] = stat.MemoryStats.Usage
		memfields["max_usage"] = stat.MemoryStats.MaxUsage

		mem := calculateMemUsageUnixNoCache(stat.MemoryStats)
		memLimit := float64(stat.MemoryStats.Limit)
		memfields["usage_percent"] = calculateMemPercentUnixNoCache(memLimit, mem)
	} else {
		memfields["commit_bytes"] = stat.MemoryStats.Commit
		memfields["commit_peak_bytes"] = stat.MemoryStats.CommitPeak
		memfields["private_working_set"] = stat.MemoryStats.PrivateWorkingSet
	}

	acc.AddFields("docker_container_mem", memfields, tags, now)

	cpufields := map[string]interface{}{
@@ -425,33 +383,14 @@ func gatherContainerStats(
		"throttling_periods":           stat.CPUStats.ThrottlingData.Periods,
		"throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods,
		"throttling_throttled_time":    stat.CPUStats.ThrottlingData.ThrottledTime,
		"usage_percent":                calculateCPUPercent(stat),
		"container_id":                 id,
	}

	if daemonOSType != "windows" {
		previousCPU := stat.PreCPUStats.CPUUsage.TotalUsage
		previousSystem := stat.PreCPUStats.SystemUsage
		cpuPercent := calculateCPUPercentUnix(previousCPU, previousSystem, stat)
		cpufields["usage_percent"] = cpuPercent
	} else {
		cpuPercent := calculateCPUPercentWindows(stat)
		cpufields["usage_percent"] = cpuPercent
	}

	cputags := copyTags(tags)
	cputags["cpu"] = "cpu-total"
	acc.AddFields("docker_container_cpu", cpufields, cputags, now)

	// If we have OnlineCPUs field, then use it to restrict stats gathering to only Online CPUs
	// (https://github.com/moby/moby/commit/115f91d7575d6de6c7781a96a082f144fd17e400)
	var percpuusage []uint64
	if stat.CPUStats.OnlineCPUs > 0 {
		percpuusage = stat.CPUStats.CPUUsage.PercpuUsage[:stat.CPUStats.OnlineCPUs]
	} else {
		percpuusage = stat.CPUStats.CPUUsage.PercpuUsage
	}

	for i, percpu := range percpuusage {
	for i, percpu := range stat.CPUStats.CPUUsage.PercpuUsage {
		percputags := copyTags(tags)
		percputags["cpu"] = fmt.Sprintf("cpu%d", i)
		fields := map[string]interface{}{
@@ -517,6 +456,26 @@ func gatherContainerStats(
	gatherBlockIOMetrics(stat, acc, tags, now, id, perDevice, total)
}

func calculateMemPercent(stat *types.StatsJSON) float64 {
	var memPercent = 0.0
	if stat.MemoryStats.Limit > 0 {
		memPercent = float64(stat.MemoryStats.Usage) / float64(stat.MemoryStats.Limit) * 100.0
	}
	return memPercent
}

func calculateCPUPercent(stat *types.StatsJSON) float64 {
	var cpuPercent = 0.0
	// calculate the change for the cpu and system usage of the container in between readings
	cpuDelta := float64(stat.CPUStats.CPUUsage.TotalUsage) - float64(stat.PreCPUStats.CPUUsage.TotalUsage)
	systemDelta := float64(stat.CPUStats.SystemUsage) - float64(stat.PreCPUStats.SystemUsage)

	if systemDelta > 0.0 && cpuDelta > 0.0 {
		cpuPercent = (cpuDelta / systemDelta) * float64(len(stat.CPUStats.CPUUsage.PercpuUsage)) * 100.0
	}
	return cpuPercent
}

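The two helpers above are easy to sanity-check against the fixture values used later in `testStats()` (memory usage 1111 with limit 2000; CPU total 500 vs. a previous 400, system 100 vs. a previous 50, two per-CPU entries). A quick standalone check of both formulas:

```
package main

import "fmt"

func main() {
	// Memory: usage / limit * 100 -> 1111 / 2000 * 100 = 55.55,
	// the usage_percent the docker tests assert below.
	fmt.Println(float64(1111) / float64(2000) * 100.0)

	// CPU: (cpuDelta / systemDelta) * nCPUs * 100
	// -> ((500-400) / (100-50)) * 2 * 100 = 400 percent.
	cpuDelta := float64(500 - 400)
	systemDelta := float64(100 - 50)
	fmt.Println(cpuDelta / systemDelta * 2 * 100.0)
}
```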
func gatherBlockIOMetrics(
	stat *types.StatsJSON,
	acc telegraf.Accumulator,
@@ -665,32 +624,8 @@ func parseSize(sizeStr string) (int64, error) {
	return int64(size), nil
}

func (d *Docker) createContainerFilters() error {
	if len(d.ContainerNames) > 0 {
		d.ContainerInclude = append(d.ContainerInclude, d.ContainerNames...)
	}

	if len(d.ContainerInclude) != 0 {
		var err error
		d.ContainerFilter.containerInclude, err = filter.Compile(d.ContainerInclude)
		if err != nil {
			return err
		}
	}

	if len(d.ContainerExclude) != 0 {
		var err error
		d.ContainerFilter.containerExclude, err = filter.Compile(d.ContainerExclude)
		if err != nil {
			return err
		}
	}

	return nil
}

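`createContainerFilters` above folds the deprecated `container_names` list into the include list and compiles both lists into glob matchers. A hedged sketch of the matcher behavior using Telegraf's `filter` package (assuming its `Compile`/`Match` API; the `main` wrapper is illustrative):

```
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	// Globs like etc* compile into a single matcher; an empty include
	// list means "collect everything" in the plugin logic above.
	include, err := filter.Compile([]string{"etc*"})
	if err != nil {
		panic(err)
	}
	fmt.Println(include.Match("etcd"))  // true
	fmt.Println(include.Match("redis")) // false
}
```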
func (d *Docker) createLabelFilters() error {
	if len(d.LabelInclude) != 0 {
	if len(d.LabelInclude) != 0 && d.LabelFilter.labelInclude == nil {
		var err error
		d.LabelFilter.labelInclude, err = filter.Compile(d.LabelInclude)
		if err != nil {
@@ -698,7 +633,7 @@ func (d *Docker) createLabelFilters() error {
		}
	}

	if len(d.LabelExclude) != 0 {
	if len(d.LabelExclude) != 0 && d.LabelFilter.labelExclude == nil {
		var err error
		d.LabelFilter.labelExclude, err = filter.Compile(d.LabelExclude)
		if err != nil {
@@ -712,12 +647,9 @@ func (d *Docker) createLabelFilters() error {
func init() {
	inputs.Add("docker", func() telegraf.Input {
		return &Docker{
			PerDevice:      true,
			Timeout:        internal.Duration{Duration: time.Second * 5},
			Endpoint:       defaultEndpoint,
			newEnvClient:   NewEnvClient,
			newClient:      NewClient,
			filtersCreated: false,
			PerDevice:           true,
			Timeout:             internal.Duration{Duration: time.Second * 5},
			labelFiltersCreated: false,
		}
	})
}

@@ -1,9 +1,8 @@
package docker

import (
	"context"
	"crypto/tls"
	"testing"
	"time"

	"github.com/influxdata/telegraf/testutil"

@@ -11,56 +10,6 @@ import (
	"github.com/stretchr/testify/require"
)

type MockClient struct {
	InfoF             func(ctx context.Context) (types.Info, error)
	ContainerListF    func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
	ContainerStatsF   func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error)
	ContainerInspectF func(ctx context.Context, containerID string) (types.ContainerJSON, error)
}

func (c *MockClient) Info(ctx context.Context) (types.Info, error) {
	return c.InfoF(ctx)
}

func (c *MockClient) ContainerList(
	ctx context.Context,
	options types.ContainerListOptions,
) ([]types.Container, error) {
	return c.ContainerListF(ctx, options)
}

func (c *MockClient) ContainerStats(
	ctx context.Context,
	containerID string,
	stream bool,
) (types.ContainerStats, error) {
	return c.ContainerStatsF(ctx, containerID, stream)
}

func (c *MockClient) ContainerInspect(
	ctx context.Context,
	containerID string,
) (types.ContainerJSON, error) {
	return c.ContainerInspectF(ctx, containerID)
}

func newClient(host string, tlsConfig *tls.Config) (Client, error) {
	return &MockClient{
		InfoF: func(context.Context) (types.Info, error) {
			return info, nil
		},
		ContainerListF: func(context.Context, types.ContainerListOptions) ([]types.Container, error) {
			return containerList, nil
		},
		ContainerStatsF: func(context.Context, string, bool) (types.ContainerStats, error) {
			return containerStats(), nil
		},
		ContainerInspectF: func(context.Context, string) (types.ContainerJSON, error) {
			return containerInspect, nil
		},
	}, nil
}

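The `MockClient`/`newClient` pair above is what the new `newClient func(string, *tls.Config) (Client, error)` field on `Docker` buys: tests inject a canned factory instead of dialing a daemon. A hypothetical test sketch showing that wiring (not part of the diff; it reuses the package fixtures defined above):

```
// Hypothetical example: because the client factory is a struct field,
// a test can inject the mock above and Gather never dials a real
// Docker daemon.
func TestGatherUsesInjectedClient(t *testing.T) {
	var acc testutil.Accumulator
	d := Docker{
		newClient: newClient, // the canned MockClient factory defined above
	}
	require.NoError(t, d.Gather(&acc))
}
```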
func TestDockerGatherContainerStats(t *testing.T) {
	var acc testutil.Accumulator
	stats := testStats()
@@ -69,8 +18,7 @@ func TestDockerGatherContainerStats(t *testing.T) {
		"container_name":  "redis",
		"container_image": "redis/image",
	}

	gatherContainerStats(stats, &acc, tags, "123456789", true, true, "linux")
	gatherContainerStats(stats, &acc, tags, "123456789", true, true)

	// test docker_container_net measurement
	netfields := map[string]interface{}{
@@ -124,41 +72,41 @@ func TestDockerGatherContainerStats(t *testing.T) {

	// test docker_container_mem measurement
	memfields := map[string]interface{}{
		"active_anon":               uint64(0),
		"active_file":               uint64(1),
		"cache":                     uint64(0),
		"container_id":              "123456789",
		"fail_count":                uint64(1),
		"hierarchical_memory_limit": uint64(0),
		"inactive_anon":             uint64(0),
		"inactive_file":             uint64(3),
		"limit":                     uint64(2000),
		"mapped_file":               uint64(0),
		"max_usage":                 uint64(1001),
		"pgfault":                   uint64(2),
		"pgmajfault":                uint64(0),
		"pgpgin":                    uint64(0),
		"pgpgout":                   uint64(0),
		"rss_huge":                  uint64(0),
		"rss":                       uint64(0),
		"total_active_anon":         uint64(0),
		"total_active_file":         uint64(0),
		"total_cache":               uint64(0),
		"total_inactive_anon":       uint64(0),
		"total_inactive_file":       uint64(0),
		"total_mapped_file":         uint64(0),
		"total_pgfault":             uint64(0),
		"total_pgmajfault":          uint64(0),
		"total_pgpgin":              uint64(4),
		"total_pgpgout":             uint64(0),
		"total_rss_huge":            uint64(444),
		"total_rss":                 uint64(44),
		"total_unevictable":         uint64(0),
		"total_writeback":           uint64(55),
		"unevictable":               uint64(0),
		"usage_percent":             float64(55.55),
		"usage":                     uint64(1111),
		"fail_count":                uint64(1),
		"limit":                     uint64(2000),
		"total_pgmafault":           uint64(0),
		"cache":                     uint64(0),
		"mapped_file":               uint64(0),
		"total_inactive_file":       uint64(0),
		"pgpgout":                   uint64(0),
		"rss":                       uint64(0),
		"total_mapped_file":         uint64(0),
		"writeback":                 uint64(0),
		"unevictable":               uint64(0),
		"pgpgin":                    uint64(0),
		"total_unevictable":         uint64(0),
		"pgmajfault":                uint64(0),
		"total_rss":                 uint64(44),
		"total_rss_huge":            uint64(444),
		"total_writeback":           uint64(55),
		"total_inactive_anon":       uint64(0),
		"rss_huge":                  uint64(0),
		"hierarchical_memory_limit": uint64(0),
		"total_pgfault":             uint64(0),
		"total_active_file":         uint64(0),
		"active_anon":               uint64(0),
		"total_active_anon":         uint64(0),
		"total_pgpgout":             uint64(0),
		"total_cache":               uint64(0),
		"inactive_anon":             uint64(0),
		"active_file":               uint64(1),
		"pgfault":                   uint64(2),
		"inactive_file":             uint64(3),
		"total_pgpgin":              uint64(4),
		"usage_percent":             float64(55.55),
		"container_id":              "123456789",
	}

	acc.AssertContainsTaggedFields(t, "docker_container_mem", memfields, tags)
@@ -192,174 +140,166 @@ func TestDockerGatherContainerStats(t *testing.T) {
		"container_id": "123456789",
	}
	acc.AssertContainsTaggedFields(t, "docker_container_cpu", cpu1fields, cputags)

	// Those tagged fields should not be present because of offline CPUs
	cputags["cpu"] = "cpu2"
	cpu2fields := map[string]interface{}{
		"usage_total":  uint64(0),
		"container_id": "123456789",
	}
	acc.AssertDoesNotContainsTaggedFields(t, "docker_container_cpu", cpu2fields, cputags)

	cputags["cpu"] = "cpu3"
	cpu3fields := map[string]interface{}{
		"usage_total":  uint64(0),
		"container_id": "123456789",
	}
	acc.AssertDoesNotContainsTaggedFields(t, "docker_container_cpu", cpu3fields, cputags)
}

func TestDocker_WindowsMemoryContainerStats(t *testing.T) {
	var acc testutil.Accumulator
func testStats() *types.StatsJSON {
	stats := &types.StatsJSON{}
	stats.Read = time.Now()
	stats.Networks = make(map[string]types.NetworkStats)

	d := Docker{
		newClient: func(string, *tls.Config) (Client, error) {
			return &MockClient{
				InfoF: func(ctx context.Context) (types.Info, error) {
					return info, nil
				},
				ContainerListF: func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
					return containerList, nil
				},
				ContainerStatsF: func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
					return containerStatsWindows(), nil
				},
				ContainerInspectF: func(ctx context.Context, containerID string) (types.ContainerJSON, error) {
					return containerInspect, nil
				},
			}, nil
		},
	stats.CPUStats.CPUUsage.PercpuUsage = []uint64{1, 1002}
	stats.CPUStats.CPUUsage.UsageInUsermode = 100
	stats.CPUStats.CPUUsage.TotalUsage = 500
	stats.CPUStats.CPUUsage.UsageInKernelmode = 200
	stats.CPUStats.SystemUsage = 100
	stats.CPUStats.ThrottlingData.Periods = 1

	stats.PreCPUStats.CPUUsage.TotalUsage = 400
	stats.PreCPUStats.SystemUsage = 50

	stats.MemoryStats.Stats = make(map[string]uint64)
	stats.MemoryStats.Stats["total_pgmajfault"] = 0
	stats.MemoryStats.Stats["cache"] = 0
	stats.MemoryStats.Stats["mapped_file"] = 0
	stats.MemoryStats.Stats["total_inactive_file"] = 0
	stats.MemoryStats.Stats["pagpgout"] = 0
	stats.MemoryStats.Stats["rss"] = 0
	stats.MemoryStats.Stats["total_mapped_file"] = 0
	stats.MemoryStats.Stats["writeback"] = 0
	stats.MemoryStats.Stats["unevictable"] = 0
	stats.MemoryStats.Stats["pgpgin"] = 0
	stats.MemoryStats.Stats["total_unevictable"] = 0
	stats.MemoryStats.Stats["pgmajfault"] = 0
	stats.MemoryStats.Stats["total_rss"] = 44
	stats.MemoryStats.Stats["total_rss_huge"] = 444
	stats.MemoryStats.Stats["total_write_back"] = 55
	stats.MemoryStats.Stats["total_inactive_anon"] = 0
	stats.MemoryStats.Stats["rss_huge"] = 0
	stats.MemoryStats.Stats["hierarchical_memory_limit"] = 0
	stats.MemoryStats.Stats["total_pgfault"] = 0
	stats.MemoryStats.Stats["total_active_file"] = 0
	stats.MemoryStats.Stats["active_anon"] = 0
	stats.MemoryStats.Stats["total_active_anon"] = 0
	stats.MemoryStats.Stats["total_pgpgout"] = 0
	stats.MemoryStats.Stats["total_cache"] = 0
	stats.MemoryStats.Stats["inactive_anon"] = 0
	stats.MemoryStats.Stats["active_file"] = 1
	stats.MemoryStats.Stats["pgfault"] = 2
	stats.MemoryStats.Stats["inactive_file"] = 3
	stats.MemoryStats.Stats["total_pgpgin"] = 4

	stats.MemoryStats.MaxUsage = 1001
	stats.MemoryStats.Usage = 1111
	stats.MemoryStats.Failcnt = 1
	stats.MemoryStats.Limit = 2000

	stats.Networks["eth0"] = types.NetworkStats{
		RxDropped: 1,
		RxBytes:   2,
		RxErrors:  3,
		TxPackets: 4,
		TxDropped: 1,
		RxPackets: 2,
		TxErrors:  3,
		TxBytes:   4,
	}
	err := d.Gather(&acc)
	require.NoError(t, err)

	stats.Networks["eth1"] = types.NetworkStats{
		RxDropped: 5,
		RxBytes:   6,
		RxErrors:  7,
		TxPackets: 8,
		TxDropped: 5,
		RxPackets: 6,
		TxErrors:  7,
		TxBytes:   8,
	}

	sbr := types.BlkioStatEntry{
		Major: 6,
		Minor: 0,
		Op:    "read",
		Value: 100,
	}
	sr := types.BlkioStatEntry{
		Major: 6,
		Minor: 0,
		Op:    "write",
		Value: 101,
	}
	sr2 := types.BlkioStatEntry{
		Major: 6,
		Minor: 1,
		Op:    "write",
		Value: 201,
	}

	stats.BlkioStats.IoServiceBytesRecursive = append(
		stats.BlkioStats.IoServiceBytesRecursive, sbr)
	stats.BlkioStats.IoServicedRecursive = append(
		stats.BlkioStats.IoServicedRecursive, sr)
	stats.BlkioStats.IoServicedRecursive = append(
		stats.BlkioStats.IoServicedRecursive, sr2)

	return stats
}

var gatherLabelsTests = []struct {
	include     []string
	exclude     []string
	expected    []string
	notexpected []string
}{
	{[]string{}, []string{}, []string{"label1", "label2"}, []string{}},
	{[]string{"*"}, []string{}, []string{"label1", "label2"}, []string{}},
	{[]string{"lab*"}, []string{}, []string{"label1", "label2"}, []string{}},
	{[]string{"label1"}, []string{}, []string{"label1"}, []string{"label2"}},
	{[]string{"label1*"}, []string{}, []string{"label1"}, []string{"label2"}},
	{[]string{}, []string{"*"}, []string{}, []string{"label1", "label2"}},
	{[]string{}, []string{"lab*"}, []string{}, []string{"label1", "label2"}},
	{[]string{}, []string{"label1"}, []string{"label2"}, []string{"label1"}},
	{[]string{"*"}, []string{"*"}, []string{}, []string{"label1", "label2"}},
}

func TestDockerGatherLabels(t *testing.T) {
	var gatherLabelsTests = []struct {
		include     []string
		exclude     []string
		expected    []string
		notexpected []string
	}{
		{[]string{}, []string{}, []string{"label1", "label2"}, []string{}},
		{[]string{"*"}, []string{}, []string{"label1", "label2"}, []string{}},
		{[]string{"lab*"}, []string{}, []string{"label1", "label2"}, []string{}},
		{[]string{"label1"}, []string{}, []string{"label1"}, []string{"label2"}},
		{[]string{"label1*"}, []string{}, []string{"label1"}, []string{"label2"}},
		{[]string{}, []string{"*"}, []string{}, []string{"label1", "label2"}},
		{[]string{}, []string{"lab*"}, []string{}, []string{"label1", "label2"}},
		{[]string{}, []string{"label1"}, []string{"label2"}, []string{"label1"}},
		{[]string{"*"}, []string{"*"}, []string{}, []string{"label1", "label2"}},
	}

	for _, tt := range gatherLabelsTests {
		t.Run("", func(t *testing.T) {
			var acc testutil.Accumulator
			d := Docker{
				newClient: newClient,
		var acc testutil.Accumulator
		d := Docker{
			client:  nil,
			testing: true,
		}

		for _, label := range tt.include {
			d.LabelInclude = append(d.LabelInclude, label)
		}
		for _, label := range tt.exclude {
			d.LabelExclude = append(d.LabelExclude, label)
		}

		err := d.Gather(&acc)
		require.NoError(t, err)

		for _, label := range tt.expected {
			if !acc.HasTag("docker_container_cpu", label) {
				t.Errorf("Didn't get expected label of %s. Test was: Include: %s Exclude %s",
					label, tt.include, tt.exclude)
			}
		}

			for _, label := range tt.include {
				d.LabelInclude = append(d.LabelInclude, label)
		for _, label := range tt.notexpected {
			if acc.HasTag("docker_container_cpu", label) {
				t.Errorf("Got unexpected label of %s. Test was: Include: %s Exclude %s",
					label, tt.include, tt.exclude)
			}
		}
			for _, label := range tt.exclude {
				d.LabelExclude = append(d.LabelExclude, label)
			}

			err := d.Gather(&acc)
			require.NoError(t, err)

			for _, label := range tt.expected {
				if !acc.HasTag("docker_container_cpu", label) {
					t.Errorf("Didn't get expected label of %s. Test was: Include: %s Exclude %s",
						label, tt.include, tt.exclude)
				}
			}

			for _, label := range tt.notexpected {
				if acc.HasTag("docker_container_cpu", label) {
					t.Errorf("Got unexpected label of %s. Test was: Include: %s Exclude %s",
						label, tt.include, tt.exclude)
				}
			}
		})
	}
}

func TestContainerNames(t *testing.T) {
	var gatherContainerNames = []struct {
		include     []string
		exclude     []string
		expected    []string
		notexpected []string
	}{
		{[]string{}, []string{}, []string{"etcd", "etcd2"}, []string{}},
		{[]string{"*"}, []string{}, []string{"etcd", "etcd2"}, []string{}},
		{[]string{"etc*"}, []string{}, []string{"etcd", "etcd2"}, []string{}},
		{[]string{"etcd"}, []string{}, []string{"etcd"}, []string{"etcd2"}},
		{[]string{"etcd2*"}, []string{}, []string{"etcd2"}, []string{"etcd"}},
		{[]string{}, []string{"etc*"}, []string{}, []string{"etcd", "etcd2"}},
		{[]string{}, []string{"etcd"}, []string{"etcd2"}, []string{"etcd"}},
		{[]string{"*"}, []string{"*"}, []string{"etcd", "etcd2"}, []string{}},
		{[]string{}, []string{"*"}, []string{""}, []string{"etcd", "etcd2"}},
	}

	for _, tt := range gatherContainerNames {
		t.Run("", func(t *testing.T) {
			var acc testutil.Accumulator

			d := Docker{
				newClient:        newClient,
				ContainerInclude: tt.include,
				ContainerExclude: tt.exclude,
			}

			err := d.Gather(&acc)
			require.NoError(t, err)

			for _, metric := range acc.Metrics {
				if metric.Measurement == "docker_container_cpu" {
					if val, ok := metric.Tags["container_name"]; ok {
						var found bool = false
						for _, cname := range tt.expected {
							if val == cname {
								found = true
								break
							}
						}
						if !found {
							t.Errorf("Got unexpected container of %s. Test was -> Include: %s, Exclude: %s", val, tt.include, tt.exclude)
						}
					}
				}
			}

			for _, metric := range acc.Metrics {
				if metric.Measurement == "docker_container_cpu" {
					if val, ok := metric.Tags["container_name"]; ok {
						var found bool = false
						for _, cname := range tt.notexpected {
							if val == cname {
								found = true
								break
							}
						}
						if found {
							t.Errorf("Got unexpected container of %s. Test was -> Include: %s, Exclude: %s", val, tt.include, tt.exclude)
						}
					}
				}
			}
		})
	}
}
}

func TestDockerGatherInfo(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
d := Docker{
|
||||
newClient: newClient,
|
||||
TagEnvironment: []string{"ENVVAR1", "ENVVAR2", "ENVVAR3", "ENVVAR5",
|
||||
"ENVVAR6", "ENVVAR7", "ENVVAR8", "ENVVAR9"},
|
||||
client: nil,
|
||||
testing: true,
|
||||
}
|
||||
|
||||
err := acc.GatherError(d.Gather)
|
||||
@@ -405,10 +345,6 @@ func TestDockerGatherInfo(t *testing.T) {
|
||||
"cpu": "cpu3",
|
||||
"container_version": "v2.2.2",
|
||||
"engine_host": "absol",
|
||||
"ENVVAR1": "loremipsum",
|
||||
"ENVVAR2": "dolorsitamet",
|
||||
"ENVVAR3": "=ubuntu:10.04",
|
||||
"ENVVAR7": "ENVVAR8=ENVVAR9",
|
||||
"label1": "test_value_1",
|
||||
"label2": "test_value_2",
|
||||
},
|
||||
@@ -416,23 +352,51 @@ func TestDockerGatherInfo(t *testing.T) {
	acc.AssertContainsTaggedFields(t,
		"docker_container_mem",
		map[string]interface{}{
			"container_id":  "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
			"limit":         uint64(18935443456),
			"max_usage":     uint64(0),
			"usage":         uint64(0),
			"usage_percent": float64(0),
			"total_pgpgout":             uint64(0),
			"usage_percent":             float64(0),
			"rss":                       uint64(0),
			"total_writeback":           uint64(0),
			"active_anon":               uint64(0),
			"total_pgmafault":           uint64(0),
			"total_rss":                 uint64(0),
			"total_unevictable":         uint64(0),
			"active_file":               uint64(0),
			"total_mapped_file":         uint64(0),
			"pgpgin":                    uint64(0),
			"total_active_file":         uint64(0),
			"total_active_anon":         uint64(0),
			"total_cache":               uint64(0),
			"inactive_anon":             uint64(0),
			"pgmajfault":                uint64(0),
			"total_inactive_anon":       uint64(0),
			"total_rss_huge":            uint64(0),
			"rss_huge":                  uint64(0),
			"hierarchical_memory_limit": uint64(0),
			"pgpgout":                   uint64(0),
			"unevictable":               uint64(0),
			"total_inactive_file":       uint64(0),
			"writeback":                 uint64(0),
			"total_pgfault":             uint64(0),
			"total_pgpgin":              uint64(0),
			"cache":                     uint64(0),
			"mapped_file":               uint64(0),
			"inactive_file":             uint64(0),
			"max_usage":                 uint64(0),
			"fail_count":                uint64(0),
			"pgfault":                   uint64(0),
			"usage":                     uint64(0),
			"limit":                     uint64(18935443456),
			"container_id":              "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
		},
		map[string]string{
			"engine_host":       "absol",
			"container_name":    "etcd2",
			"container_image":   "quay.io:4443/coreos/etcd",
			"container_version": "v2.2.2",
			"ENVVAR1":           "loremipsum",
			"ENVVAR2":           "dolorsitamet",
			"ENVVAR3":           "=ubuntu:10.04",
			"ENVVAR7":           "ENVVAR8=ENVVAR9",
			"label1":            "test_value_1",
			"label2":            "test_value_2",
		},
	)

	//fmt.Print(info)
}

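The `ENVVAR3` and `ENVVAR7` tags above imply that `TagEnvironment` splits each container environment entry on the first `=` only, keeps whitelisted keys, and drops entries whose value is empty or whitespace (`ENVVAR4`, `ENVVAR5`, and `ENVVAR6` from the fixture never become tags). A sketch of that behaviour, with a hypothetical helper name standing in for the plugin's internal logic:

```go
package main

import (
	"fmt"
	"strings"
)

// tagsFromEnv is an illustrative stand-in for the plugin's env-to-tag logic:
// split on the first "=" so values containing "=" survive, keep only
// whitelisted keys, and skip empty or blank values.
func tagsFromEnv(env, whitelist []string) map[string]string {
	allowed := make(map[string]bool, len(whitelist))
	for _, k := range whitelist {
		allowed[k] = true
	}
	tags := make(map[string]string)
	for _, entry := range env {
		kv := strings.SplitN(entry, "=", 2)
		if len(kv) == 2 && allowed[kv[0]] && strings.TrimSpace(kv[1]) != "" {
			tags[kv[0]] = kv[1]
		}
	}
	return tags
}

func main() {
	env := []string{"ENVVAR3==ubuntu:10.04", "ENVVAR4", "ENVVAR5=", "ENVVAR6= ", "ENVVAR7=ENVVAR8=ENVVAR9"}
	fmt.Println(tagsFromEnv(env, []string{"ENVVAR3", "ENVVAR4", "ENVVAR5", "ENVVAR6", "ENVVAR7"}))
	// map[ENVVAR3:=ubuntu:10.04 ENVVAR7:ENVVAR8=ENVVAR9]
}
```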
@@ -1,406 +0,0 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
)
|
||||
|
||||
var info = types.Info{
|
||||
Containers: 108,
|
||||
ContainersRunning: 98,
|
||||
ContainersStopped: 6,
|
||||
ContainersPaused: 3,
|
||||
OomKillDisable: false,
|
||||
SystemTime: "2016-02-24T00:55:09.15073105-05:00",
|
||||
NEventsListener: 0,
|
||||
ID: "5WQQ:TFWR:FDNG:OKQ3:37Y4:FJWG:QIKK:623T:R3ME:QTKB:A7F7:OLHD",
|
||||
Debug: false,
|
||||
LoggingDriver: "json-file",
|
||||
KernelVersion: "4.3.0-1-amd64",
|
||||
IndexServerAddress: "https://index.docker.io/v1/",
|
||||
MemTotal: 3840757760,
|
||||
Images: 199,
|
||||
CPUCfsQuota: true,
|
||||
Name: "absol",
|
||||
SwapLimit: false,
|
||||
IPv4Forwarding: true,
|
||||
ExperimentalBuild: false,
|
||||
CPUCfsPeriod: true,
|
||||
RegistryConfig: ®istry.ServiceConfig{
|
||||
IndexConfigs: map[string]*registry.IndexInfo{
|
||||
"docker.io": {
|
||||
Name: "docker.io",
|
||||
Mirrors: []string{},
|
||||
Official: true,
|
||||
Secure: true,
|
||||
},
|
||||
}, InsecureRegistryCIDRs: []*registry.NetIPNet{{IP: []byte{127, 0, 0, 0}, Mask: []byte{255, 0, 0, 0}}}, Mirrors: []string{}},
|
||||
OperatingSystem: "Linux Mint LMDE (containerized)",
|
||||
BridgeNfIptables: true,
|
||||
HTTPSProxy: "",
|
||||
Labels: []string{},
|
||||
MemoryLimit: false,
|
||||
DriverStatus: [][2]string{{"Pool Name", "docker-8:1-1182287-pool"}, {"Pool Blocksize", "65.54 kB"}, {"Backing Filesystem", "extfs"}, {"Data file", "/dev/loop0"}, {"Metadata file", "/dev/loop1"}, {"Data Space Used", "17.3 GB"}, {"Data Space Total", "107.4 GB"}, {"Data Space Available", "36.53 GB"}, {"Metadata Space Used", "20.97 MB"}, {"Metadata Space Total", "2.147 GB"}, {"Metadata Space Available", "2.127 GB"}, {"Udev Sync Supported", "true"}, {"Deferred Removal Enabled", "false"}, {"Data loop file", "/var/lib/docker/devicemapper/devicemapper/data"}, {"Metadata loop file", "/var/lib/docker/devicemapper/devicemapper/metadata"}, {"Library Version", "1.02.115 (2016-01-25)"}},
|
||||
NFd: 19,
|
||||
HTTPProxy: "",
|
||||
Driver: "devicemapper",
|
||||
NGoroutines: 39,
|
||||
NCPU: 4,
|
||||
DockerRootDir: "/var/lib/docker",
|
||||
NoProxy: "",
|
||||
BridgeNfIP6tables: true,
|
||||
}
|
||||
|
||||
var containerList = []types.Container{
|
||||
types.Container{
|
||||
ID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb",
|
||||
Names: []string{"/etcd"},
|
||||
Image: "quay.io/coreos/etcd:v2.2.2",
|
||||
Command: "/etcd -name etcd0 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
|
||||
Created: 1455941930,
|
||||
Status: "Up 4 hours",
|
||||
Ports: []types.Port{
|
||||
types.Port{
|
||||
PrivatePort: 7001,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 4001,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 2380,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 2379,
|
||||
PublicPort: 2379,
|
||||
Type: "tcp",
|
||||
IP: "0.0.0.0",
|
||||
},
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"label1": "test_value_1",
|
||||
"label2": "test_value_2",
|
||||
},
|
||||
SizeRw: 0,
|
||||
SizeRootFs: 0,
|
||||
},
|
||||
types.Container{
|
||||
ID: "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
|
||||
Names: []string{"/etcd2"},
|
||||
Image: "quay.io:4443/coreos/etcd:v2.2.2",
|
||||
Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
|
||||
Created: 1455941933,
|
||||
Status: "Up 4 hours",
|
||||
Ports: []types.Port{
|
||||
types.Port{
|
||||
PrivatePort: 7002,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 4002,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 2381,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 2382,
|
||||
PublicPort: 2382,
|
||||
Type: "tcp",
|
||||
IP: "0.0.0.0",
|
||||
},
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"label1": "test_value_1",
|
||||
"label2": "test_value_2",
|
||||
},
|
||||
SizeRw: 0,
|
||||
SizeRootFs: 0,
|
||||
},
|
||||
}
|
||||
|
||||
func containerStats() types.ContainerStats {
|
||||
var stat types.ContainerStats
|
||||
jsonStat := `
|
||||
{
|
||||
"blkio_stats": {
|
||||
"io_service_bytes_recursive": [
|
||||
{
|
||||
"major": 252,
|
||||
"minor": 1,
|
||||
"op": "Read",
|
||||
"value": 753664
|
||||
},
|
||||
{
|
||||
"major": 252,
|
||||
"minor": 1,
|
||||
"op": "Write"
|
||||
},
|
||||
{
|
||||
"major": 252,
|
||||
"minor": 1,
|
||||
"op": "Sync"
|
||||
},
|
||||
{
|
||||
"major": 252,
|
||||
"minor": 1,
|
||||
"op": "Async",
|
||||
"value": 753664
|
||||
},
|
||||
{
|
||||
"major": 252,
|
||||
"minor": 1,
|
||||
"op": "Total",
|
||||
"value": 753664
|
||||
}
|
||||
],
|
||||
"io_serviced_recursive": [
|
||||
{
|
||||
"major": 252,
|
||||
"minor": 1,
|
||||
"op": "Read",
|
||||
"value": 26
|
||||
},
|
||||
{
|
||||
"major": 252,
|
||||
"minor": 1,
|
||||
"op": "Write"
|
||||
},
|
||||
{
|
||||
"major": 252,
|
||||
"minor": 1,
|
||||
"op": "Sync"
|
||||
},
|
||||
{
|
||||
"major": 252,
|
||||
"minor": 1,
|
||||
"op": "Async",
|
||||
"value": 26
|
||||
},
|
||||
{
|
||||
"major": 252,
|
||||
"minor": 1,
|
||||
"op": "Total",
|
||||
"value": 26
|
||||
}
|
||||
]
|
||||
},
|
||||
"cpu_stats": {
|
||||
"cpu_usage": {
|
||||
"percpu_usage": [
|
||||
17871,
|
||||
4959158,
|
||||
1646137,
|
||||
1231652,
|
||||
11829401,
|
||||
244656,
|
||||
369972,
|
||||
0
|
||||
],
|
||||
"total_usage": 20298847,
|
||||
"usage_in_usermode": 10000000
|
||||
},
|
||||
"system_cpu_usage": 24052607520000000,
|
||||
"throttling_data": {}
|
||||
},
|
||||
"memory_stats": {
|
||||
"limit": 18935443456,
|
||||
"stats": {}
|
||||
},
|
||||
"precpu_stats": {
|
||||
"cpu_usage": {
|
||||
"percpu_usage": [
|
||||
17871,
|
||||
4959158,
|
||||
1646137,
|
||||
1231652,
|
||||
11829401,
|
||||
244656,
|
||||
369972,
|
||||
0
|
||||
],
|
||||
"total_usage": 20298847,
|
||||
"usage_in_usermode": 10000000
|
||||
},
|
||||
"system_cpu_usage": 24052599550000000,
|
||||
"throttling_data": {}
|
||||
},
|
||||
"read": "2016-02-24T11:42:27.472459608-05:00"
|
||||
}`
|
||||
stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat))
|
||||
return stat
|
||||
}
|
||||
|
||||
func testStats() *types.StatsJSON {
|
||||
stats := &types.StatsJSON{}
|
||||
stats.Read = time.Now()
|
||||
stats.Networks = make(map[string]types.NetworkStats)
|
||||
stats.CPUStats.OnlineCPUs = 2
|
||||
stats.CPUStats.CPUUsage.PercpuUsage = []uint64{1, 1002, 0, 0}
|
||||
stats.CPUStats.CPUUsage.UsageInUsermode = 100
|
||||
stats.CPUStats.CPUUsage.TotalUsage = 500
|
||||
stats.CPUStats.CPUUsage.UsageInKernelmode = 200
|
||||
stats.CPUStats.SystemUsage = 100
|
||||
stats.CPUStats.ThrottlingData.Periods = 1
|
||||
|
||||
stats.PreCPUStats.CPUUsage.TotalUsage = 400
|
||||
stats.PreCPUStats.SystemUsage = 50
|
||||
|
||||
stats.MemoryStats.Stats = make(map[string]uint64)
|
||||
stats.MemoryStats.Stats["active_anon"] = 0
|
||||
stats.MemoryStats.Stats["active_file"] = 1
|
||||
stats.MemoryStats.Stats["cache"] = 0
|
||||
stats.MemoryStats.Stats["hierarchical_memory_limit"] = 0
|
||||
stats.MemoryStats.Stats["inactive_anon"] = 0
|
||||
stats.MemoryStats.Stats["inactive_file"] = 3
|
||||
stats.MemoryStats.Stats["mapped_file"] = 0
|
||||
stats.MemoryStats.Stats["pgfault"] = 2
|
||||
stats.MemoryStats.Stats["pgmajfault"] = 0
|
||||
stats.MemoryStats.Stats["pgpgin"] = 0
|
||||
stats.MemoryStats.Stats["pgpgout"] = 0
|
||||
stats.MemoryStats.Stats["rss"] = 0
|
||||
stats.MemoryStats.Stats["rss_huge"] = 0
|
||||
stats.MemoryStats.Stats["total_active_anon"] = 0
|
||||
stats.MemoryStats.Stats["total_active_file"] = 0
|
||||
stats.MemoryStats.Stats["total_cache"] = 0
|
||||
stats.MemoryStats.Stats["total_inactive_anon"] = 0
|
||||
stats.MemoryStats.Stats["total_inactive_file"] = 0
|
||||
stats.MemoryStats.Stats["total_mapped_file"] = 0
|
||||
stats.MemoryStats.Stats["total_pgfault"] = 0
|
||||
stats.MemoryStats.Stats["total_pgmajfault"] = 0
|
||||
stats.MemoryStats.Stats["total_pgpgin"] = 4
|
||||
stats.MemoryStats.Stats["total_pgpgout"] = 0
|
||||
stats.MemoryStats.Stats["total_rss"] = 44
|
||||
stats.MemoryStats.Stats["total_rss_huge"] = 444
|
||||
stats.MemoryStats.Stats["total_unevictable"] = 0
|
||||
stats.MemoryStats.Stats["total_writeback"] = 55
|
||||
stats.MemoryStats.Stats["unevictable"] = 0
|
||||
stats.MemoryStats.Stats["writeback"] = 0
|
||||
|
||||
stats.MemoryStats.MaxUsage = 1001
|
||||
stats.MemoryStats.Usage = 1111
|
||||
stats.MemoryStats.Failcnt = 1
|
||||
stats.MemoryStats.Limit = 2000
|
||||
|
||||
stats.Networks["eth0"] = types.NetworkStats{
|
||||
RxDropped: 1,
|
||||
RxBytes: 2,
|
||||
RxErrors: 3,
|
||||
TxPackets: 4,
|
||||
TxDropped: 1,
|
||||
RxPackets: 2,
|
||||
TxErrors: 3,
|
||||
TxBytes: 4,
|
||||
}
|
||||
|
||||
stats.Networks["eth1"] = types.NetworkStats{
|
||||
RxDropped: 5,
|
||||
RxBytes: 6,
|
||||
RxErrors: 7,
|
||||
TxPackets: 8,
|
||||
TxDropped: 5,
|
||||
RxPackets: 6,
|
||||
TxErrors: 7,
|
||||
TxBytes: 8,
|
||||
}
|
||||
|
||||
sbr := types.BlkioStatEntry{
|
||||
Major: 6,
|
||||
Minor: 0,
|
||||
Op: "read",
|
||||
Value: 100,
|
||||
}
|
||||
sr := types.BlkioStatEntry{
|
||||
Major: 6,
|
||||
Minor: 0,
|
||||
Op: "write",
|
||||
Value: 101,
|
||||
}
|
||||
sr2 := types.BlkioStatEntry{
|
||||
Major: 6,
|
||||
Minor: 1,
|
||||
Op: "write",
|
||||
Value: 201,
|
||||
}
|
||||
|
||||
stats.BlkioStats.IoServiceBytesRecursive = append(
|
||||
stats.BlkioStats.IoServiceBytesRecursive, sbr)
|
||||
stats.BlkioStats.IoServicedRecursive = append(
|
||||
stats.BlkioStats.IoServicedRecursive, sr)
|
||||
stats.BlkioStats.IoServicedRecursive = append(
|
||||
stats.BlkioStats.IoServicedRecursive, sr2)
|
||||
|
||||
return stats
|
||||
}
|
||||
|
||||
func containerStatsWindows() types.ContainerStats {
|
||||
var stat types.ContainerStats
|
||||
jsonStat := `
|
||||
{
|
||||
"read":"2017-01-11T08:32:46.2413794Z",
|
||||
"preread":"0001-01-01T00:00:00Z",
|
||||
"num_procs":64,
|
||||
"cpu_stats":{
|
||||
"cpu_usage":{
|
||||
"total_usage":536718750,
|
||||
"usage_in_kernelmode":390468750,
|
||||
"usage_in_usermode":390468750
|
||||
},
|
||||
"throttling_data":{
|
||||
"periods":0,
|
||||
"throttled_periods":0,
|
||||
"throttled_time":0
|
||||
}
|
||||
},
|
||||
"precpu_stats":{
|
||||
"cpu_usage":{
|
||||
"total_usage":0,
|
||||
"usage_in_kernelmode":0,
|
||||
"usage_in_usermode":0
|
||||
},
|
||||
"throttling_data":{
|
||||
"periods":0,
|
||||
"throttled_periods":0,
|
||||
"throttled_time":0
|
||||
}
|
||||
},
|
||||
"memory_stats":{
|
||||
"commitbytes":77160448,
|
||||
"commitpeakbytes":105000960,
|
||||
"privateworkingset":59961344
|
||||
},
|
||||
"name":"/gt_test_iis",
|
||||
}`
|
||||
stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat))
|
||||
return stat
|
||||
}
|
||||
|
||||
var containerInspect = types.ContainerJSON{
|
||||
Config: &container.Config{
|
||||
Env: []string{
|
||||
"ENVVAR1=loremipsum",
|
||||
"ENVVAR1FOO=loremipsum",
|
||||
"ENVVAR2=dolorsitamet",
|
||||
"ENVVAR3==ubuntu:10.04",
|
||||
"ENVVAR4",
|
||||
"ENVVAR5=",
|
||||
"ENVVAR6= ",
|
||||
"ENVVAR7=ENVVAR8=ENVVAR9",
|
||||
"PATH=/bin:/sbin",
|
||||
},
|
||||
},
|
||||
}
|
||||
151
plugins/inputs/docker/fake_client.go
Normal file
@@ -0,0 +1,151 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
)
|
||||
|
||||
type FakeDockerClient struct {
|
||||
}
|
||||
|
||||
func (d FakeDockerClient) Info(ctx context.Context) (types.Info, error) {
|
||||
env := types.Info{
|
||||
Containers: 108,
|
||||
ContainersRunning: 98,
|
||||
ContainersStopped: 6,
|
||||
ContainersPaused: 3,
|
||||
OomKillDisable: false,
|
||||
SystemTime: "2016-02-24T00:55:09.15073105-05:00",
|
||||
NEventsListener: 0,
|
||||
ID: "5WQQ:TFWR:FDNG:OKQ3:37Y4:FJWG:QIKK:623T:R3ME:QTKB:A7F7:OLHD",
|
||||
Debug: false,
|
||||
LoggingDriver: "json-file",
|
||||
KernelVersion: "4.3.0-1-amd64",
|
||||
IndexServerAddress: "https://index.docker.io/v1/",
|
||||
MemTotal: 3840757760,
|
||||
Images: 199,
|
||||
CPUCfsQuota: true,
|
||||
Name: "absol",
|
||||
SwapLimit: false,
|
||||
IPv4Forwarding: true,
|
||||
ExperimentalBuild: false,
|
||||
CPUCfsPeriod: true,
|
||||
RegistryConfig: ®istry.ServiceConfig{
|
||||
IndexConfigs: map[string]*registry.IndexInfo{
|
||||
"docker.io": {
|
||||
Name: "docker.io",
|
||||
Mirrors: []string{},
|
||||
Official: true,
|
||||
Secure: true,
|
||||
},
|
||||
}, InsecureRegistryCIDRs: []*registry.NetIPNet{{IP: []byte{127, 0, 0, 0}, Mask: []byte{255, 0, 0, 0}}}, Mirrors: []string{}},
|
||||
OperatingSystem: "Linux Mint LMDE (containerized)",
|
||||
BridgeNfIptables: true,
|
||||
HTTPSProxy: "",
|
||||
Labels: []string{},
|
||||
MemoryLimit: false,
|
||||
DriverStatus: [][2]string{{"Pool Name", "docker-8:1-1182287-pool"}, {"Pool Blocksize", "65.54 kB"}, {"Backing Filesystem", "extfs"}, {"Data file", "/dev/loop0"}, {"Metadata file", "/dev/loop1"}, {"Data Space Used", "17.3 GB"}, {"Data Space Total", "107.4 GB"}, {"Data Space Available", "36.53 GB"}, {"Metadata Space Used", "20.97 MB"}, {"Metadata Space Total", "2.147 GB"}, {"Metadata Space Available", "2.127 GB"}, {"Udev Sync Supported", "true"}, {"Deferred Removal Enabled", "false"}, {"Data loop file", "/var/lib/docker/devicemapper/devicemapper/data"}, {"Metadata loop file", "/var/lib/docker/devicemapper/devicemapper/metadata"}, {"Library Version", "1.02.115 (2016-01-25)"}},
|
||||
NFd: 19,
|
||||
HTTPProxy: "",
|
||||
Driver: "devicemapper",
|
||||
NGoroutines: 39,
|
||||
NCPU: 4,
|
||||
DockerRootDir: "/var/lib/docker",
|
||||
NoProxy: "",
|
||||
BridgeNfIP6tables: true,
|
||||
}
|
||||
return env, nil
|
||||
}
|
||||
|
||||
func (d FakeDockerClient) ContainerList(octx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
|
||||
container1 := types.Container{
|
||||
ID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb",
|
||||
Names: []string{"/etcd"},
|
||||
Image: "quay.io/coreos/etcd:v2.2.2",
|
||||
Command: "/etcd -name etcd0 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
|
||||
Created: 1455941930,
|
||||
Status: "Up 4 hours",
|
||||
Ports: []types.Port{
|
||||
types.Port{
|
||||
PrivatePort: 7001,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 4001,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 2380,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 2379,
|
||||
PublicPort: 2379,
|
||||
Type: "tcp",
|
||||
IP: "0.0.0.0",
|
||||
},
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"label1": "test_value_1",
|
||||
"label2": "test_value_2",
|
||||
},
|
||||
SizeRw: 0,
|
||||
SizeRootFs: 0,
|
||||
}
|
||||
container2 := types.Container{
|
||||
ID: "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
|
||||
Names: []string{"/etcd2"},
|
||||
Image: "quay.io:4443/coreos/etcd:v2.2.2",
|
||||
Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
|
||||
Created: 1455941933,
|
||||
Status: "Up 4 hours",
|
||||
Ports: []types.Port{
|
||||
types.Port{
|
||||
PrivatePort: 7002,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 4002,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 2381,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 2382,
|
||||
PublicPort: 2382,
|
||||
Type: "tcp",
|
||||
IP: "0.0.0.0",
|
||||
},
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"label1": "test_value_1",
|
||||
"label2": "test_value_2",
|
||||
},
|
||||
SizeRw: 0,
|
||||
SizeRootFs: 0,
|
||||
}
|
||||
|
||||
containers := []types.Container{container1, container2}
|
||||
return containers, nil
|
||||
|
||||
//#{e6a96c84ca91a5258b7cb752579fb68826b68b49ff957487695cd4d13c343b44 titilambert/snmpsim /bin/sh -c 'snmpsimd --agent-udpv4-endpoint=0.0.0.0:31161 --process-user=root --process-group=user' 1455724831 Up 4 hours [{31161 31161 udp 0.0.0.0}] 0 0 [/snmp] map[]}]2016/02/24 01:05:01 Gathered metrics, (3s interval), from 1 inputs in 1.233836656s
|
||||
}
|
||||
|
||||
func (d FakeDockerClient) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
|
||||
var stat types.ContainerStats
|
||||
jsonStat := `{"read":"2016-02-24T11:42:27.472459608-05:00","memory_stats":{"stats":{},"limit":18935443456},"blkio_stats":{"io_service_bytes_recursive":[{"major":252,"minor":1,"op":"Read","value":753664},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":753664},{"major":252,"minor":1,"op":"Total","value":753664}],"io_serviced_recursive":[{"major":252,"minor":1,"op":"Read","value":26},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":26},{"major":252,"minor":1,"op":"Total","value":26}]},"cpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052607520000000,"throttling_data":{}},"precpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052599550000000,"throttling_data":{}}}`
|
||||
stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat))
|
||||
return stat, nil
|
||||
}
|
||||
@@ -1,55 +0,0 @@
// Helper functions copied from
// https://github.com/docker/cli/blob/master/cli/command/container/stats_helpers.go
package docker

import "github.com/docker/docker/api/types"

func calculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 {
	var (
		cpuPercent = 0.0
		// calculate the change for the cpu usage of the container in between readings
		cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU)
		// calculate the change for the entire system between readings
		systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem)
		onlineCPUs  = float64(v.CPUStats.OnlineCPUs)
	)

	if onlineCPUs == 0.0 {
		onlineCPUs = float64(len(v.CPUStats.CPUUsage.PercpuUsage))
	}
	if systemDelta > 0.0 && cpuDelta > 0.0 {
		cpuPercent = (cpuDelta / systemDelta) * onlineCPUs * 100.0
	}
	return cpuPercent
}

func calculateCPUPercentWindows(v *types.StatsJSON) float64 {
	// Max number of 100ns intervals between the previous time read and now
	possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) // Start with number of ns intervals
	possIntervals /= 100                                         // Convert to number of 100ns intervals
	possIntervals *= uint64(v.NumProcs)                          // Multiply by the number of processors

	// Intervals used
	intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage

	// Percentage avoiding divide-by-zero
	if possIntervals > 0 {
		return float64(intervalsUsed) / float64(possIntervals) * 100.0
	}
	return 0.00
}

// calculateMemUsageUnixNoCache calculates memory usage of the container.
// Page cache is intentionally excluded to avoid misinterpretation of the output.
func calculateMemUsageUnixNoCache(mem types.MemoryStats) float64 {
	return float64(mem.Usage - mem.Stats["cache"])
}

func calculateMemPercentUnixNoCache(limit float64, usedNoCache float64) float64 {
	// MemoryStats.Limit will never be 0 unless the container is not running and we haven't
	// got any data from cgroup
	if limit != 0 {
		return usedNoCache / limit * 100.0
	}
	return 0
}
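Plugging the `testStats` fixture values from earlier in this diff into `calculateCPUPercentUnix` makes the formula concrete; note the result can exceed 100 because it is scaled by the CPU count:

```go
package main

import "fmt"

func main() {
	// Values from the testStats fixture: TotalUsage 500 (was 400),
	// SystemUsage 100 (was 50), OnlineCPUs 2.
	cpuDelta := 500.0 - 400.0   // container CPU time consumed between readings
	systemDelta := 100.0 - 50.0 // total system CPU time over the same window
	onlineCPUs := 2.0

	// Same formula as calculateCPUPercentUnix above.
	fmt.Println((cpuDelta / systemDelta) * onlineCPUs * 100.0) // 400
}
```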
@@ -62,7 +62,7 @@ domains. You can read Dovecot's documentation
### Example Output:

```
telegraf --config t.cfg --input-filter dovecot --test
telegraf -config t.cfg -input-filter dovecot -test
* Plugin: dovecot, Collection 1
> dovecot,ip=192.168.0.1,server=dovecot-1.domain.test,type=ip clock_time=0,disk_input=0i,disk_output=0i,invol_cs=0i,last_update="2016-04-08 10:59:47.000208479 +0200 CEST",mail_cache_hits=0i,mail_lookup_attr=0i,mail_lookup_path=0i,mail_read_bytes=0i,mail_read_count=0i,maj_faults=0i,min_faults=0i,num_cmds=12i,num_connected_sessions=0i,num_logins=6i,read_bytes=0i,read_count=0i,reset_timestamp="2016-04-08 10:33:34 +0200 CEST",sys_cpu=0,user_cpu=0,vol_cs=0i,write_bytes=0i,write_count=0i 1460106251633824223
* Plugin: dovecot, Collection 1
@@ -71,4 +71,4 @@ telegraf --config t.cfg --input-filter dovecot --test
> dovecot,domain=domain.test,server=dovecot-1.domain.test,type=domain clock_time=100896189179847.7,disk_input=6467588263936i,disk_output=17933680439296i,invol_cs=1194808498i,last_update="2016-04-08 11:04:08.000377367 +0200 CEST",mail_cache_hits=46455781i,mail_lookup_attr=0i,mail_lookup_path=571490i,mail_read_bytes=79287033067i,mail_read_count=491243i,maj_faults=16992i,min_faults=1278442541i,num_cmds=606005i,num_connected_sessions=6597i,num_logins=166381i,read_bytes=30231409780721i,read_count=1624912080i,reset_timestamp="2016-04-08 10:28:45 +0200 CEST",sys_cpu=156440.372,user_cpu=216676.476,vol_cs=2749291157i,write_bytes=17097106707594i,write_count=944448998i 1460106261639672622
* Plugin: dovecot, Collection 1
> dovecot,server=dovecot-1.domain.test,type=global clock_time=101196971074203.94,disk_input=6493168218112i,disk_output=17978638815232i,invol_cs=1198855447i,last_update="2016-04-08 11:04:13.000379245 +0200 CEST",mail_cache_hits=68192209i,mail_lookup_attr=0i,mail_lookup_path=653861i,mail_read_bytes=86705151847i,mail_read_count=566125i,maj_faults=17208i,min_faults=1286179702i,num_cmds=917469i,num_connected_sessions=8896i,num_logins=174827i,read_bytes=30327690466186i,read_count=1772396430i,reset_timestamp="2016-04-08 10:28:45 +0200 CEST",sys_cpu=157965.692,user_cpu=219337.48,vol_cs=2827615787i,write_bytes=17150837661940i,write_count=992653220i 1460106266642153907
```
@@ -169,10 +169,7 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
		if e.ClusterStats {
			// get cat/master information here so NodeStats can determine
			// whether this node is the Master
			if err := e.setCatMaster(s + "/_cat/master"); err != nil {
				acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
				return
			}
			e.setCatMaster(s + "/_cat/master")
		}

		// Always gather node states
@@ -356,7 +353,7 @@ func (e *Elasticsearch) setCatMaster(url string) error {
		// NOTE: we are not going to read/discard r.Body under the assumption we'd prefer
		// to let the underlying transport close the connection and re-establish a new one for
		// future calls.
		return fmt.Errorf("elasticsearch: Unable to retrieve master node information. API responded with status-code %d, expected %d", r.StatusCode, http.StatusOK)
		return fmt.Errorf("status-code %d, expected %d", r.StatusCode, http.StatusOK)
	}
	response, err := ioutil.ReadAll(r.Body)

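The `mask.ReplaceAllString` call in the Gather hunk above scrubs basic-auth credentials out of error text before it reaches the accumulator. The `mask` regexp itself is defined elsewhere in the plugin, so the pattern below is an assumption chosen to match the replacement string shown:

```go
package main

import (
	"errors"
	"fmt"
	"regexp"
)

// Assumed credential pattern; the plugin's actual mask regexp is not part of
// this hunk.
var mask = regexp.MustCompile(`https?:\/\/\S+:\S+@`)

func main() {
	err := errors.New("Get http://admin:secret@localhost:9200/_cat/master: connection refused")
	fmt.Println(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))
	// Get http(s)://XXX:XXX@localhost:9200/_cat/master: connection refused
}
```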
@@ -1,56 +0,0 @@
# Fail2ban Input Plugin

The fail2ban plugin gathers the count of failed and banned ip addresses using [fail2ban](https://www.fail2ban.org).

This plugin runs the `fail2ban-client` command which generally requires root access.
Acquiring the required permissions can be done using several methods:

- Use sudo to run fail2ban-client.
- Run telegraf as root. (not recommended)

### Using sudo

You may edit your sudo configuration with the following:

``` sudo
telegraf ALL=(root) NOEXEC: NOPASSWD: /usr/bin/fail2ban-client status, /usr/bin/fail2ban-client status *
```

### Configuration:

``` toml
# Read metrics from fail2ban.
[[inputs.fail2ban]]
  ## Use sudo to run fail2ban-client
  use_sudo = false
```

### Measurements & Fields:

- fail2ban
  - failed (integer, count)
  - banned (integer, count)

### Tags:

- All measurements have the following tags:
  - jail

### Example Output:

```
# fail2ban-client status sshd
Status for the jail: sshd
|- Filter
|  |- Currently failed: 5
|  |- Total failed:     20
|  `- File list:        /var/log/secure
`- Actions
   |- Currently banned: 2
   |- Total banned:     10
   `- Banned IP list:   192.168.0.1 192.168.0.2
```

```
fail2ban,jail=sshd failed=5i,banned=2i 1495868667000000000
```
@@ -1,131 +0,0 @@
|
||||
package fail2ban
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"strconv"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
var (
|
||||
execCommand = exec.Command // execCommand is used to mock commands in tests.
|
||||
)
|
||||
|
||||
type Fail2ban struct {
|
||||
path string
|
||||
UseSudo bool
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
## Use sudo to run fail2ban-client
|
||||
use_sudo = false
|
||||
`
|
||||
|
||||
var metricsTargets = []struct {
|
||||
target string
|
||||
field string
|
||||
}{
|
||||
{
|
||||
target: "Currently failed:",
|
||||
field: "failed",
|
||||
},
|
||||
{
|
||||
target: "Currently banned:",
|
||||
field: "banned",
|
||||
},
|
||||
}
|
||||
|
||||
func (f *Fail2ban) Description() string {
|
||||
return "Read metrics from fail2ban."
|
||||
}
|
||||
|
||||
func (f *Fail2ban) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (f *Fail2ban) Gather(acc telegraf.Accumulator) error {
|
||||
if len(f.path) == 0 {
|
||||
return errors.New("fail2ban-client not found: verify that fail2ban is installed and that fail2ban-client is in your PATH")
|
||||
}
|
||||
|
||||
name := f.path
|
||||
var arg []string
|
||||
|
||||
if f.UseSudo {
|
||||
name = "sudo"
|
||||
arg = append(arg, f.path)
|
||||
}
|
||||
|
||||
args := append(arg, "status")
|
||||
|
||||
cmd := execCommand(name, args...)
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
|
||||
}
|
||||
lines := strings.Split(string(out), "\n")
|
||||
const targetString = "Jail list:"
|
||||
var jails []string
|
||||
for _, line := range lines {
|
||||
idx := strings.LastIndex(line, targetString)
|
||||
if idx < 0 {
|
||||
// not target line, skip.
|
||||
continue
|
||||
}
|
||||
jails = strings.Split(strings.TrimSpace(line[idx+len(targetString):]), ", ")
|
||||
break
|
||||
}
|
||||
|
||||
for _, jail := range jails {
|
||||
fields := make(map[string]interface{})
|
||||
args := append(arg, "status", jail)
|
||||
cmd := execCommand(name, args...)
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
|
||||
}
|
||||
|
||||
lines := strings.Split(string(out), "\n")
|
||||
for _, line := range lines {
|
||||
key, value := extractCount(line)
|
||||
if key != "" {
|
||||
fields[key] = value
|
||||
}
|
||||
}
|
||||
acc.AddFields("fail2ban", fields, map[string]string{"jail": jail})
|
||||
}
|
||||
return nil
|
||||
}

func extractCount(line string) (string, int) {
	for _, metricsTarget := range metricsTargets {
		idx := strings.LastIndex(line, metricsTarget.target)
		if idx < 0 {
			continue
		}
		ban := strings.TrimSpace(line[idx+len(metricsTarget.target):])
		banCount, err := strconv.Atoi(ban)
		if err != nil {
			return "", -1
		}
		return metricsTarget.field, banCount
	}
	return "", -1
}
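Within the package, `extractCount` behaves like this on single lines of `fail2ban-client status <jail>` output (illustrative values):

```go
// Counter lines yield a field name and value; anything else yields ("", -1).
key, n := extractCount("|  |- Currently failed:\t5") // "failed", 5
key, n = extractCount(" `- Banned IP list:")         // "", -1 (no counter here)
_, _ = key, n
```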

func init() {
	f := Fail2ban{}
	path, _ := exec.LookPath("fail2ban-client")
	if len(path) > 0 {
		f.path = path
	}
	inputs.Add("fail2ban", func() telegraf.Input {
		f := f
		return &f
	})
}
@@ -1,125 +0,0 @@
|
||||
package fail2ban
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
// By all rights, we should use a raw string literal here, but the string contains "`".
|
||||
var execStatusOutput = "Status\n" +
|
||||
"|- Number of jail:\t3\n" +
|
||||
"`- Jail list:\tdovecot, postfix, sshd"
|
||||
var execStatusDovecotOutput = "Status for the jail: dovecot\n" +
|
||||
"|- Filter\n" +
|
||||
"| |- Currently failed:\t11\n" +
|
||||
"| |- Total failed:\t22\n" +
|
||||
"| `- File list:\t/var/log/maillog\n" +
|
||||
"`- Actions\n" +
|
||||
" |- Currently banned:\t0\n" +
|
||||
" |- Total banned:\t100\n" +
|
||||
" `- Banned IP list:"
|
||||
var execStatusPostfixOutput = "Status for the jail: postfix\n" +
|
||||
"|- Filter\n" +
|
||||
"| |- Currently failed:\t4\n" +
|
||||
"| |- Total failed:\t10\n" +
|
||||
"| `- File list:\t/var/log/maillog\n" +
|
||||
"`- Actions\n" +
|
||||
" |- Currently banned:\t3\n" +
|
||||
" |- Total banned:\t60\n" +
|
||||
" `- Banned IP list:\t192.168.10.1 192.168.10.3"
|
||||
var execStatusSshdOutput = "Status for the jail: sshd\n" +
|
||||
"|- Filter\n" +
|
||||
"| |- Currently failed:\t0\n" +
|
||||
"| |- Total failed:\t5\n" +
|
||||
"| `- File list:\t/var/log/secure\n" +
|
||||
"`- Actions\n" +
|
||||
" |- Currently banned:\t2\n" +
|
||||
" |- Total banned:\t50\n" +
|
||||
" `- Banned IP list:\t192.168.0.1 192.168.1.1"
|
||||
|
||||
func TestGather(t *testing.T) {
|
||||
f := Fail2ban{
|
||||
path: "/usr/bin/fail2ban-client",
|
||||
}
|
||||
|
||||
execCommand = fakeExecCommand
|
||||
defer func() { execCommand = exec.Command }()
|
||||
var acc testutil.Accumulator
|
||||
err := f.Gather(&acc)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fields1 := map[string]interface{}{
|
||||
"banned": 2,
|
||||
"failed": 0,
|
||||
}
|
||||
tags1 := map[string]string{
|
||||
"jail": "sshd",
|
||||
}
|
||||
|
||||
fields2 := map[string]interface{}{
|
||||
"banned": 3,
|
||||
"failed": 4,
|
||||
}
|
||||
tags2 := map[string]string{
|
||||
"jail": "postfix",
|
||||
}
|
||||
|
||||
fields3 := map[string]interface{}{
|
||||
"banned": 0,
|
||||
"failed": 11,
|
||||
}
|
||||
tags3 := map[string]string{
|
||||
"jail": "dovecot",
|
||||
}
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "fail2ban", fields1, tags1)
|
||||
acc.AssertContainsTaggedFields(t, "fail2ban", fields2, tags2)
|
||||
acc.AssertContainsTaggedFields(t, "fail2ban", fields3, tags3)
|
||||
}
|
||||
|
||||
func fakeExecCommand(command string, args ...string) *exec.Cmd {
|
||||
cs := []string{"-test.run=TestHelperProcess", "--", command}
|
||||
cs = append(cs, args...)
|
||||
cmd := exec.Command(os.Args[0], cs...)
|
||||
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
|
||||
return cmd
|
||||
}
|
||||
|
||||
func TestHelperProcess(t *testing.T) {
|
||||
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
|
||||
return
|
||||
}
|
||||
|
||||
args := os.Args
|
||||
cmd, args := args[3], args[4:]
|
||||
|
||||
if !strings.HasSuffix(cmd, "fail2ban-client") {
|
||||
fmt.Fprint(os.Stdout, "command not found")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if len(args) == 1 && args[0] == "status" {
|
||||
fmt.Fprint(os.Stdout, execStatusOutput)
|
||||
os.Exit(0)
|
||||
} else if len(args) == 2 && args[0] == "status" {
|
||||
if args[1] == "sshd" {
|
||||
fmt.Fprint(os.Stdout, execStatusSshdOutput)
|
||||
os.Exit(0)
|
||||
} else if args[1] == "postfix" {
|
||||
fmt.Fprint(os.Stdout, execStatusPostfixOutput)
|
||||
os.Exit(0)
|
||||
} else if args[1] == "dovecot" {
|
||||
fmt.Fprint(os.Stdout, execStatusDovecotOutput)
|
||||
os.Exit(0)
|
||||
}
|
||||
}
|
||||
fmt.Fprint(os.Stdout, "invalid argument")
|
||||
os.Exit(1)
|
||||
}
|
||||
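The `fakeExecCommand`/`TestHelperProcess` pair above is the standard pattern (borrowed from the `os/exec` package's own tests) for faking external commands: the test swaps the `execCommand` seam for a function that re-runs the test binary itself with `-test.run=TestHelperProcess`, and an environment guard turns that re-invocation into the fake command. Distilled to its essentials (package name illustrative):

```go
package fakecmd

import (
	"fmt"
	"os"
	"os/exec"
	"testing"
)

// execCommand is the seam that production code calls instead of exec.Command.
var execCommand = exec.Command

// fakeExecCommand re-executes this test binary, running only TestHelperProcess.
func fakeExecCommand(command string, args ...string) *exec.Cmd {
	cs := append([]string{"-test.run=TestHelperProcess", "--", command}, args...)
	cmd := exec.Command(os.Args[0], cs...)
	cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} // flag the child as a fake
	return cmd
}

// TestHelperProcess is inert in a normal test run; in the re-invocation it
// plays the part of the faked command and exits.
func TestHelperProcess(t *testing.T) {
	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
		return
	}
	fmt.Fprint(os.Stdout, "canned command output")
	os.Exit(0)
}
```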
@@ -30,7 +30,7 @@ The filestat plugin gathers metrics about file existence, size, and other stats.
### Example Output:

```
$ telegraf --config /etc/telegraf/telegraf.conf --input-filter filestat --test
$ telegraf -config /etc/telegraf/telegraf.conf -input-filter filestat -test
* Plugin: filestat, Collection 1
> filestat,file=/tmp/foo/bar,host=tyrion exists=0i 1461203374493128216
> filestat,file=/Users/sparrc/ws/telegraf.conf,host=tyrion exists=1i,size=47894i 1461203374493199335

@@ -1,64 +0,0 @@
# Fluentd Input Plugin

The fluentd plugin gathers metrics from the plugin endpoint provided by the [in_monitor plugin](http://docs.fluentd.org/v0.12/articles/monitoring).
This plugin understands data provided by the /api/plugins.json resource (/api/config.json is not covered).

You might need to adjust your fluentd configuration in order to reduce series cardinality in case your fluentd restarts frequently. Every time fluentd starts, the `plugin_id` value is given a new random value.
According to the [fluentd documentation](http://docs.fluentd.org/v0.12/articles/config-file), you can add an `@id` parameter to each plugin to avoid this behaviour and define a custom `plugin_id`.

Example configuration with the `@id` parameter for the http plugin:
```
<source>
  @type http
  @id http
  port 8888
</source>
```

### Configuration:

```toml
# Read metrics exposed by fluentd in_monitor plugin
[[inputs.fluentd]]
  ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
  ##
  ## Endpoint:
  ## - only one URI is allowed
  ## - https is not supported
  endpoint = "http://localhost:24220/api/plugins.json"

  ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
  exclude = [
    "monitor_agent",
    "dummy",
  ]
```

### Measurements & Fields:

Fields may vary depending on the type of the plugin

- fluentd
  - retry_count (float, unit)
  - buffer_queue_length (float, unit)
  - buffer_total_queued_size (float, unit)

### Tags:

- All measurements have the following tags:
  - plugin_id (unique plugin id)
  - plugin_type (type of the plugin e.g. s3)
  - plugin_category (plugin category e.g. output)

### Example Output:

```
$ telegraf --config fluentd.conf --input-filter fluentd --test
* Plugin: inputs.fluentd, Collection 1
> fluentd,host=T440s,plugin_id=object:9f748c,plugin_category=input,plugin_type=dummy buffer_total_queued_size=0,buffer_queue_length=0,retry_count=0 1492006105000000000
> fluentd,plugin_category=input,plugin_type=dummy,host=T440s,plugin_id=object:8da98c buffer_queue_length=0,retry_count=0,buffer_total_queued_size=0 1492006105000000000
> fluentd,plugin_id=object:820190,plugin_category=input,plugin_type=monitor_agent,host=T440s retry_count=0,buffer_total_queued_size=0,buffer_queue_length=0 1492006105000000000
> fluentd,plugin_id=object:c5e054,plugin_category=output,plugin_type=stdout,host=T440s buffer_queue_length=0,retry_count=0,buffer_total_queued_size=0 1492006105000000000
> fluentd,plugin_type=s3,host=T440s,plugin_id=object:bd7a90,plugin_category=output buffer_queue_length=0,retry_count=0,buffer_total_queued_size=0 1492006105000000000

```
@@ -1,173 +0,0 @@
|
||||
package fluentd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
const (
|
||||
measurement = "fluentd"
|
||||
description = "Read metrics exposed by fluentd in_monitor plugin"
|
||||
sampleConfig = `
|
||||
## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
|
||||
##
|
||||
## Endpoint:
|
||||
## - only one URI is allowed
|
||||
## - https is not supported
|
||||
endpoint = "http://localhost:24220/api/plugins.json"
|
||||
|
||||
## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
|
||||
exclude = [
|
||||
"monitor_agent",
|
||||
"dummy",
|
||||
]
|
||||
`
|
||||
)
|
||||
|
||||
// Fluentd - plugin main structure
|
||||
type Fluentd struct {
|
||||
Endpoint string
|
||||
Exclude []string
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
type endpointInfo struct {
|
||||
Payload []pluginData `json:"plugins"`
|
||||
}
|
||||
|
||||
type pluginData struct {
|
||||
PluginID string `json:"plugin_id"`
|
||||
PluginType string `json:"type"`
|
||||
PluginCategory string `json:"plugin_category"`
|
||||
RetryCount *float64 `json:"retry_count"`
|
||||
BufferQueueLength *float64 `json:"buffer_queue_length"`
|
||||
BufferTotalQueuedSize *float64 `json:"buffer_total_queued_size"`
|
||||
}
|
||||
|
||||
// parse JSON from fluentd Endpoint
// Parameters:
//     data: unprocessed json received from endpoint
//
// Returns:
//     pluginData: slice that contains parsed plugins
//     error: error that may have occurred
func parse(data []byte) (datapointArray []pluginData, err error) {
	var endpointData endpointInfo

	if err = json.Unmarshal(data, &endpointData); err != nil {
		err = fmt.Errorf("Processing JSON structure")
		return
	}

	for _, point := range endpointData.Payload {
		datapointArray = append(datapointArray, point)
	}

	return
}
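Because the counter fields of `pluginData` are `*float64`, `parse` preserves the difference between a counter that is absent (or JSON `null`) and one that is zero, for example:

```go
// Illustrative, inside the package: a null retry_count stays a nil pointer.
points, err := parse([]byte(`{"plugins":[{"plugin_id":"object:1","type":"dummy","plugin_category":"input","retry_count":null}]}`))
if err == nil && points[0].RetryCount == nil {
	// nil means "not reported", which Gather later uses to skip the field
}
```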

// Description - display description
func (h *Fluentd) Description() string { return description }

// SampleConfig - generate configuration
func (h *Fluentd) SampleConfig() string { return sampleConfig }
|
||||
// Gather - Main code responsible for gathering, processing and creating metrics
|
||||
func (h *Fluentd) Gather(acc telegraf.Accumulator) error {
|
||||
|
||||
_, err := url.Parse(h.Endpoint)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Invalid URL \"%s\"", h.Endpoint)
|
||||
}
|
||||
|
||||
if h.client == nil {
|
||||
|
||||
tr := &http.Transport{
|
||||
ResponseHeaderTimeout: time.Duration(3 * time.Second),
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Transport: tr,
|
||||
Timeout: time.Duration(4 * time.Second),
|
||||
}
|
||||
|
||||
h.client = client
|
||||
}
|
||||
|
||||
resp, err := h.client.Get(h.Endpoint)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to perform HTTP client GET on \"%s\": %s", h.Endpoint, err)
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to read the HTTP body \"%s\": %s", string(body), err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("http status ok not met")
|
||||
}
|
||||
|
||||
dataPoints, err := parse(body)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Problem with parsing")
|
||||
}
|
||||
|
||||
// Go through all plugins one by one
|
||||
for _, p := range dataPoints {
|
||||
|
||||
skip := false
|
||||
|
||||
// Check if this specific type was excluded in configuration
|
||||
for _, exclude := range h.Exclude {
|
||||
if exclude == p.PluginType {
|
||||
skip = true
|
||||
}
|
||||
}
|
||||
|
||||
// If not, create new metric and add it to Accumulator
|
||||
if !skip {
|
||||
tmpFields := make(map[string]interface{})
|
||||
|
||||
tmpTags := map[string]string{
|
||||
"plugin_id": p.PluginID,
|
||||
"plugin_category": p.PluginCategory,
|
||||
"plugin_type": p.PluginType,
|
||||
}
|
||||
|
||||
if p.BufferQueueLength != nil {
|
||||
tmpFields["buffer_queue_length"] = p.BufferQueueLength
|
||||
|
||||
}
|
||||
if p.RetryCount != nil {
|
||||
tmpFields["retry_count"] = p.RetryCount
|
||||
}
|
||||
|
||||
if p.BufferTotalQueuedSize != nil {
|
||||
tmpFields["buffer_total_queued_size"] = p.BufferTotalQueuedSize
|
||||
}
|
||||
|
||||
if !((p.BufferQueueLength == nil) && (p.RetryCount == nil) && (p.BufferTotalQueuedSize == nil)) {
|
||||
acc.AddFields(measurement, tmpFields, tmpTags)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("fluentd", func() telegraf.Input { return &Fluentd{} })
|
||||
}
|
||||
@@ -1,169 +0,0 @@
|
||||
package fluentd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// sampleJSON from fluentd version '0.14.9'
|
||||
const sampleJSON = `
|
||||
{
|
||||
"plugins": [
|
||||
{
|
||||
"plugin_id": "object:f48698",
|
||||
"plugin_category": "input",
|
||||
"type": "dummy",
|
||||
"config": {
|
||||
"@type": "dummy",
|
||||
"@log_level": "info",
|
||||
"tag": "stdout.page.node",
|
||||
"rate": "",
|
||||
"dummy": "{\"hello\":\"world_from_first_dummy\"}",
|
||||
"auto_increment_key": "id1"
|
||||
},
|
||||
"output_plugin": false,
|
||||
"retry_count": null
|
||||
},
|
||||
{
|
||||
"plugin_id": "object:e27138",
|
||||
"plugin_category": "input",
|
||||
"type": "dummy",
|
||||
"config": {
|
||||
"@type": "dummy",
|
||||
"@log_level": "info",
|
||||
"tag": "stdout.superproject.supercontainer",
|
||||
"rate": "",
|
||||
"dummy": "{\"hello\":\"world_from_second_dummy\"}",
|
||||
"auto_increment_key": "id1"
|
||||
},
|
||||
"output_plugin": false,
|
||||
"retry_count": null
|
||||
},
|
||||
{
|
||||
"plugin_id": "object:d74060",
|
||||
"plugin_category": "input",
|
||||
"type": "monitor_agent",
|
||||
"config": {
|
||||
"@type": "monitor_agent",
|
||||
"@log_level": "error",
|
||||
"bind": "0.0.0.0",
|
||||
"port": "24220"
|
||||
},
|
||||
"output_plugin": false,
|
||||
"retry_count": null
|
||||
},
|
||||
{
|
||||
"plugin_id": "object:11a5e2c",
|
||||
"plugin_category": "output",
|
||||
"type": "stdout",
|
||||
"config": {
|
||||
"@type": "stdout"
|
||||
},
|
||||
"output_plugin": true,
|
||||
"retry_count": 0
|
||||
},
|
||||
{
|
||||
"plugin_id": "object:11237ec",
|
||||
"plugin_category": "output",
|
||||
"type": "s3",
|
||||
"config": {
|
||||
"@type": "s3",
|
||||
"@log_level": "info",
|
||||
"aws_key_id": "xxxxxx",
|
||||
"aws_sec_key": "xxxxxx",
|
||||
"s3_bucket": "bucket",
|
||||
"s3_endpoint": "http://mock:4567",
|
||||
"path": "logs/%Y%m%d_%H/${tag[1]}/",
|
||||
"time_slice_format": "%M",
|
||||
"s3_object_key_format": "%{path}%{time_slice}_%{hostname}_%{index}_%{hex_random}.%{file_extension}",
|
||||
"store_as": "gzip"
|
||||
},
|
||||
"output_plugin": true,
|
||||
"buffer_queue_length": 0,
|
||||
"buffer_total_queued_size": 0,
|
||||
"retry_count": 0
|
||||
}
|
||||
]
|
||||
}
|
||||
`
|
||||
|
||||
var (
|
||||
zero float64
|
||||
err error
|
||||
pluginOutput []pluginData
|
||||
expectedOutput = []pluginData{
|
||||
// {"object:f48698", "dummy", "input", nil, nil, nil},
|
||||
// {"object:e27138", "dummy", "input", nil, nil, nil},
|
||||
// {"object:d74060", "monitor_agent", "input", nil, nil, nil},
|
||||
{"object:11a5e2c", "stdout", "output", (*float64)(&zero), nil, nil},
|
||||
{"object:11237ec", "s3", "output", (*float64)(&zero), (*float64)(&zero), (*float64)(&zero)},
|
||||
}
|
||||
fluentdTest = &Fluentd{
|
||||
Endpoint: "http://localhost:8081",
|
||||
}
|
||||
)
|
||||
|
||||
func Test_parse(t *testing.T) {
|
||||
|
||||
t.Log("Testing parser function")
|
||||
_, err := parse([]byte(sampleJSON))
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func Test_Gather(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping Gather function test")
|
||||
}
|
||||
|
||||
t.Log("Testing Gather function")
|
||||
|
||||
t.Logf("Start HTTP mock (%s) with sampleJSON", fluentdTest.Endpoint)
|
||||
|
||||
ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
fmt.Fprintf(w, "%s", string(sampleJSON))
|
||||
}))
|
||||
|
||||
requestURL, err := url.Parse(fluentdTest.Endpoint)
|
||||
|
||||
ts.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port()))
|
||||
|
||||
ts.Start()
|
||||
|
||||
defer ts.Close()
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err = fluentdTest.Gather(&acc)
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !acc.HasMeasurement("fluentd") {
|
||||
t.Errorf("acc.HasMeasurement: expected fluentd")
|
||||
}
|
||||
|
||||
assert.Equal(t, expectedOutput[0].PluginID, acc.Metrics[0].Tags["plugin_id"])
|
||||
assert.Equal(t, expectedOutput[0].PluginType, acc.Metrics[0].Tags["plugin_type"])
|
||||
assert.Equal(t, expectedOutput[0].PluginCategory, acc.Metrics[0].Tags["plugin_category"])
|
||||
assert.Equal(t, expectedOutput[0].RetryCount, acc.Metrics[0].Fields["retry_count"])
|
||||
|
||||
assert.Equal(t, expectedOutput[1].PluginID, acc.Metrics[1].Tags["plugin_id"])
|
||||
assert.Equal(t, expectedOutput[1].PluginType, acc.Metrics[1].Tags["plugin_type"])
|
||||
assert.Equal(t, expectedOutput[1].PluginCategory, acc.Metrics[1].Tags["plugin_category"])
|
||||
assert.Equal(t, expectedOutput[1].RetryCount, acc.Metrics[1].Fields["retry_count"])
|
||||
assert.Equal(t, expectedOutput[1].BufferQueueLength, acc.Metrics[1].Fields["buffer_queue_length"])
|
||||
assert.Equal(t, expectedOutput[1].BufferTotalQueuedSize, acc.Metrics[1].Fields["buffer_total_queued_size"])
|
||||
|
||||
}
|
||||
@@ -1,3 +1,5 @@
// +build linux

package hddtemp

import (

3
plugins/inputs/hddtemp/hddtemp_nocompile.go
Normal file
@@ -0,0 +1,3 @@
// +build !linux

package hddtemp
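The two hunks above are the usual build-constraint pairing: the real implementation gains `// +build linux`, and the new stub file carries the inverse tag so the package still compiles on every other platform. The stub needs nothing beyond the constraint and the package clause:

```go
// +build !linux

// An empty shell of the package: on non-Linux targets this is all that gets
// compiled, so importers of the plugin still build cleanly.
package hddtemp
```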
@@ -8,26 +8,25 @@ This input plugin will test HTTP/HTTPS connections.
# HTTP/HTTPS request given an address a method and a timeout
[[inputs.http_response]]
  ## Server address (default http://localhost)
  # address = "http://localhost"

  address = "http://github.com"
  ## Set response_timeout (default 5 seconds)
  # response_timeout = "5s"

  response_timeout = "5s"
  ## HTTP Request Method
  # method = "GET"

  method = "GET"
  ## Whether to follow redirects from the server (defaults to false)
  # follow_redirects = false

  follow_redirects = true
  ## HTTP Request Headers (all values must be strings)
  # [inputs.http_response.headers]
  #   Host = "github.com"
  ## Optional HTTP Request Body
  # body = '''
  # {'fake':'data'}
  # '''

  ## Optional substring or regex match in body of the response
  # response_string_match = "\"service_status\": \"up\""
  # response_string_match = "ok"
  # response_string_match = "\".*_status\".?:.?\"up\""
  ## response_string_match = "\"service_status\": \"up\""
  ## response_string_match = "ok"
  ## response_string_match = "\".*_status\".?:.?\"up\""

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
@@ -35,10 +34,6 @@ This input plugin will test HTTP/HTTPS connections.
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## HTTP Request Headers (all values must be strings)
  # [inputs.http_response.headers]
  #   Host = "github.com"
```

### Measurements & Fields:

@@ -46,7 +41,6 @@ This input plugin will test HTTP/HTTPS connections.
- http_response
  - response_time (float, seconds)
  - http_response_code (int) #The code received
  - result_type (string) # success, timeout, response_string_mismatch, connection_failed

### Tags:

@@ -57,5 +51,6 @@ This input plugin will test HTTP/HTTPS connections.
### Example Output:

```
$ ./telegraf -config telegraf.conf -input-filter http_response -test
http_response,method=GET,server=http://www.github.com http_response_code=200i,response_time=6.223266528 1459419354977857955
```

@@ -5,7 +5,6 @@ import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
@@ -26,6 +25,7 @@ type HTTPResponse struct {
|
||||
Headers map[string]string
|
||||
FollowRedirects bool
|
||||
ResponseStringMatch string
|
||||
compiledStringMatch *regexp.Regexp
|
||||
|
||||
// Path to CA file
|
||||
SSLCA string `toml:"ssl_ca"`
|
||||
@@ -35,9 +35,6 @@ type HTTPResponse struct {
|
||||
SSLKey string `toml:"ssl_key"`
|
||||
// Use SSL but skip chain & host verification
|
||||
InsecureSkipVerify bool
|
||||
|
||||
compiledStringMatch *regexp.Regexp
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
// Description returns the plugin Description
|
||||
@@ -47,26 +44,25 @@ func (h *HTTPResponse) Description() string {
|
||||
|
||||
var sampleConfig = `
|
||||
## Server address (default http://localhost)
|
||||
# address = "http://localhost"
|
||||
|
||||
address = "http://github.com"
|
||||
## Set response_timeout (default 5 seconds)
|
||||
# response_timeout = "5s"
|
||||
|
||||
response_timeout = "5s"
|
||||
## HTTP Request Method
|
||||
# method = "GET"
|
||||
|
||||
method = "GET"
|
||||
## Whether to follow redirects from the server (defaults to false)
|
||||
# follow_redirects = false
|
||||
|
||||
follow_redirects = true
|
||||
## HTTP Request Headers (all values must be strings)
|
||||
# [inputs.http_response.headers]
|
||||
# Host = "github.com"
|
||||
## Optional HTTP Request Body
|
||||
# body = '''
|
||||
# {'fake':'data'}
|
||||
# '''
|
||||
|
||||
## Optional substring or regex match in body of the response
|
||||
# response_string_match = "\"service_status\": \"up\""
|
||||
# response_string_match = "ok"
|
||||
# response_string_match = "\".*_status\".?:.?\"up\""
|
||||
## response_string_match = "\"service_status\": \"up\""
|
||||
## response_string_match = "ok"
|
||||
## response_string_match = "\".*_status\".?:.?\"up\""
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
@@ -74,10 +70,6 @@ var sampleConfig = `
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
|
||||
## HTTP Request Headers (all values must be strings)
|
||||
# [inputs.http_response.headers]
|
||||
# Host = "github.com"
|
||||
`
|
||||
|
||||
// SampleConfig returns the plugin SampleConfig
|
||||
@@ -96,12 +88,13 @@ func (h *HTTPResponse) createHttpClient() (*http.Client, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tr := &http.Transport{
|
||||
ResponseHeaderTimeout: h.ResponseTimeout.Duration,
|
||||
TLSClientConfig: tlsCfg,
|
||||
}
|
||||
client := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
DisableKeepAlives: true,
|
||||
TLSClientConfig: tlsCfg,
|
||||
},
|
||||
Timeout: h.ResponseTimeout.Duration,
|
||||
Transport: tr,
|
||||
Timeout: h.ResponseTimeout.Duration,
|
||||
}
|
||||
|
||||
if h.FollowRedirects == false {
|
||||
@@ -113,10 +106,15 @@ func (h *HTTPResponse) createHttpClient() (*http.Client, error) {
|
||||
}
|
||||
|
||||
// HTTPGather gathers all fields and returns any errors it encounters
|
||||
func (h *HTTPResponse) httpGather() (map[string]interface{}, error) {
|
||||
func (h *HTTPResponse) HTTPGather() (map[string]interface{}, error) {
|
||||
// Prepare fields
|
||||
fields := make(map[string]interface{})
|
||||
|
||||
client, err := h.createHttpClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var body io.Reader
|
||||
if h.Body != "" {
|
||||
body = strings.NewReader(h.Body)
|
||||
@@ -135,29 +133,18 @@ func (h *HTTPResponse) httpGather() (map[string]interface{}, error) {
|
||||
|
||||
// Start Timer
|
||||
start := time.Now()
|
||||
resp, err := h.client.Do(request)
|
||||
|
||||
resp, err := client.Do(request)
|
||||
if err != nil {
|
||||
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
|
||||
fields["result_type"] = "timeout"
|
||||
return fields, nil
|
||||
}
|
||||
fields["result_type"] = "connection_failed"
|
||||
if h.FollowRedirects {
|
||||
return fields, nil
|
||||
return nil, err
|
||||
}
|
||||
if urlError, ok := err.(*url.Error); ok &&
|
||||
urlError.Err == ErrRedirectAttempted {
|
||||
err = nil
|
||||
} else {
|
||||
return fields, nil
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
defer func() {
|
||||
io.Copy(ioutil.Discard, resp.Body)
|
||||
resp.Body.Close()
|
||||
}()
|
||||
|
||||
fields["response_time"] = time.Since(start).Seconds()
|
||||
fields["http_response_code"] = resp.StatusCode
|
||||
|
||||
@@ -169,7 +156,7 @@ func (h *HTTPResponse) httpGather() (map[string]interface{}, error) {
|
||||
h.compiledStringMatch = regexp.MustCompile(h.ResponseStringMatch)
|
||||
if err != nil {
|
||||
log.Printf("E! Failed to compile regular expression %s : %s", h.ResponseStringMatch, err)
|
||||
fields["result_type"] = "response_string_mismatch"
|
||||
fields["response_string_match"] = 0
|
||||
return fields, nil
|
||||
}
|
||||
}
|
||||
@@ -177,20 +164,16 @@ func (h *HTTPResponse) httpGather() (map[string]interface{}, error) {
|
||||
bodyBytes, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
log.Printf("E! Failed to read body of HTTP Response : %s", err)
|
||||
fields["result_type"] = "response_string_mismatch"
|
||||
fields["response_string_match"] = 0
|
||||
return fields, nil
|
||||
}
|
||||
|
||||
if h.compiledStringMatch.Match(bodyBytes) {
|
||||
fields["result_type"] = "success"
|
||||
fields["response_string_match"] = 1
|
||||
} else {
|
||||
fields["result_type"] = "response_string_mismatch"
|
||||
fields["response_string_match"] = 0
|
||||
}
|
||||
} else {
|
||||
fields["result_type"] = "success"
|
||||
|
||||
}
|
||||
|
||||
return fields, nil
|
||||
@@ -219,17 +202,8 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
	// Prepare data
	tags := map[string]string{"server": h.Address, "method": h.Method}
	var fields map[string]interface{}

	if h.client == nil {
		client, err := h.createHttpClient()
		if err != nil {
			return err
		}
		h.client = client
	}

	// Gather data
	fields, err = h.httpGather()
	fields, err = h.HTTPGather()
	if err != nil {
		return err
	}
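One side of this hunk builds the HTTP client on every gather while the other caches it on the struct. A sketch of the lazy-caching shape, with illustrative types:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// prober caches its HTTP client so repeated gather calls reuse one
// Transport instead of rebuilding it (and re-reading TLS material)
// on every interval.
type prober struct {
	client *http.Client
}

func (p *prober) gather(url string) (int, error) {
	if p.client == nil { // build once, on first use
		p.client = &http.Client{Timeout: 5 * time.Second}
	}
	resp, err := p.client.Get(url)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	return resp.StatusCode, nil
}

func main() {
	p := &prober{}
	code, err := p.gather("http://localhost:8086/ping")
	fmt.Println(code, err)
}
```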
@@ -9,7 +9,6 @@ import (
	"time"

	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/testutil"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
@@ -74,13 +73,13 @@ func TestHeaders(t *testing.T) {
			"Host": "Hello",
		},
	}
	var acc testutil.Accumulator
	err := h.Gather(&acc)
	fields, err := h.HTTPGather()
	require.NoError(t, err)

	value, ok := acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusOK, value)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusOK, fields["http_response_code"])
	}
	assert.NotNil(t, fields["response_time"])
}

func TestFields(t *testing.T) {
@@ -98,17 +97,13 @@ func TestFields(t *testing.T) {
		},
		FollowRedirects: true,
	}

	var acc testutil.Accumulator
	err := h.Gather(&acc)
	fields, err := h.HTTPGather()
	require.NoError(t, err)

	value, ok := acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusOK, value)
	response_value, ok := acc.StringField("http_response", "result_type")
	require.True(t, ok)
	require.Equal(t, "success", response_value)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusOK, fields["http_response_code"])
	}
	assert.NotNil(t, fields["response_time"])
}

func TestRedirects(t *testing.T) {
@@ -126,13 +121,12 @@ func TestRedirects(t *testing.T) {
		},
		FollowRedirects: true,
	}
	var acc testutil.Accumulator
	err := h.Gather(&acc)
	fields, err := h.HTTPGather()
	require.NoError(t, err)

	value, ok := acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusOK, value)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusOK, fields["http_response_code"])
	}

	h = &HTTPResponse{
		Address: ts.URL + "/badredirect",
@@ -144,15 +138,8 @@ func TestRedirects(t *testing.T) {
		},
		FollowRedirects: true,
	}
	acc = testutil.Accumulator{}
	err = h.Gather(&acc)
	require.NoError(t, err)

	value, ok = acc.IntField("http_response", "http_response_code")
	require.False(t, ok)
	response_value, ok := acc.StringField("http_response", "result_type")
	require.True(t, ok)
	require.Equal(t, "connection_failed", response_value)
	fields, err = h.HTTPGather()
	require.Error(t, err)
}

func TestMethod(t *testing.T) {
@@ -170,13 +157,12 @@ func TestMethod(t *testing.T) {
		},
		FollowRedirects: true,
	}
	var acc testutil.Accumulator
	err := h.Gather(&acc)
	fields, err := h.HTTPGather()
	require.NoError(t, err)

	value, ok := acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusOK, value)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusOK, fields["http_response_code"])
	}

	h = &HTTPResponse{
		Address: ts.URL + "/mustbepostmethod",
@@ -188,13 +174,12 @@ func TestMethod(t *testing.T) {
		},
		FollowRedirects: true,
	}
	acc = testutil.Accumulator{}
	err = h.Gather(&acc)
	fields, err = h.HTTPGather()
	require.NoError(t, err)

	value, ok = acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusMethodNotAllowed, value)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusMethodNotAllowed, fields["http_response_code"])
	}

	// check that lowercase methods work correctly
	h = &HTTPResponse{
@@ -207,13 +192,12 @@ func TestMethod(t *testing.T) {
		},
		FollowRedirects: true,
	}
	acc = testutil.Accumulator{}
	err = h.Gather(&acc)
	fields, err = h.HTTPGather()
	require.NoError(t, err)

	value, ok = acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusMethodNotAllowed, value)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusMethodNotAllowed, fields["http_response_code"])
	}
}

func TestBody(t *testing.T) {
@@ -231,13 +215,12 @@ func TestBody(t *testing.T) {
		},
		FollowRedirects: true,
	}
	var acc testutil.Accumulator
	err := h.Gather(&acc)
	fields, err := h.HTTPGather()
	require.NoError(t, err)

	value, ok := acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusOK, value)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusOK, fields["http_response_code"])
	}

	h = &HTTPResponse{
		Address: ts.URL + "/musthaveabody",
@@ -248,13 +231,12 @@ func TestBody(t *testing.T) {
		},
		FollowRedirects: true,
	}
	acc = testutil.Accumulator{}
	err = h.Gather(&acc)
	fields, err = h.HTTPGather()
	require.NoError(t, err)

	value, ok = acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusBadRequest, value)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusBadRequest, fields["http_response_code"])
	}
}

func TestStringMatch(t *testing.T) {
@@ -273,21 +255,15 @@ func TestStringMatch(t *testing.T) {
		},
		FollowRedirects: true,
	}
	var acc testutil.Accumulator
	err := h.Gather(&acc)
	fields, err := h.HTTPGather()
	require.NoError(t, err)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusOK, fields["http_response_code"])
	}
	assert.Equal(t, 1, fields["response_string_match"])
	assert.NotNil(t, fields["response_time"])

	value, ok := acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusOK, value)
	value, ok = acc.IntField("http_response", "response_string_match")
	require.True(t, ok)
	require.Equal(t, 1, value)
	response_value, ok := acc.StringField("http_response", "result_type")
	require.True(t, ok)
	require.Equal(t, "success", response_value)
	_, ok = acc.FloatField("http_response", "response_time")
	require.True(t, ok)
}

func TestStringMatchJson(t *testing.T) {
@@ -306,21 +282,15 @@ func TestStringMatchJson(t *testing.T) {
		},
		FollowRedirects: true,
	}
	var acc testutil.Accumulator
	err := h.Gather(&acc)
	fields, err := h.HTTPGather()
	require.NoError(t, err)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusOK, fields["http_response_code"])
	}
	assert.Equal(t, 1, fields["response_string_match"])
	assert.NotNil(t, fields["response_time"])

	value, ok := acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusOK, value)
	value, ok = acc.IntField("http_response", "response_string_match")
	require.True(t, ok)
	require.Equal(t, 1, value)
	response_value, ok := acc.StringField("http_response", "result_type")
	require.True(t, ok)
	require.Equal(t, "success", response_value)
	_, ok = acc.FloatField("http_response", "response_time")
	require.True(t, ok)
}

func TestStringMatchFail(t *testing.T) {
@@ -339,29 +309,18 @@ func TestStringMatchFail(t *testing.T) {
		},
		FollowRedirects: true,
	}

	var acc testutil.Accumulator
	err := h.Gather(&acc)
	fields, err := h.HTTPGather()
	require.NoError(t, err)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusOK, fields["http_response_code"])
	}
	assert.Equal(t, 0, fields["response_string_match"])
	assert.NotNil(t, fields["response_time"])

	value, ok := acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusOK, value)
	value, ok = acc.IntField("http_response", "response_string_match")
	require.True(t, ok)
	require.Equal(t, 0, value)
	response_value, ok := acc.StringField("http_response", "result_type")
	require.True(t, ok)
	require.Equal(t, "response_string_mismatch", response_value)
	_, ok = acc.FloatField("http_response", "response_time")
	require.True(t, ok)
}

func TestTimeout(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping test with sleep in short mode.")
	}

	mux := setUpTestMux()
	ts := httptest.NewServer(mux)
	defer ts.Close()
@@ -370,21 +329,12 @@ func TestTimeout(t *testing.T) {
		Address: ts.URL + "/twosecondnap",
		Body:    "{ 'test': 'data'}",
		Method:  "GET",
		ResponseTimeout: internal.Duration{Duration: time.Second},
		ResponseTimeout: internal.Duration{Duration: time.Millisecond},
		Headers: map[string]string{
			"Content-Type": "application/json",
		},
		FollowRedirects: true,
	}
	var acc testutil.Accumulator
	err := h.Gather(&acc)
	require.NoError(t, err)

	_, ok := acc.IntField("http_response", "http_response_code")
	require.False(t, ok)
	response_value, ok := acc.StringField("http_response", "result_type")
	require.True(t, ok)
	require.Equal(t, "timeout", response_value)
	_, ok = acc.FloatField("http_response", "response_time")
	require.False(t, ok)
	_, err := h.HTTPGather()
	require.Error(t, err)
}
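`TestTimeout` above depends on a `setUpTestMux` helper that sits outside this diff. A plausible reconstruction of the one route it needs, assuming the handler simply sleeps past the client timeout:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"time"
)

// setUpTestMux is not shown in the diff; this is an assumed shape for the
// route TestTimeout exercises: a handler that naps for two seconds, so a
// millisecond-scale client timeout must fire first.
func setUpTestMux() *http.ServeMux {
	mux := http.NewServeMux()
	mux.HandleFunc("/twosecondnap", func(w http.ResponseWriter, r *http.Request) {
		time.Sleep(2 * time.Second)
	})
	return mux
}

func main() {
	ts := httptest.NewServer(setUpTestMux())
	defer ts.Close()
	client := &http.Client{Timeout: time.Millisecond}
	_, err := client.Get(ts.URL + "/twosecondnap")
	fmt.Println(err != nil) // true: the client times out first
}
```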
@@ -19,16 +19,6 @@ InfluxDB-formatted endpoints. See below for more information.
  urls = [
    "http://localhost:8086/debug/vars"
  ]

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## http request & header timeout
  timeout = "5s"
```

### Measurements & Fields

@@ -74,7 +64,7 @@ InfluxDB-formatted endpoints. See below for more information.
### Example Output:

```
telegraf --config ~/ws/telegraf.conf --input-filter influxdb --test
telegraf -config ~/ws/telegraf.conf -input-filter influxdb -test
* Plugin: influxdb, Collection 1
> influxdb_database,database=_internal,host=tyrion,url=http://localhost:8086/debug/vars numMeasurements=10,numSeries=29 1463590500247354636
> influxdb_httpd,bind=:8086,host=tyrion,url=http://localhost:8086/debug/vars req=7,reqActive=1,reqDurationNs=14227734 1463590500247354636
```
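The `/debug/vars` endpoint above is Go's standard expvar output: a single JSON object of statistic blocks per process. A minimal sketch of fetching and decoding it, independent of the plugin:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// fetchVars pulls the expvar JSON the plugin consumes. The document is one
// JSON object whose values are the individual statistics blocks.
func fetchVars(url string) (map[string]json.RawMessage, error) {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	vars := make(map[string]json.RawMessage)
	err = json.NewDecoder(resp.Body).Decode(&vars)
	return vars, err
}

func main() {
	vars, err := fetchVars("http://localhost:8086/debug/vars")
	if err != nil {
		fmt.Println("fetch failed:", err)
		return
	}
	fmt.Println("top-level stat blocks:", len(vars))
}
```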
@@ -15,14 +15,6 @@ import (

type InfluxDB struct {
	URLs []string `toml:"urls"`
	// Path to CA file
	SSLCA string `toml:"ssl_ca"`
	// Path to host cert file
	SSLCert string `toml:"ssl_cert"`
	// Path to cert key file
	SSLKey string `toml:"ssl_key"`
	// Use SSL but skip chain & host verification
	InsecureSkipVerify bool

	Timeout internal.Duration

@@ -45,13 +37,6 @@ func (*InfluxDB) SampleConfig() string {
    "http://localhost:8086/debug/vars"
  ]

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## http request & header timeout
  timeout = "5s"
`
@@ -63,15 +48,9 @@ func (i *InfluxDB) Gather(acc telegraf.Accumulator) error {
	}

	if i.client == nil {
		tlsCfg, err := internal.GetTLSConfig(
			i.SSLCert, i.SSLKey, i.SSLCA, i.InsecureSkipVerify)
		if err != nil {
			return err
		}
		i.client = &http.Client{
			Transport: &http.Transport{
				ResponseHeaderTimeout: i.Timeout.Duration,
				TLSClientConfig:       tlsCfg,
			},
			Timeout: i.Timeout.Duration,
		}
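The restored code above threads the four `ssl_*` settings through `internal.GetTLSConfig` into `crypto/tls`. A self-contained approximation of what such a helper does (a sketch, not telegraf's actual implementation):

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
)

// getTLSConfig approximates the helper used above: it returns nil (and thus
// plain HTTP) when no SSL option is set, otherwise a *tls.Config built from
// the optional client cert pair and CA bundle.
func getTLSConfig(sslCert, sslKey, sslCA string, insecure bool) (*tls.Config, error) {
	if sslCert == "" && sslKey == "" && sslCA == "" && !insecure {
		return nil, nil
	}
	cfg := &tls.Config{InsecureSkipVerify: insecure}
	if sslCA != "" {
		pem, err := ioutil.ReadFile(sslCA)
		if err != nil {
			return nil, err
		}
		pool := x509.NewCertPool()
		pool.AppendCertsFromPEM(pem)
		cfg.RootCAs = pool
	}
	if sslCert != "" && sslKey != "" {
		cert, err := tls.LoadX509KeyPair(sslCert, sslKey)
		if err != nil {
			return nil, err
		}
		cfg.Certificates = []tls.Certificate{cert}
	}
	return cfg, nil
}

func main() {
	cfg, err := getTLSConfig("", "", "", true)
	fmt.Println(cfg.InsecureSkipVerify, err) // true <nil>
}
```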
@@ -26,7 +26,7 @@ There are two measurements reported by this plugin.

### Example Output
```
./telegraf --config ~/interrupts_config.conf --test
./telegraf -config ~/interrupts_config.conf -test
* Plugin: inputs.interrupts, Collection 1
> interrupts,irq=0,type=IO-APIC,device=2-edge\ timer,host=hostname CPU0=23i,total=23i 1489346531000000000
> interrupts,irq=1,host=hostname,type=IO-APIC,device=1-edge\ i8042 CPU0=9i,total=9i 1489346531000000000
```
@@ -43,13 +43,6 @@ The `server` tag will be made available when retrieving stats from remote server
  ## if no servers are specified, local machine sensor stats will be queried
  ##
  # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]

  ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
  ## gaps or overlap in pulled data
  interval = "30s"

  ## Timeout for the ipmitool command to complete. Default is 20 seconds.
  timeout = "20s"
```

## Output
@@ -19,7 +19,6 @@ var (
type Ipmi struct {
	Path    string
	Servers []string
	Timeout internal.Duration
}

var sampleConfig = `
@@ -34,13 +33,6 @@ var sampleConfig = `
  ## if no servers are specified, local machine sensor stats will be queried
  ##
  # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]

  ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
  ## gaps or overlap in pulled data
  interval = "30s"

  ## Timeout for the ipmitool command to complete
  timeout = "20s"
`

func (m *Ipmi) SampleConfig() string {
@@ -86,7 +78,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error {

	opts = append(opts, "sdr")
	cmd := execCommand(m.Path, opts...)
	out, err := internal.CombinedOutputTimeout(cmd, m.Timeout.Duration)
	out, err := internal.CombinedOutputTimeout(cmd, time.Second*5)
	if err != nil {
		return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
	}
@@ -160,7 +152,6 @@ func init() {
	if len(path) > 0 {
		m.Path = path
	}
	m.Timeout = internal.Duration{Duration: time.Second * 20}
	inputs.Add("ipmi_sensor", func() telegraf.Input {
		m := m
		return &m
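The change above passes the configurable `Timeout` into `internal.CombinedOutputTimeout` instead of a hard-coded five seconds. For readers outside the telegraf tree, the contract can be sketched with the standard library's `exec.CommandContext`:

```go
package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

// combinedOutputTimeout runs cmdName with args and kills it when the
// deadline passes -- the same contract internal.CombinedOutputTimeout
// provides, sketched here with context.WithTimeout.
func combinedOutputTimeout(timeout time.Duration, cmdName string, args ...string) ([]byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return exec.CommandContext(ctx, cmdName, args...).CombinedOutput()
}

func main() {
	out, err := combinedOutputTimeout(20*time.Second, "ipmitool", "sdr")
	if err != nil {
		fmt.Println("ipmitool failed:", err)
		return
	}
	fmt.Printf("%d bytes of sensor data\n", len(out))
}
```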
@@ -5,9 +5,7 @@ import (
	"os"
	"os/exec"
	"testing"
	"time"

	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
@@ -17,7 +15,6 @@ func TestGather(t *testing.T) {
	i := &Ipmi{
		Servers: []string{"USERID:PASSW0RD@lan(192.168.1.1)"},
		Path:    "ipmitool",
		Timeout: internal.Duration{Duration: time.Second * 5},
	}
	// overwriting exec commands with mock commands
	execCommand = fakeExecCommand
@@ -121,8 +118,7 @@ func TestGather(t *testing.T) {
	}

	i = &Ipmi{
		Path:    "ipmitool",
		Timeout: internal.Duration{Duration: time.Second * 5},
		Path: "ipmitool",
	}

	err = acc.GatherError(i.Gather)
@@ -78,7 +78,7 @@ pkts bytes target prot opt in out source destination
```

```
$ ./telegraf --config telegraf.conf --input-filter iptables --test
$ ./telegraf -config telegraf.conf -input-filter iptables -test
iptables,table=filter,chain=INPUT,ruleid=ssh pkts=100i,bytes=1024i 1453831884664956455
iptables,table=filter,chain=INPUT,ruleid=httpd pkts=42i,bytes=2048i 1453831884664956455
```
@@ -95,7 +95,7 @@ const measurement = "iptables"
var errParse = errors.New("Cannot parse iptables list information")
var chainNameRe = regexp.MustCompile(`^Chain\s+(\S+)`)
var fieldsHeaderRe = regexp.MustCompile(`^\s*pkts\s+bytes\s+`)
var commentRe = regexp.MustCompile(`\s*/\*\s*(.+?)\s*\*/\s*`)
var valuesRe = regexp.MustCompile(`^\s*([0-9]+)\s+([0-9]+)\s+.*?(/\*\s(.*)\s\*/)?$`)

func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error {
	lines := strings.Split(data, "\n")
@@ -110,34 +110,17 @@ func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error
		return errParse
	}
	for _, line := range lines[2:] {
		tokens := strings.Fields(line)
		if len(tokens) < 10 {
		mv := valuesRe.FindAllStringSubmatch(line, -1)
		// best effort: if the line does not match or the rule is not commented, forget about it
		if len(mv) == 0 || len(mv[0]) != 5 || mv[0][4] == "" {
			continue
		}

		pkts := tokens[0]
		bytes := tokens[1]
		end := strings.Join(tokens[9:], " ")

		matches := commentRe.FindStringSubmatch(end)
		if matches == nil {
			continue
		}

		comment := matches[1]

		tags := map[string]string{"table": ipt.Table, "chain": mchain[1], "ruleid": comment}
		tags := map[string]string{"table": ipt.Table, "chain": mchain[1], "ruleid": mv[0][4]}
		fields := make(map[string]interface{})

		var err error
		fields["pkts"], err = strconv.ParseUint(pkts, 10, 64)
		if err != nil {
			continue
		}
		fields["bytes"], err = strconv.ParseUint(bytes, 10, 64)
		if err != nil {
			continue
		}
		// since a parse error is already caught by the regexp,
		// we never enter the error case here => no error check (but still need a test to cover the case)
		fields["pkts"], _ = strconv.ParseUint(mv[0][1], 10, 64)
		fields["bytes"], _ = strconv.ParseUint(mv[0][2], 10, 64)
		acc.AddFields(measurement, fields, tags)
	}
	return nil
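The rewritten loop above leans on `valuesRe`: one match yields pkts, bytes, and the `/* comment */` that becomes the `ruleid` tag, and uncommented lines are skipped. A simplified standalone sketch of that extraction, split into two expressions for clarity rather than the plugin's single regex:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var (
	countsRe  = regexp.MustCompile(`^\s*([0-9]+)\s+([0-9]+)\s+`)
	commentRe = regexp.MustCompile(`/\*\s*(.+?)\s*\*/`)
)

// parseRule pulls the three values the plugin keeps from one iptables
// counter line: packet count, byte count, and the comment that becomes
// the ruleid tag. Lines without both parts report ok=false.
func parseRule(line string) (pkts, bytes uint64, ruleid string, ok bool) {
	mc := countsRe.FindStringSubmatch(line)
	mr := commentRe.FindStringSubmatch(line)
	if mc == nil || mr == nil {
		return 0, 0, "", false
	}
	// The regexp only admits digits, so ParseUint cannot fail here.
	pkts, _ = strconv.ParseUint(mc[1], 10, 64)
	bytes, _ = strconv.ParseUint(mc[2], 10, 64)
	return pkts, bytes, mr[1], true
}

func main() {
	line := `  100  1024 ACCEPT tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:22 /* ssh */`
	p, b, id, ok := parseRule(line)
	fmt.Println(p, b, id, ok) // 100 1024 ssh true
}
```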
@@ -124,36 +124,6 @@ func TestIptables_Gather(t *testing.T) {
			{map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}},
		},
	},
	{ // 10 - allow trailing text
		table:  "mangle",
		chains: []string{"SHAPER"},
		values: []string{
			`Chain SHAPER (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
0 0 ACCEPT all -- * * 1.3.5.7 0.0.0.0/0 /* test */
0 0 CLASSIFY all -- * * 1.3.5.7 0.0.0.0/0 /* test2 */ CLASSIFY set 1:4
`},
		tags: []map[string]string{
			map[string]string{"table": "mangle", "chain": "SHAPER", "ruleid": "test"},
			map[string]string{"table": "mangle", "chain": "SHAPER", "ruleid": "test2"},
		},
		fields: [][]map[string]interface{}{
			{map[string]interface{}{"pkts": uint64(0), "bytes": uint64(0)}},
			{map[string]interface{}{"pkts": uint64(0), "bytes": uint64(0)}},
		},
	},
	{ // 11 - invalid pkts/bytes
		table:  "mangle",
		chains: []string{"SHAPER"},
		values: []string{
			`Chain SHAPER (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
a a ACCEPT all -- * * 1.3.5.7 0.0.0.0/0 /* test */
a a CLASSIFY all -- * * 1.3.5.7 0.0.0.0/0 /* test2 */ CLASSIFY set 1:4
`},
		tags:   []map[string]string{},
		fields: [][]map[string]interface{}{},
	},
}

for i, tt := range tests {
@@ -6,9 +6,6 @@ line protocol. [Consumer Group](http://godoc.org/github.com/wvanbergen/kafka/consumergroup)
is used to talk to the Kafka cluster so multiple instances of telegraf can read
from the same topic in parallel.

For old kafka version (< 0.8), please use the kafka_consumer_legacy input plugin
and use the old zookeeper connection method.

## Configuration

```toml
@@ -16,24 +13,17 @@ and use the old zookeeper connection method.
[[inputs.kafka_consumer]]
  ## topic(s) to consume
  topics = ["telegraf"]
  brokers = ["localhost:9092"]
  ## an array of Zookeeper connection strings
  zookeeper_peers = ["localhost:2181"]
  ## the name of the consumer group
  consumer_group = "telegraf_metrics_consumers"
  ## Maximum number of metrics to buffer between collection intervals
  metric_buffer = 100000
  ## Offset (must be either "oldest" or "newest")
  offset = "oldest"

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## Optional SASL Config
  # sasl_username = "kafka"
  # sasl_password = "secret"

  ## Data format to consume.

  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
@@ -7,35 +7,20 @@ import (
	"sync"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/parsers"

	"github.com/Shopify/sarama"
	cluster "github.com/bsm/sarama-cluster"
	"github.com/wvanbergen/kafka/consumergroup"
)

type Kafka struct {
	ConsumerGroup string
	Topics        []string
	Brokers       []string
	MaxMessageLen int

	Cluster *cluster.Consumer

	// Verify Kafka SSL Certificate
	InsecureSkipVerify bool
	// Path to CA file
	SSLCA string `toml:"ssl_ca"`
	// Path to host cert file
	SSLCert string `toml:"ssl_cert"`
	// Path to cert key file
	SSLKey string `toml:"ssl_key"`

	// SASL Username
	SASLUsername string `toml:"sasl_username"`
	// SASL Password
	SASLPassword string `toml:"sasl_password"`
	ConsumerGroup   string
	Topics          []string
	MaxMessageLen   int
	ZookeeperPeers  []string
	ZookeeperChroot string
	Consumer        *consumergroup.ConsumerGroup

	// Legacy metric buffer support
	MetricBuffer int
@@ -62,22 +47,12 @@ type Kafka struct {
}
var sampleConfig = `
  ## kafka servers
  brokers = ["localhost:9092"]
  ## topic(s) to consume
  topics = ["telegraf"]

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## Optional SASL Config
  # sasl_username = "kafka"
  # sasl_password = "secret"

  ## an array of Zookeeper connection strings
  zookeeper_peers = ["localhost:2181"]
  ## Zookeeper Chroot
  zookeeper_chroot = ""
  ## the name of the consumer group
  consumer_group = "telegraf_metrics_consumers"
  ## Offset (must be either "oldest" or "newest")
@@ -109,67 +84,45 @@ func (k *Kafka) SetParser(parser parsers.Parser) {
func (k *Kafka) Start(acc telegraf.Accumulator) error {
	k.Lock()
	defer k.Unlock()
	var clusterErr error
	var consumerErr error

	k.acc = acc

	config := cluster.NewConfig()
	config.Consumer.Return.Errors = true

	tlsConfig, err := internal.GetTLSConfig(
		k.SSLCert, k.SSLKey, k.SSLCA, k.InsecureSkipVerify)
	if err != nil {
		return err
	}

	if tlsConfig != nil {
		log.Printf("D! TLS Enabled")
		config.Net.TLS.Config = tlsConfig
		config.Net.TLS.Enable = true
	}
	if k.SASLUsername != "" && k.SASLPassword != "" {
		log.Printf("D! Using SASL auth with username '%s',",
			k.SASLUsername)
		config.Net.SASL.User = k.SASLUsername
		config.Net.SASL.Password = k.SASLPassword
		config.Net.SASL.Enable = true
	}

	config := consumergroup.NewConfig()
	config.Zookeeper.Chroot = k.ZookeeperChroot
	switch strings.ToLower(k.Offset) {
	case "oldest", "":
		config.Consumer.Offsets.Initial = sarama.OffsetOldest
		config.Offsets.Initial = sarama.OffsetOldest
	case "newest":
		config.Consumer.Offsets.Initial = sarama.OffsetNewest
		config.Offsets.Initial = sarama.OffsetNewest
	default:
		log.Printf("I! WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n",
			k.Offset)
		config.Consumer.Offsets.Initial = sarama.OffsetOldest
		config.Offsets.Initial = sarama.OffsetOldest
	}

	if k.Cluster == nil {
		k.Cluster, clusterErr = cluster.NewConsumer(
			k.Brokers,
	if k.Consumer == nil || k.Consumer.Closed() {
		k.Consumer, consumerErr = consumergroup.JoinConsumerGroup(
			k.ConsumerGroup,
			k.Topics,
			k.ZookeeperPeers,
			config,
		)

		if clusterErr != nil {
			log.Printf("E! Error when creating Kafka Consumer, brokers: %v, topics: %v\n",
				k.Brokers, k.Topics)
			return clusterErr
		if consumerErr != nil {
			return consumerErr
		}

		// Setup message and error channels
		k.in = k.Cluster.Messages()
		k.errs = k.Cluster.Errors()
		k.in = k.Consumer.Messages()
		k.errs = k.Consumer.Errors()
	}

	k.done = make(chan struct{})

	// Start the kafka message reader
	go k.receiver()
	log.Printf("I! Started the kafka consumer service, brokers: %v, topics: %v\n",
		k.Brokers, k.Topics)
	log.Printf("I! Started the kafka consumer service, peers: %v, topics: %v\n",
		k.ZookeeperPeers, k.Topics)
	return nil
}
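Both consumer libraries in this hunk accept the same two initial-offset constants from sarama, so the offset handling reduces to a small mapping. A sketch with a hypothetical helper name:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/Shopify/sarama"
)

// initialOffset maps the plugin's offset setting onto sarama's constants,
// defaulting to oldest -- the same switch both versions of Start use.
func initialOffset(offset string) int64 {
	switch strings.ToLower(offset) {
	case "oldest", "":
		return sarama.OffsetOldest
	case "newest":
		return sarama.OffsetNewest
	default:
		log.Printf("I! WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n", offset)
		return sarama.OffsetOldest
	}
}

func main() {
	fmt.Println(initialOffset("newest") == sarama.OffsetNewest) // true
}
```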
@@ -203,7 +156,7 @@ func (k *Kafka) receiver() {
			// TODO(cam) this locking can be removed if this PR gets merged:
			// https://github.com/wvanbergen/kafka/pull/84
			k.Lock()
			k.Cluster.MarkOffset(msg, "")
			k.Consumer.CommitUpto(msg)
			k.Unlock()
		}
	}
@@ -214,7 +167,7 @@ func (k *Kafka) Stop() {
	k.Lock()
	defer k.Unlock()
	close(k.done)
	if err := k.Cluster.Close(); err != nil {
	if err := k.Consumer.Close(); err != nil {
		k.acc.AddError(fmt.Errorf("Error closing consumer: %s\n", err.Error()))
	}
}
@@ -19,6 +19,7 @@ func TestReadsMetricsFromKafka(t *testing.T) {
	}

	brokerPeers := []string{testutil.GetLocalHost() + ":9092"}
	zkPeers := []string{testutil.GetLocalHost() + ":2181"}
	testTopic := fmt.Sprintf("telegraf_test_topic_%d", time.Now().Unix())

	// Send a Kafka message to the kafka host
@@ -35,11 +36,11 @@ func TestReadsMetricsFromKafka(t *testing.T) {

	// Start the Kafka Consumer
	k := &Kafka{
		ConsumerGroup: "telegraf_test_consumers",
		Topics:        []string{testTopic},
		Brokers:       brokerPeers,
		PointBuffer:   100000,
		Offset:        "oldest",
		ConsumerGroup:  "telegraf_test_consumers",
		Topics:         []string{testTopic},
		ZookeeperPeers: zkPeers,
		PointBuffer:    100000,
		Offset:         "oldest",
	}
	p, _ := parsers.NewInfluxParser()
	k.SetParser(p)
@@ -23,7 +23,7 @@ func newTestKafka() (*Kafka, chan *sarama.ConsumerMessage) {
	k := Kafka{
		ConsumerGroup:  "test",
		Topics:         []string{"telegraf"},
		Brokers:        []string{"localhost:9092"},
		ZookeeperPeers: []string{"localhost:2181"},
		Offset:         "oldest",
		in:             in,
		doNotCommitMsgs: true,
@@ -1,39 +0,0 @@
# Kafka Consumer Input Plugin

The [Kafka](http://kafka.apache.org/) consumer plugin polls a specified Kafka
topic and adds messages to InfluxDB. The plugin assumes messages follow the
line protocol. [Consumer Group](http://godoc.org/github.com/wvanbergen/kafka/consumergroup)
is used to talk to the Kafka cluster so multiple instances of telegraf can read
from the same topic in parallel.

## Configuration

```toml
# Read metrics from Kafka topic(s)
[[inputs.kafka_consumer]]
  ## topic(s) to consume
  topics = ["telegraf"]
  ## an array of Zookeeper connection strings
  zookeeper_peers = ["localhost:2181"]
  ## Zookeeper Chroot
  zookeeper_chroot = ""
  ## the name of the consumer group
  consumer_group = "telegraf_metrics_consumers"
  ## Offset (must be either "oldest" or "newest")
  offset = "oldest"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"

  ## Maximum length of a message to consume, in bytes (default 0/unlimited);
  ## larger messages are dropped
  max_message_len = 65536
```

## Testing

Running integration tests requires running Zookeeper & Kafka. See Makefile
for kafka container command.
@@ -1,183 +0,0 @@
package kafka_consumer_legacy

import (
	"fmt"
	"log"
	"strings"
	"sync"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/parsers"

	"github.com/Shopify/sarama"
	"github.com/wvanbergen/kafka/consumergroup"
)

type Kafka struct {
	ConsumerGroup   string
	Topics          []string
	MaxMessageLen   int
	ZookeeperPeers  []string
	ZookeeperChroot string
	Consumer        *consumergroup.ConsumerGroup

	// Legacy metric buffer support
	MetricBuffer int
	// TODO remove PointBuffer, legacy support
	PointBuffer int

	Offset string
	parser parsers.Parser

	sync.Mutex

	// channel for all incoming kafka messages
	in <-chan *sarama.ConsumerMessage
	// channel for all kafka consumer errors
	errs <-chan error
	done chan struct{}

	// keep the accumulator internally:
	acc telegraf.Accumulator

	// doNotCommitMsgs tells the parser not to call CommitUpTo on the consumer
	// this is mostly for test purposes, but there may be a use-case for it later.
	doNotCommitMsgs bool
}

var sampleConfig = `
  ## topic(s) to consume
  topics = ["telegraf"]
  ## an array of Zookeeper connection strings
  zookeeper_peers = ["localhost:2181"]
  ## Zookeeper Chroot
  zookeeper_chroot = ""
  ## the name of the consumer group
  consumer_group = "telegraf_metrics_consumers"
  ## Offset (must be either "oldest" or "newest")
  offset = "oldest"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"

  ## Maximum length of a message to consume, in bytes (default 0/unlimited);
  ## larger messages are dropped
  max_message_len = 65536
`

func (k *Kafka) SampleConfig() string {
	return sampleConfig
}

func (k *Kafka) Description() string {
	return "Read metrics from Kafka topic(s)"
}

func (k *Kafka) SetParser(parser parsers.Parser) {
	k.parser = parser
}

func (k *Kafka) Start(acc telegraf.Accumulator) error {
	k.Lock()
	defer k.Unlock()
	var consumerErr error

	k.acc = acc

	config := consumergroup.NewConfig()
	config.Zookeeper.Chroot = k.ZookeeperChroot
	switch strings.ToLower(k.Offset) {
	case "oldest", "":
		config.Offsets.Initial = sarama.OffsetOldest
	case "newest":
		config.Offsets.Initial = sarama.OffsetNewest
	default:
		log.Printf("I! WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n",
			k.Offset)
		config.Offsets.Initial = sarama.OffsetOldest
	}

	if k.Consumer == nil || k.Consumer.Closed() {
		k.Consumer, consumerErr = consumergroup.JoinConsumerGroup(
			k.ConsumerGroup,
			k.Topics,
			k.ZookeeperPeers,
			config,
		)
		if consumerErr != nil {
			return consumerErr
		}

		// Setup message and error channels
		k.in = k.Consumer.Messages()
		k.errs = k.Consumer.Errors()
	}

	k.done = make(chan struct{})

	// Start the kafka message reader
	go k.receiver()
	log.Printf("I! Started the kafka consumer service, peers: %v, topics: %v\n",
		k.ZookeeperPeers, k.Topics)
	return nil
}

// receiver() reads all incoming messages from the consumer, and parses them into
// influxdb metric points.
func (k *Kafka) receiver() {
	for {
		select {
		case <-k.done:
			return
		case err := <-k.errs:
			if err != nil {
				k.acc.AddError(fmt.Errorf("Consumer Error: %s\n", err))
			}
		case msg := <-k.in:
			if k.MaxMessageLen != 0 && len(msg.Value) > k.MaxMessageLen {
				k.acc.AddError(fmt.Errorf("Message longer than max_message_len (%d > %d)",
					len(msg.Value), k.MaxMessageLen))
			} else {
				metrics, err := k.parser.Parse(msg.Value)
				if err != nil {
					k.acc.AddError(fmt.Errorf("Message Parse Error\nmessage: %s\nerror: %s",
						string(msg.Value), err.Error()))
				}
				for _, metric := range metrics {
					k.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time())
				}
			}

			if !k.doNotCommitMsgs {
				// TODO(cam) this locking can be removed if this PR gets merged:
				// https://github.com/wvanbergen/kafka/pull/84
				k.Lock()
				k.Consumer.CommitUpto(msg)
				k.Unlock()
			}
		}
	}
}

func (k *Kafka) Stop() {
	k.Lock()
	defer k.Unlock()
	close(k.done)
	if err := k.Consumer.Close(); err != nil {
		k.acc.AddError(fmt.Errorf("Error closing consumer: %s\n", err.Error()))
	}
}

func (k *Kafka) Gather(acc telegraf.Accumulator) error {
	return nil
}

func init() {
	inputs.Add("kafka_consumer_legacy", func() telegraf.Input {
		return &Kafka{}
	})
}
@@ -1,97 +0,0 @@
package kafka_consumer_legacy

import (
	"fmt"
	"testing"
	"time"

	"github.com/Shopify/sarama"
	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/influxdata/telegraf/plugins/parsers"
)

func TestReadsMetricsFromKafka(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	t.Skip("Skipping test due to circleci issue; ref #2487")

	brokerPeers := []string{testutil.GetLocalHost() + ":9092"}
	zkPeers := []string{testutil.GetLocalHost() + ":2181"}
	testTopic := fmt.Sprintf("telegraf_test_topic_legacy_%d", time.Now().Unix())

	// Send a Kafka message to the kafka host
	msg := "cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257\n"
	producer, err := sarama.NewSyncProducer(brokerPeers, nil)
	require.NoError(t, err)
	_, _, err = producer.SendMessage(
		&sarama.ProducerMessage{
			Topic: testTopic,
			Value: sarama.StringEncoder(msg),
		})
	require.NoError(t, err)
	defer producer.Close()

	// Start the Kafka Consumer
	k := &Kafka{
		ConsumerGroup:  "telegraf_test_consumers",
		Topics:         []string{testTopic},
		ZookeeperPeers: zkPeers,
		PointBuffer:    100000,
		Offset:         "oldest",
	}
	p, _ := parsers.NewInfluxParser()
	k.SetParser(p)

	// Verify that we can now gather the sent message
	var acc testutil.Accumulator

	// Sanity check
	assert.Equal(t, 0, len(acc.Metrics), "There should not be any points")
	if err := k.Start(&acc); err != nil {
		t.Fatal(err.Error())
	} else {
		defer k.Stop()
	}

	waitForPoint(&acc, t)

	// Gather points
	err = acc.GatherError(k.Gather)
	require.NoError(t, err)
	if len(acc.Metrics) == 1 {
		point := acc.Metrics[0]
		assert.Equal(t, "cpu_load_short", point.Measurement)
		assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields)
		assert.Equal(t, map[string]string{
			"host":      "server01",
			"direction": "in",
			"region":    "us-west",
		}, point.Tags)
		assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix())
	} else {
		t.Errorf("No points found in accumulator, expected 1")
	}
}

// Waits for the metric that was sent to the kafka broker to arrive at the kafka
// consumer
func waitForPoint(acc *testutil.Accumulator, t *testing.T) {
	// Give the kafka container up to 2 seconds to get the point to the consumer
	ticker := time.NewTicker(5 * time.Millisecond)
	counter := 0
	for {
		select {
		case <-ticker.C:
			counter++
			if counter > 1000 {
				t.Fatal("Waited for 5s, point never arrived to consumer")
			} else if acc.NFields() == 1 {
				return
			}
		}
	}
}
Some files were not shown because too many files have changed in this diff.