From 4ad551be9a3098b8829666600a78a583530c3ed7 Mon Sep 17 00:00:00 2001
From: Cameron Sparr
Date: Mon, 28 Mar 2016 13:36:44 -0600
Subject: [PATCH 01/13] add '*' to metric prefixes for consistency

---
 docs/CONFIGURATION.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md
index 853dc6d05..810dc9470 100644
--- a/docs/CONFIGURATION.md
+++ b/docs/CONFIGURATION.md
@@ -141,12 +141,12 @@ fields which begin with `time_`.
 # Drop all metrics about containers for kubelet
 [[inputs.prometheus]]
   urls = ["http://kube-node-1:4194/metrics"]
-  namedrop = ["container_"]
+  namedrop = ["container_*"]
 
 # Only store rest client related metrics for kubelet
 [[inputs.prometheus]]
   urls = ["http://kube-node-1:4194/metrics"]
-  namepass = ["rest_client_"]
+  namepass = ["rest_client_*"]
 ```
 
 #### Input config: prefix, suffix, and override

From 2f41ae24f87863c119bfdb35fbb3f8b46a1bb6fa Mon Sep 17 00:00:00 2001
From: Ross McDonald
Date: Wed, 30 Mar 2016 10:51:21 -0500
Subject: [PATCH 02/13] Swap systemd command, as it was causing issues on Debian.

---
 scripts/post-install.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/post-install.sh b/scripts/post-install.sh
index d4c5df443..53d745ca9 100644
--- a/scripts/post-install.sh
+++ b/scripts/post-install.sh
@@ -64,7 +64,7 @@ elif [[ -f /etc/debian_version ]]; then
     which systemctl &>/dev/null
     if [[ $? -eq 0 ]]; then
         install_systemd
-        deb-systemd-invoke restart telegraf.service
+        systemctl restart telegraf
    else
        # Assuming sysv
        install_init

From e03f684508ef6182d04e38c64b6534fa3e604155 Mon Sep 17 00:00:00 2001
From: Rudenkovk Konstantin
Date: Fri, 25 Mar 2016 19:24:46 +0400
Subject: [PATCH 03/13] Fix parse fcgi URI path in php-fpm input module

closes #934
---
 CHANGELOG.md                    | 1 +
 plugins/inputs/phpfpm/phpfpm.go | 5 +++++
 2 files changed, 6 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 001630ae0..08c4b6ceb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,7 @@
 - [#907](https://github.com/influxdata/telegraf/issues/907): Fix prometheus invalid label/measurement name key.
 - [#841](https://github.com/influxdata/telegraf/issues/841): Fix memcached unix socket panic.
 - [#873](https://github.com/influxdata/telegraf/issues/873): Fix SNMP plugin sometimes not returning metrics. Thanks @titiliambert!
+- [#934](https://github.com/influxdata/telegraf/pull/934): phpfpm: Fix fcgi uri path. Thanks @rudenkovk!
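A note on the fix referenced above: for `fcgi://` URLs the status path is now taken from the URL path when one is present, otherwise it falls back to `status`. A minimal standalone Go sketch of that logic (the `statusPath` helper is hypothetical; it mirrors the phpfpm.go hunk that follows):

```
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// statusPath mirrors the phpfpm fix: fcgi://10.0.0.12:9000/custom-status
// yields "custom-status", while a bare fcgi://10.0.0.12:9000 falls back
// to the default "status" endpoint.
func statusPath(raw string) (string, error) {
	u, err := url.Parse(raw)
	if err != nil {
		return "", err
	}
	if len(u.Path) > 1 {
		return strings.Trim(u.Path, "/"), nil
	}
	return "status", nil
}

func main() {
	for _, addr := range []string{"fcgi://10.0.0.12:9000/custom-status", "fcgi://10.0.0.12:9000"} {
		p, _ := statusPath(addr)
		fmt.Printf("%s -> %s\n", addr, p)
	}
}
```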
## v0.11.1 [2016-03-17] diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index 199b0005b..169fe2194 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -122,6 +122,11 @@ func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { fcgiIp := socketAddr[0] fcgiPort, _ := strconv.Atoi(socketAddr[1]) fcgi, err = newFcgiClient(fcgiIp, fcgiPort) + if len(u.Path) > 1 { + statusPath = strings.Trim(u.Path, "/") + } else { + statusPath = "status" + } } else { socketAddr := strings.Split(addr, ":") if len(socketAddr) >= 2 { From 62105bb3531bad2e9c25d68731bbdb215163c8a5 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 30 Mar 2016 11:54:01 -0600 Subject: [PATCH 04/13] Use github paho mqtt client instead of gerrit this might fix #921 --- Godeps | 2 +- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 6 +++--- plugins/inputs/mqtt_consumer/mqtt_consumer_test.go | 2 +- plugins/outputs/mqtt/mqtt.go | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Godeps b/Godeps index 75cb813ba..2fc53d8c5 100644 --- a/Godeps +++ b/Godeps @@ -1,4 +1,3 @@ -git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git 617c801af238c3af2d9e72c5d4a0f02edad03ce5 github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9 github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687 @@ -12,6 +11,7 @@ github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3 github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367 +github.com/eclipse/paho.mqtt.golang 4ab3e867810d1ec5f35157c59e965054dbf43a0d github.com/fsouza/go-dockerclient a49c8269a6899cae30da1f8a4b82e0ce945f9967 github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032 diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index e36889703..50a20740a 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -11,7 +11,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" - "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" + "github.com/eclipse/paho.mqtt.golang" ) type MQTTConsumer struct { @@ -39,7 +39,7 @@ type MQTTConsumer struct { InsecureSkipVerify bool sync.Mutex - client *mqtt.Client + client mqtt.Client // channel of all incoming raw mqtt messages in chan mqtt.Message done chan struct{} @@ -163,7 +163,7 @@ func (m *MQTTConsumer) receiver() { } } -func (m *MQTTConsumer) recvMessage(_ *mqtt.Client, msg mqtt.Message) { +func (m *MQTTConsumer) recvMessage(_ mqtt.Client, msg mqtt.Message) { m.in <- msg } diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index e926ebbb2..7090a46c3 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/assert" - "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" + "github.com/eclipse/paho.mqtt.golang" ) const ( diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index 10c1b1a9e..f13500db9 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ 
b/plugins/outputs/mqtt/mqtt.go @@ -10,7 +10,7 @@ import ( "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" - paho "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" + paho "github.com/eclipse/paho.mqtt.golang" ) var sampleConfig = ` @@ -57,7 +57,7 @@ type MQTT struct { // Use SSL but skip chain & host verification InsecureSkipVerify bool - client *paho.Client + client paho.Client opts *paho.ClientOptions serializer serializers.Serializer From 91957f084800ad000ad1a2f46920277de4fa6322 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 30 Mar 2016 14:43:05 -0600 Subject: [PATCH 05/13] Update Godeps_windows file to HEAD --- Godeps_windows | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/Godeps_windows b/Godeps_windows index c4a2561d1..f499fa915 100644 --- a/Godeps_windows +++ b/Godeps_windows @@ -1,4 +1,3 @@ -git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git 617c801af238c3af2d9e72c5d4a0f02edad03ce5 github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9 github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5 @@ -6,22 +5,28 @@ github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687 github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857 github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4 github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99 +github.com/couchbase/go-couchbase cb664315a324d87d19c879d9cc67fda6be8c2ac1 +github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2 +github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6 github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc -github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d +github.com/davecgh/go-spew fc32781af5e85e548d3f1abaf0fa3dbe8a72495c github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3 github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367 +github.com/eclipse/paho.mqtt.golang 4ab3e867810d1ec5f35157c59e965054dbf43a0d github.com/fsouza/go-dockerclient a49c8269a6899cae30da1f8a4b82e0ce945f9967 +github.com/go-ini/ini 776aa739ce9373377cd16f526cdf06cb4c89b40f github.com/go-ole/go-ole 50055884d646dd9434f16bbb5c9801749b9bafe4 github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032 -github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7 +github.com/golang/snappy 5979233c5d6225d4a8e438cdd0b411888449ddab github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2 github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478 github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da -github.com/influxdata/influxdb e3fef5593c21644f2b43af55d6e17e70910b0e48 +github.com/influxdata/influxdb c190778997f4154294e6160c41b90140641ac915 github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0 +github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74 github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720 github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36 github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1 @@ -32,15 +37,17 @@ github.com/naoina/go-stringutil 
6b638e95a32d0c1131db0e7fe83775cbea4a0d0b github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3 github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980 +github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2 github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831 github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37 github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8 github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f -github.com/shirou/gopsutil 1de1357e7737a536c7f4ff6be7bd27977db4d2cb +github.com/shirou/gopsutil 1f32ce1bb380845be7f5d174ac641a2c592c0c42 github.com/shirou/w32 ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5 github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744 +github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94 github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3 github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8 From 9347a70425cccfc2d619b391fc61891c945746ef Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 31 Mar 2016 20:37:04 -0600 Subject: [PATCH 06/13] Fix httpjson README closes #947 --- plugins/inputs/httpjson/README.md | 65 +++++++++++++------------------ 1 file changed, 28 insertions(+), 37 deletions(-) diff --git a/plugins/inputs/httpjson/README.md b/plugins/inputs/httpjson/README.md index fc45dd567..707b256df 100644 --- a/plugins/inputs/httpjson/README.md +++ b/plugins/inputs/httpjson/README.md @@ -6,7 +6,7 @@ For example, if you have a service called _mycollector_, which has HTTP endpoint plugin like this: ``` -[[httpjson.services]] +[[inputs.httpjson]] name = "mycollector" servers = [ @@ -24,7 +24,7 @@ plugin like this: You can also specify which keys from server response should be considered tags: ``` -[[httpjson.services]] +[[inputs.httpjson]] ... tag_keys = [ @@ -36,10 +36,10 @@ You can also specify which keys from server response should be considered tags: You can also specify additional request parameters for the service: ``` -[[httpjson.services]] +[[inputs.httpjson]] ... - [httpjson.services.parameters] + [inputs.httpjson.parameters] event_type = "cpu_spike" threshold = "0.75" @@ -48,10 +48,10 @@ You can also specify additional request parameters for the service: You can also specify additional request header parameters for the service: ``` -[[httpjson.services]] +[[inputs.httpjson]] ... - [httpjson.services.headers] + [inputs.httpjson.headers] X-Auth-Token = "my-xauth-token" apiVersion = "v1" ``` @@ -61,18 +61,14 @@ You can also specify additional request header parameters for the service: Let's say that we have a service named "mycollector" configured like this: ``` -[httpjson] - [[httpjson.services]] - name = "mycollector" - - servers = [ - "http://my.service.com/_stats" - ] - - # HTTP method to use (case-sensitive) - method = "GET" - - tag_keys = ["service"] +[[inputs.httpjson]] + name = "mycollector" + servers = [ + "http://my.service.com/_stats" + ] + # HTTP method to use (case-sensitive) + method = "GET" + tag_keys = ["service"] ``` which responds with the following JSON: @@ -102,26 +98,21 @@ There is also the option to collect JSON from multiple services, here is an example doing that. 
``` -[httpjson] - [[httpjson.services]] - name = "mycollector1" +[[inputs.httpjson]] + name = "mycollector1" + servers = [ + "http://my.service1.com/_stats" + ] + # HTTP method to use (case-sensitive) + method = "GET" - servers = [ - "http://my.service1.com/_stats" - ] - - # HTTP method to use (case-sensitive) - method = "GET" - - [[httpjson.services]] - name = "mycollector2" - - servers = [ - "http://service.net/json/stats" - ] - - # HTTP method to use (case-sensitive) - method = "POST" +[[inputs.httpjson]] + name = "mycollector2" + servers = [ + "http://service.net/json/stats" + ] + # HTTP method to use (case-sensitive) + method = "POST" ``` The services respond with the following JSON: From 6ff0fc6d831a360b21ba87a6baaa1263a2d3cd23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Florent=20Rami=C3=A8re?= Date: Thu, 31 Mar 2016 11:14:20 +0200 Subject: [PATCH 07/13] Add compression/acks/retry conf to Kafka output plugin The following configuration is now possible ## CompressionCodec represents the various compression codecs recognized by Kafka in messages. ## "none" : No compression ## "gzip" : Gzip compression ## "snappy" : Snappy compression # compression_codec = "none" ## RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements it must see before responding ## "none" : the producer never waits for an acknowledgement from the broker. This option provides the lowest latency but the weakest durability guarantees (some data will be lost when a server fails). ## "leader" : the producer gets an acknowledgement after the leader replica has received the data. This option provides better durability as the client waits until the server acknowledges the request as successful (only messages that were written to the now-dead leader but not yet replicated will be lost). ## "leader_and_replicas" : the producer gets an acknowledgement after all in-sync replicas have received the data. This option provides the best durability, we guarantee that no messages will be lost as long as at least one in sync replica remains. # required_acks = "leader_and_replicas" ## The total number of times to retry sending a message # max_retry = "3" --- plugins/outputs/kafka/kafka.go | 85 ++++++++++++++++++++++++++++++++-- 1 file changed, 81 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 8dea2b2a1..2bba2e77e 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -3,6 +3,8 @@ package kafka import ( "crypto/tls" "fmt" + "strconv" + "strings" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" @@ -19,6 +21,12 @@ type Kafka struct { Topic string // Routing Key Tag RoutingTag string `toml:"routing_tag"` + // Compression Codec Tag + CompressionCodec string + // RequiredAcks Tag + RequiredAcks string + // MaxRetry Tag + MaxRetry string // Legacy SSL config options // TLS client certificate @@ -53,6 +61,21 @@ var sampleConfig = ` ## ie, if this tag exists, it's value will be used as the routing key routing_tag = "host" + ## CompressionCodec represents the various compression codecs recognized by Kafka in messages. + ## "none" : No compression + ## "gzip" : Gzip compression + ## "snappy" : Snappy compression + # compression_codec = "none" + + ## RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements it must see before responding + ## "none" : the producer never waits for an acknowledgement from the broker. 
This option provides the lowest latency but the weakest durability guarantees (some data will be lost when a server fails). + ## "leader" : the producer gets an acknowledgement after the leader replica has received the data. This option provides better durability as the client waits until the server acknowledges the request as successful (only messages that were written to the now-dead leader but not yet replicated will be lost). + ## "leader_and_replicas" : the producer gets an acknowledgement after all in-sync replicas have received the data. This option provides the best durability, we guarantee that no messages will be lost as long as at least one in sync replica remains. + # required_acks = "leader_and_replicas" + + ## The total number of times to retry sending a message + # max_retry = "3" + ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" # ssl_cert = "/etc/telegraf/cert.pem" @@ -71,12 +94,66 @@ func (k *Kafka) SetSerializer(serializer serializers.Serializer) { k.serializer = serializer } +func requiredAcks(value string) (sarama.RequiredAcks, error) { + switch strings.ToLower(value) { + case "none": + return sarama.NoResponse, nil + case "leader": + return sarama.WaitForLocal, nil + case "", "leader_and_replicas": + return sarama.WaitForAll, nil + default: + return 0, fmt.Errorf("Failed to recognize required_acks: %s", value) + } +} + +func compressionCodec(value string) (sarama.CompressionCodec, error) { + switch strings.ToLower(value) { + case "gzip": + return sarama.CompressionGZIP, nil + case "snappy": + return sarama.CompressionSnappy, nil + case "", "none": + return sarama.CompressionNone, nil + default: + return 0, fmt.Errorf("Failed to recognize compression_codec: %s", value) + } +} + +func maxRetry(value string) (int, error) { + if value == "" { + return 3, nil + } + maxRetry, err := strconv.Atoi(value) + if err != nil { + return -1, fmt.Errorf("Failed to parse max_retry: %s", value) + } + if maxRetry < 0 { + return -1, fmt.Errorf("max_retry is %s but it should not be negative", value) + } + return maxRetry, nil +} + func (k *Kafka) Connect() error { config := sarama.NewConfig() - // Wait for all in-sync replicas to ack the message - config.Producer.RequiredAcks = sarama.WaitForAll - // Retry up to 10 times to produce the message - config.Producer.Retry.Max = 10 + + requiredAcks, err := requiredAcks(k.RequiredAcks) + if err != nil { + return err + } + config.Producer.RequiredAcks = requiredAcks + + compressionCodec, err := compressionCodec(k.CompressionCodec) + if err != nil { + return err + } + config.Producer.Compression = compressionCodec + + maxRetry, err := maxRetry(k.MaxRetry) + if err != nil { + return err + } + config.Producer.Retry.Max = maxRetry // Legacy support ssl config if k.Certificate != "" { From 8c3371c4acf84775d5a206af8541cdfe7e648d1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Florent=20Rami=C3=A8re?= Date: Thu, 31 Mar 2016 17:27:14 +0200 Subject: [PATCH 08/13] Use numerical codes instead of symbolic ones --- plugins/outputs/kafka/kafka.go | 92 +++++++--------------------------- 1 file changed, 18 insertions(+), 74 deletions(-) diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 2bba2e77e..3cecfeeab 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -3,8 +3,6 @@ package kafka import ( "crypto/tls" "fmt" - "strconv" - "strings" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" @@ -22,11 +20,11 @@ type Kafka struct { // Routing Key Tag RoutingTag string 
`toml:"routing_tag"` // Compression Codec Tag - CompressionCodec string + CompressionCodec int // RequiredAcks Tag - RequiredAcks string + RequiredAcks int // MaxRetry Tag - MaxRetry string + MaxRetry int // Legacy SSL config options // TLS client certificate @@ -61,20 +59,20 @@ var sampleConfig = ` ## ie, if this tag exists, it's value will be used as the routing key routing_tag = "host" - ## CompressionCodec represents the various compression codecs recognized by Kafka in messages. - ## "none" : No compression - ## "gzip" : Gzip compression - ## "snappy" : Snappy compression - # compression_codec = "none" + ## CompressionCodec represents the various compression codecs recognized by Kafka in messages. + ## 0 : No compression + ## 1 : Gzip compression + ## 2 : Snappy compression + compression_codec = 0 - ## RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements it must see before responding - ## "none" : the producer never waits for an acknowledgement from the broker. This option provides the lowest latency but the weakest durability guarantees (some data will be lost when a server fails). - ## "leader" : the producer gets an acknowledgement after the leader replica has received the data. This option provides better durability as the client waits until the server acknowledges the request as successful (only messages that were written to the now-dead leader but not yet replicated will be lost). - ## "leader_and_replicas" : the producer gets an acknowledgement after all in-sync replicas have received the data. This option provides the best durability, we guarantee that no messages will be lost as long as at least one in sync replica remains. - # required_acks = "leader_and_replicas" + ## RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements it must see before responding + ## 0 : the producer never waits for an acknowledgement from the broker. This option provides the lowest latency but the weakest durability guarantees (some data will be lost when a server fails). + ## 1 : the producer gets an acknowledgement after the leader replica has received the data. This option provides better durability as the client waits until the server acknowledges the request as successful (only messages that were written to the now-dead leader but not yet replicated will be lost). + ## -1 : the producer gets an acknowledgement after all in-sync replicas have received the data. This option provides the best durability, we guarantee that no messages will be lost as long as at least one in sync replica remains. 
+ required_acks = -1 - ## The total number of times to retry sending a message - # max_retry = "3" + ## The total number of times to retry sending a message + max_retry = 3 ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" @@ -94,66 +92,12 @@ func (k *Kafka) SetSerializer(serializer serializers.Serializer) { k.serializer = serializer } -func requiredAcks(value string) (sarama.RequiredAcks, error) { - switch strings.ToLower(value) { - case "none": - return sarama.NoResponse, nil - case "leader": - return sarama.WaitForLocal, nil - case "", "leader_and_replicas": - return sarama.WaitForAll, nil - default: - return 0, fmt.Errorf("Failed to recognize required_acks: %s", value) - } -} - -func compressionCodec(value string) (sarama.CompressionCodec, error) { - switch strings.ToLower(value) { - case "gzip": - return sarama.CompressionGZIP, nil - case "snappy": - return sarama.CompressionSnappy, nil - case "", "none": - return sarama.CompressionNone, nil - default: - return 0, fmt.Errorf("Failed to recognize compression_codec: %s", value) - } -} - -func maxRetry(value string) (int, error) { - if value == "" { - return 3, nil - } - maxRetry, err := strconv.Atoi(value) - if err != nil { - return -1, fmt.Errorf("Failed to parse max_retry: %s", value) - } - if maxRetry < 0 { - return -1, fmt.Errorf("max_retry is %s but it should not be negative", value) - } - return maxRetry, nil -} - func (k *Kafka) Connect() error { config := sarama.NewConfig() - requiredAcks, err := requiredAcks(k.RequiredAcks) - if err != nil { - return err - } - config.Producer.RequiredAcks = requiredAcks - - compressionCodec, err := compressionCodec(k.CompressionCodec) - if err != nil { - return err - } - config.Producer.Compression = compressionCodec - - maxRetry, err := maxRetry(k.MaxRetry) - if err != nil { - return err - } - config.Producer.Retry.Max = maxRetry + config.Producer.RequiredAcks = sarama.RequiredAcks(k.RequiredAcks) + config.Producer.Compression = sarama.CompressionCodec(k.CompressionCodec) + config.Producer.Retry.Max = k.MaxRetry // Legacy support ssl config if k.Certificate != "" { From 51f4e9c0d3ee6889cba7ce86cd5a0566f8bf8be4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Florent=20Rami=C3=A8re?= Date: Thu, 31 Mar 2016 17:30:39 +0200 Subject: [PATCH 09/13] Update changelog closes #945 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 08c4b6ceb..f43aca161 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ - [#848](https://github.com/influxdata/telegraf/issues/848): Provide option to omit host tag from telegraf agent. - [#928](https://github.com/influxdata/telegraf/pull/928): Deprecating the statsd "convert_names" options, expose separator config. - [#919](https://github.com/influxdata/telegraf/pull/919): ipmi_sensor input plugin. Thanks @ebookbug! +- [#945](https://github.com/influxdata/telegraf/pull/945): KAFKA output: codec, acks, and retry configuration. Thanks @framiere! ### Bugfixes - [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided. From e436b2d72004a940549c21751ba33acf13117004 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 31 Mar 2016 17:50:24 -0600 Subject: [PATCH 10/13] Cleanup & standardize config file changes: - -sample-config will now comment out all but a few default plugins. - config file parse errors will output path to bad conf file. - cleanup 80-char line-length and some other style issues. - default package conf file will now have all plugins, but commented out. 
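Before the config-file diff below, a brief illustration of the Kafka output change in PATCH 08 above: with plain integer options, the configured values convert directly into sarama's typed constants, so the string-parsing helpers from PATCH 07 are no longer needed. A minimal sketch assuming the vendored Shopify/sarama API:

```
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// newProducerConfig shows the PATCH 08 wiring: numeric config values are
// converted straight into sarama's typed constants.
func newProducerConfig(requiredAcks, compressionCodec, maxRetry int) *sarama.Config {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.RequiredAcks(requiredAcks)        // -1 == wait for all in-sync replicas
	config.Producer.Compression = sarama.CompressionCodec(compressionCodec) // 0 = none, 1 = gzip, 2 = snappy
	config.Producer.Retry.Max = maxRetry
	return config
}

func main() {
	config := newProducerConfig(-1, 0, 3)
	fmt.Println(config.Producer.RequiredAcks, config.Producer.Compression, config.Producer.Retry.Max)
}
```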
closes #199 closes #944 --- etc/telegraf.conf | 1178 ++++++++++++++++- internal/config/config.go | 164 ++- plugins/inputs/disque/disque.go | 5 +- plugins/inputs/dns_query/dns_query.go | 3 +- plugins/inputs/exec/exec.go | 2 +- .../inputs/kafka_consumer/kafka_consumer.go | 2 +- plugins/inputs/mesos/mesos.go | 11 +- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 2 +- plugins/inputs/nats_consumer/nats_consumer.go | 2 +- plugins/inputs/postgresql/postgresql.go | 3 +- .../postgresql_extensible.go | 57 +- plugins/inputs/prometheus/prometheus.go | 8 +- plugins/inputs/snmp/snmp.go | 5 +- plugins/inputs/tcp_listener/tcp_listener.go | 2 +- plugins/inputs/udp_listener/udp_listener.go | 2 +- plugins/outputs/amqp/amqp.go | 2 +- plugins/outputs/file/file.go | 2 +- plugins/outputs/kafka/kafka.go | 23 +- plugins/outputs/mqtt/mqtt.go | 2 +- plugins/outputs/nsq/nsq.go | 2 +- 20 files changed, 1347 insertions(+), 130 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 0e740f5c8..43d647beb 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -1,19 +1,21 @@ -# Telegraf configuration - +# Telegraf Configuration +# # Telegraf is entirely plugin driven. All metrics are gathered from the # declared inputs, and sent to the declared outputs. - +# # Plugins must be declared in here to be active. # To deactivate a plugin, comment out the name and any variables. - +# # Use 'telegraf -config telegraf.conf -test' to see what metrics a config # file would generate. + # Global tags can be specified here in key="value" format. [global_tags] # dc = "us-east-1" # will tag all metrics with dc=us-east-1 # rack = "1a" + # Configuration for telegraf agent [agent] ## Default data collection interval for all inputs @@ -48,10 +50,12 @@ quiet = false ## Override default hostname, if empty use os.Hostname() hostname = "" + ## If set to true, do no set the "host" tag in the telegraf agent. + omit_hostname = false ############################################################################### -# OUTPUTS # +# OUTPUT PLUGINS # ############################################################################### # Configuration for influxdb server to send metrics to @@ -87,59 +91,1189 @@ # insecure_skip_verify = false +# # Configuration for Amon Server to send metrics to. +# [[outputs.amon]] +# ## Amon Server Key +# server_key = "my-server-key" # required. +# +# ## Amon Instance URL +# amon_instance = "https://youramoninstance" # required +# +# ## Connection timeout. +# # timeout = "5s" + + +# # Configuration for the AMQP server to send metrics to +# [[outputs.amqp]] +# ## AMQP url +# url = "amqp://localhost:5672/influxdb" +# ## AMQP exchange +# exchange = "telegraf" +# ## Auth method. PLAIN and EXTERNAL are supported +# # auth_method = "PLAIN" +# ## Telegraf tag to use as a routing key +# ## ie, if this tag exists, it's value will be used as the routing key +# routing_tag = "host" +# +# ## InfluxDB retention policy +# # retention_policy = "default" +# ## InfluxDB database +# # database = "telegraf" +# ## InfluxDB precision +# # precision = "s" +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. 
+# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Configuration for AWS CloudWatch output. +# [[outputs.cloudwatch]] +# ## Amazon REGION +# region = 'us-east-1' +# +# ## Namespace for the CloudWatch MetricDatums +# namespace = 'InfluxData/Telegraf' + + +# # Configuration for DataDog API to send metrics to. +# [[outputs.datadog]] +# ## Datadog API key +# apikey = "my-secret-key" # required. +# +# ## Connection timeout. +# # timeout = "5s" + + +# # Send telegraf metrics to file(s) +# [[outputs.file]] +# ## Files to write to, "stdout" is a specially handled file. +# files = ["stdout", "/tmp/metrics.out"] +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Configuration for Graphite server to send metrics to +# [[outputs.graphite]] +# ## TCP endpoint for your graphite instance. +# servers = ["localhost:2003"] +# ## Prefix metrics name +# prefix = "" +# ## timeout in seconds for the write connection to graphite +# timeout = 2 + + +# # Configuration for the Kafka server to send metrics to +# [[outputs.kafka]] +# ## URLs of kafka brokers +# brokers = ["localhost:9092"] +# ## Kafka topic for producer messages +# topic = "telegraf" +# ## Telegraf tag to use as a routing key +# ## ie, if this tag exists, it's value will be used as the routing key +# routing_tag = "host" +# +# ## CompressionCodec represents the various compression codecs recognized by +# ## Kafka in messages. +# ## 0 : No compression +# ## 1 : Gzip compression +# ## 2 : Snappy compression +# compression_codec = 0 +# +# ## RequiredAcks is used in Produce Requests to tell the broker how many +# ## replica acknowledgements it must see before responding +# ## 0 : the producer never waits for an acknowledgement from the broker. +# ## This option provides the lowest latency but the weakest durability +# ## guarantees (some data will be lost when a server fails). +# ## 1 : the producer gets an acknowledgement after the leader replica has +# ## received the data. This option provides better durability as the +# ## client waits until the server acknowledges the request as successful +# ## (only messages that were written to the now-dead leader but not yet +# ## replicated will be lost). +# ## -1: the producer gets an acknowledgement after all in-sync replicas have +# ## received the data. This option provides the best durability, we +# ## guarantee that no messages will be lost as long as at least one in +# ## sync replica remains. +# required_acks = -1 +# +# ## The total number of times to retry sending a message +# max_retry = 3 +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Configuration for the AWS Kinesis output. +# [[outputs.kinesis]] +# ## Amazon REGION of kinesis endpoint. 
+# region = "ap-southeast-2" +# ## Kinesis StreamName must exist prior to starting telegraf. +# streamname = "StreamName" +# ## PartitionKey as used for sharding data. +# partitionkey = "PartitionKey" +# ## format of the Data payload in the kinesis PutRecord, supported +# ## String and Custom. +# format = "string" +# ## debug will show upstream aws messages. +# debug = false + + +# # Configuration for Librato API to send metrics to. +# [[outputs.librato]] +# ## Librator API Docs +# ## http://dev.librato.com/v1/metrics-authentication +# +# ## Librato API user +# api_user = "telegraf@influxdb.com" # required. +# +# ## Librato API token +# api_token = "my-secret-token" # required. +# +# ### Debug +# # debug = false +# +# ### Tag Field to populate source attribute (optional) +# ### This is typically the _hostname_ from which the metric was obtained. +# source_tag = "host" +# +# ## Connection timeout. +# # timeout = "5s" + + +# # Configuration for MQTT server to send metrics to +# [[outputs.mqtt]] +# servers = ["localhost:1883"] # required. +# +# ## MQTT outputs send metrics to this topic format +# ## "///" +# ## ex: prefix/web01.example.com/mem +# topic_prefix = "telegraf" +# +# ## username and password to connect MQTT server. +# # username = "telegraf" +# # password = "metricsmetricsmetricsmetrics" +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send telegraf measurements to NSQD +# [[outputs.nsq]] +# ## Location of nsqd instance listening on TCP +# server = "localhost:4150" +# ## NSQ topic for producer messages +# topic = "telegraf" +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Configuration for OpenTSDB server to send metrics to +# [[outputs.opentsdb]] +# ## prefix for metrics keys +# prefix = "my.specific.prefix." 
+# +# ## Telnet Mode ## +# ## DNS name of the OpenTSDB server in telnet mode +# host = "opentsdb.example.com" +# +# ## Port of the OpenTSDB server in telnet mode +# port = 4242 +# +# ## Debug true - Prints OpenTSDB communication +# debug = false + + +# # Configuration for the Prometheus client to spawn +# [[outputs.prometheus_client]] +# ## Address to listen on +# # listen = ":9126" + + +# # Configuration for the Riemann server to send metrics to +# [[outputs.riemann]] +# ## URL of server +# url = "localhost:5555" +# ## transport protocol to use either tcp or udp +# transport = "tcp" +# ## separator to use between input name and field name in Riemann service name +# separator = " " + + + ############################################################################### -# INPUTS # +# INPUT PLUGINS # ############################################################################### # Read metrics about cpu usage [[inputs.cpu]] - # Whether to report per-cpu stats or not + ## Whether to report per-cpu stats or not percpu = true - # Whether to report total system cpu stats or not + ## Whether to report total system cpu stats or not totalcpu = true - # Comment this line if you want the raw CPU time metrics + ## Comment this line if you want the raw CPU time metrics fielddrop = ["time_*"] + # Read metrics about disk usage by mount point [[inputs.disk]] - # By default, telegraf gather stats for all mountpoints. - # Setting mountpoints will restrict the stats to the specified mountpoints. - # mount_points=["/"] + ## By default, telegraf gather stats for all mountpoints. + ## Setting mountpoints will restrict the stats to the specified mountpoints. + # mount_points = ["/"] - # Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually - # present on /run, /var/run, /dev/shm or /dev). + ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually + ## present on /run, /var/run, /dev/shm or /dev). ignore_fs = ["tmpfs", "devtmpfs"] + # Read metrics about disk IO by device [[inputs.diskio]] - # By default, telegraf will gather stats for all devices including - # disk partitions. - # Setting devices will restrict the stats to the specified devices. + ## By default, telegraf will gather stats for all devices including + ## disk partitions. + ## Setting devices will restrict the stats to the specified devices. # devices = ["sda", "sdb"] - # Uncomment the following line if you do not need disk serial numbers. + ## Uncomment the following line if you do not need disk serial numbers. # skip_serial_number = true -# Get kernel statistics from /proc/stat -[[inputs.kernel]] - # no configuration # Read metrics about memory usage [[inputs.mem]] # no configuration + # Get the number of processes and group them by status [[inputs.processes]] # no configuration + # Read metrics about swap memory usage [[inputs.swap]] # no configuration + # Read metrics about system load & uptime [[inputs.system]] # no configuration +# # Read stats from an aerospike server +# [[inputs.aerospike]] +# ## Aerospike servers to connect to (with port) +# ## This plugin will query all namespaces the aerospike +# ## server has configured and get stats for them. +# servers = ["localhost:3000"] + + +# # Read Apache status information (mod_status) +# [[inputs.apache]] +# ## An array of Apache status URI to gather stats. 
+# urls = ["http://localhost/server-status?auto"] + + +# # Read metrics of bcache from stats_total and dirty_data +# [[inputs.bcache]] +# ## Bcache sets path +# ## If not specified, then default is: +# bcachePath = "/sys/fs/bcache" +# +# ## By default, telegraf gather stats for all bcache devices +# ## Setting devices will restrict the stats to the specified +# ## bcache devices. +# bcacheDevs = ["bcache0"] + + +# # Read metrics from one or many couchbase clusters +# [[inputs.couchbase]] +# ## specify servers via a url matching: +# ## [protocol://][:password]@address[:port] +# ## e.g. +# ## http://couchbase-0.example.com/ +# ## http://admin:secret@couchbase-0.example.com:8091/ +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no protocol is specifed, HTTP is used. +# ## If no port is specified, 8091 is used. +# servers = ["http://localhost:8091"] + + +# # Read CouchDB Stats from one or more servers +# [[inputs.couchdb]] +# ## Works with CouchDB stats endpoints out of the box +# ## Multiple HOSTs from which to read CouchDB stats: +# hosts = ["http://localhost:8086/_stats"] + + +# # Read metrics from one or many disque servers +# [[inputs.disque]] +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port and password. +# ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. +# ## If no servers are specified, then localhost is used as the host. +# servers = ["localhost"] + + +# # Query given DNS server and gives statistics +# [[inputs.dns_query]] +# ## servers to query +# servers = ["8.8.8.8"] # required +# +# ## Domains or subdomains to query. "."(root) is default +# domains = ["."] # optional +# +# ## Query record type. Default is "A" +# ## Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. +# record_type = "A" # optional +# +# ## Dns server port. 53 is default +# port = 53 # optional +# +# ## Query timeout in seconds. Default is 2 seconds +# timeout = 2 # optional + + +# # Read metrics about docker containers +# [[inputs.docker]] +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# endpoint = "unix:///var/run/docker.sock" +# ## Only collect metrics for these containers, collect all if empty +# container_names = [] + + +# # Read statistics from one or many dovecot servers +# [[inputs.dovecot]] +# ## specify dovecot servers via an address:port list +# ## e.g. +# ## localhost:24242 +# ## +# ## If no servers are specified, then localhost is used as the host. +# servers = ["localhost:24242"] +# ## Only collect metrics for these domains, collect all if empty +# domains = [] + + +# # Read stats from one or more Elasticsearch servers or clusters +# [[inputs.elasticsearch]] +# ## specify a list of one or more Elasticsearch servers +# servers = ["http://localhost:9200"] +# +# ## set local to false when you want to read the indices stats from all nodes +# ## within the cluster +# local = true +# +# ## set cluster_health to true when you want to also obtain cluster level stats +# cluster_health = false + + +# # Read metrics from one or more commands that can output to stdout +# [[inputs.exec]] +# ## Commands array +# commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] +# +# ## measurement name suffix (for separating different commands) +# name_suffix = "_mycollector" +# +# ## Data format to consume. 
+# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics of haproxy, via socket or csv stats page +# [[inputs.haproxy]] +# ## An array of address to gather stats about. Specify an ip on hostname +# ## with optional port. ie localhost, 10.10.3.33:1936, etc. +# +# ## If no servers are specified, then default to 127.0.0.1:1936 +# servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"] +# ## Or you can also use local socket(not work yet) +# ## servers = ["socket://run/haproxy/admin.sock"] + + +# # Read flattened metrics from one or more JSON HTTP endpoints +# [[inputs.httpjson]] +# ## NOTE This plugin only reads numerical measurements, strings and booleans +# ## will be ignored. +# +# ## a name for the service being polled +# name = "webserver_stats" +# +# ## URL of each server in the service's cluster +# servers = [ +# "http://localhost:9999/stats/", +# "http://localhost:9998/stats/", +# ] +# +# ## HTTP method to use: GET or POST (case-sensitive) +# method = "GET" +# +# ## List of tag names to extract from top-level of JSON server response +# # tag_keys = [ +# # "my_tag_1", +# # "my_tag_2" +# # ] +# +# ## HTTP parameters (all values must be strings) +# [inputs.httpjson.parameters] +# event_type = "cpu_spike" +# threshold = "0.75" +# +# ## HTTP Header parameters (all values must be strings) +# # [inputs.httpjson.headers] +# # X-Auth-Token = "my-xauth-token" +# # apiVersion = "v1" +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints +# [[inputs.influxdb]] +# ## Works with InfluxDB debug endpoints out of the box, +# ## but other services can use this format too. +# ## See the influxdb plugin's README for more details. +# +# ## Multiple URLs from which to read InfluxDB-formatted JSON +# urls = [ +# "http://localhost:8086/debug/vars" +# ] + + +# # Read metrics from one or many bare metal servers +# [[inputs.ipmi_sensor]] +# ## specify servers via a url matching: +# ## [username[:password]@][protocol[(address)]] +# ## e.g. +# ## root:passwd@lan(127.0.0.1) +# ## +# servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] + + +# # Read JMX metrics through Jolokia +# [[inputs.jolokia]] +# ## This is the context root used to compose the jolokia url +# context = "/jolokia/read" +# +# ## List of servers exposing jolokia read service +# [[inputs.jolokia.servers]] +# name = "stable" +# host = "192.168.103.2" +# port = "8180" +# # username = "myuser" +# # password = "mypassword" +# +# ## List of metrics collected on above servers +# ## Each metric consists in a name, a jmx path and either +# ## a pass or drop slice attribute. +# ## This collect all heap memory usage metrics. +# [[inputs.jolokia.metrics]] +# name = "heap_memory_usage" +# jmx = "/java.lang:type=Memory/HeapMemoryUsage" + + +# # Read metrics from a LeoFS Server via SNMP +# [[inputs.leofs]] +# ## An array of URI to gather stats about LeoFS. +# ## Specify an ip or hostname with port. 
ie 127.0.0.1:4020 +# servers = ["127.0.0.1:4021"] + + +# # Read metrics from local Lustre service on OST, MDS +# [[inputs.lustre2]] +# ## An array of /proc globs to search for Lustre stats +# ## If not specified, the default will work on Lustre 2.5.x +# ## +# # ost_procfiles = [ +# # "/proc/fs/lustre/obdfilter/*/stats", +# # "/proc/fs/lustre/osd-ldiskfs/*/stats" +# # ] +# # mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"] + + +# # Gathers metrics from the /3.0/reports MailChimp API +# [[inputs.mailchimp]] +# ## MailChimp API key +# ## get from https://admin.mailchimp.com/account/api/ +# api_key = "" # required +# ## Reports for campaigns sent more than days_old ago will not be collected. +# ## 0 means collect all. +# days_old = 0 +# ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old +# # campaign_id = "" + + +# # Read metrics from one or many memcached servers +# [[inputs.memcached]] +# ## An array of address to gather stats about. Specify an ip on hostname +# ## with optional port. ie localhost, 10.0.0.1:11211, etc. +# servers = ["localhost:11211"] +# # unix_sockets = ["/var/run/memcached.sock"] + + +# # Telegraf plugin for gathering metrics from N Mesos masters +# [[inputs.mesos]] +# # Timeout, in ms. +# timeout = 100 +# # A list of Mesos masters, default value is localhost:5050. +# masters = ["localhost:5050"] +# # Metrics groups to be collected, by default, all enabled. +# master_collections = [ +# "resources", +# "master", +# "system", +# "slaves", +# "frameworks", +# "messages", +# "evqueue", +# "registrar", +# ] + + +# # Read metrics from one or many MongoDB servers +# [[inputs.mongodb]] +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port add password. ie, +# ## mongodb://user:auth_key@10.10.3.30:27017, +# ## mongodb://10.10.3.33:18832, +# ## 10.0.0.1:10000, etc. +# servers = ["127.0.0.1:27017"] + + +# # Read metrics from one or many mysql servers +# [[inputs.mysql]] +# ## specify servers via a url matching: +# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]] +# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name +# ## e.g. +# ## root:passwd@tcp(127.0.0.1:3306)/?tls=false +# ## root@tcp(127.0.0.1:3306)/?tls=false +# ## +# ## If no servers are specified, then localhost is used as the host. +# servers = ["tcp(127.0.0.1:3306)/"] + + +# # Read metrics about network interface usage +# [[inputs.net]] +# ## By default, telegraf gathers stats from any up interface (excluding loopback) +# ## Setting interfaces will tell it to gather these explicit interfaces, +# ## regardless of status. +# ## +# # interfaces = ["eth0"] + + +# # TCP or UDP 'ping' given url and collect response time in seconds +# [[inputs.net_response]] +# ## Protocol, must be "tcp" or "udp" +# protocol = "tcp" +# ## Server address (default localhost) +# address = "github.com:80" +# ## Set timeout (default 1.0 seconds) +# timeout = 1.0 +# ## Set read timeout (default 1.0 seconds) +# read_timeout = 1.0 +# ## Optional string sent to the server +# # send = "ssh" +# ## Optional expected string in answer +# # expect = "ssh" + + +# # Read TCP metrics such as established, time wait and sockets counts. +# [[inputs.netstat]] +# # no configuration + + +# # Read Nginx's basic status information (ngx_http_stub_status_module) +# [[inputs.nginx]] +# ## An array of Nginx stub_status URI to gather stats. +# urls = ["http://localhost/status"] + + +# # Read NSQ topic and channel statistics. 
+# [[inputs.nsq]] +# ## An array of NSQD HTTP API endpoints +# endpoints = ["http://localhost:4151"] + + +# # Get standard NTP query metrics, requires ntpq executable. +# [[inputs.ntpq]] +# ## If false, set the -n ntpq flag. Can reduce metric gather time. +# dns_lookup = true + + +# # Read metrics of passenger using passenger-status +# [[inputs.passenger]] +# ## Path of passenger-status. +# ## +# ## Plugin gather metric via parsing XML output of passenger-status +# ## More information about the tool: +# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html +# ## +# ## If no path is specified, then the plugin simply execute passenger-status +# ## hopefully it can be found in your PATH +# command = "passenger-status -v --show=xml" + + +# # Read metrics of phpfpm, via HTTP status page or socket +# [[inputs.phpfpm]] +# ## An array of addresses to gather stats about. Specify an ip or hostname +# ## with optional port and path +# ## +# ## Plugin can be configured in three modes (either can be used): +# ## - http: the URL must start with http:// or https://, ie: +# ## "http://localhost/status" +# ## "http://192.168.130.1/status?full" +# ## +# ## - unixsocket: path to fpm socket, ie: +# ## "/var/run/php5-fpm.sock" +# ## or using a custom fpm status path: +# ## "/var/run/php5-fpm.sock:fpm-custom-status-path" +# ## +# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: +# ## "fcgi://10.0.0.12:9000/status" +# ## "cgi://10.0.10.12:9001/status" +# ## +# ## Example of multiple gathering from local socket and remove host +# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] +# urls = ["http://localhost/status"] + + +# # Ping given url(s) and return statistics +# [[inputs.ping]] +# ## NOTE: this plugin forks the ping command. You may need to set capabilities +# ## via setcap cap_net_raw+p /bin/ping +# +# ## urls to ping +# urls = ["www.google.com"] # required +# ## number of pings to send (ping -c ) +# count = 1 # required +# ## interval, in s, at which to ping. 0 == default (ping -i ) +# ping_interval = 0.0 +# ## ping timeout, in s. 0 == no timeout (ping -t ) +# timeout = 0.0 +# ## interface to send ping from (ping -I ) +# interface = "" + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production +# ## +# ## All connection parameters are optional. +# ## +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# ## +# address = "host=localhost user=postgres sslmode=disable" +# +# ## A list of databases to pull metrics about. If not specified, metrics for all +# ## databases are gathered. +# # databases = ["app_production", "testing"] + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql_extensible]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production +# # +# ## All connection parameters are optional. 
# +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# # +# address = "host=localhost user=postgres sslmode=disable" +# ## A list of databases to pull metrics about. If not specified, metrics for all +# ## databases are gathered. +# ## databases = ["app_production", "testing"] +# # +# ## Define the toml config where the sql queries are stored +# ## New queries can be added, if the withdbname is set to true and there is no +# ## databases defined in the 'databases field', the sql query is ended by a +# ## 'is not null' in order to make the query succeed. +# ## Example : +# ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become +# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" +# ## because the databases variable was set to ['postgres', 'pgbench' ] and the +# ## withdbname was true. Be careful that if the withdbname is set to false you +# ## don't have to define the where clause (aka with the dbname) the tagvalue +# ## field is used to define custom tags (separated by comas) +# # +# ## Structure : +# ## [[inputs.postgresql_extensible.query]] +# ## sqlquery string +# ## version string +# ## withdbname boolean +# ## tagvalue string (coma separated) +# [[inputs.postgresql_extensible.query]] +# sqlquery="SELECT * FROM pg_stat_database" +# version=901 +# withdbname=false +# tagvalue="" +# [[inputs.postgresql_extensible.query]] +# sqlquery="SELECT * FROM pg_stat_bgwriter" +# version=901 +# withdbname=false +# tagvalue="" + + +# # Read metrics from one or many PowerDNS servers +# [[inputs.powerdns]] +# ## An array of sockets to gather stats about. +# ## Specify a path to unix socket. +# unix_sockets = ["/var/run/pdns.controlsocket"] + + +# # Monitor process cpu and memory usage +# [[inputs.procstat]] +# ## Must specify one of: pid_file, exe, or pattern +# ## PID file to monitor process +# pid_file = "/var/run/nginx.pid" +# ## executable name (ie, pgrep ) +# # exe = "nginx" +# ## pattern as argument for pgrep (ie, pgrep -f ) +# # pattern = "nginx" +# ## user as argument for pgrep (ie, pgrep -u ) +# # user = "nginx" +# +# ## Field name prefix +# prefix = "" + + +# # Read metrics from one or many prometheus clients +# [[inputs.prometheus]] +# ## An array of urls to scrape metrics from. +# urls = ["http://localhost:9100/metrics"] +# +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# ## Use bearer token for authorization +# # bearer_token = /path/to/bearer/token + + +# # Reads last_run_summary.yaml file and converts to measurments +# [[inputs.puppetagent]] +# ## Location of puppet last run summary file +# location = "/var/lib/puppet/state/last_run_summary.yaml" + + +# # Read metrics from one or many RabbitMQ servers via the management API +# [[inputs.rabbitmq]] +# url = "http://localhost:15672" # required +# # name = "rmq-server-1" # optional tag +# # username = "guest" +# # password = "guest" +# +# ## A list of nodes to pull metrics about. If not specified, metrics for +# ## all nodes are gathered. +# # nodes = ["rabbit@node1", "rabbit@node2"] + + +# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers) +# [[inputs.raindrops]] +# ## An array of raindrops middleware URI to gather stats. 
+# urls = ["http://localhost:8080/_raindrops"] + + +# # Read metrics from one or many redis servers +# [[inputs.redis]] +# ## specify servers via a url matching: +# ## [protocol://][:password]@address[:port] +# ## e.g. +# ## tcp://localhost:6379 +# ## tcp://:password@192.168.99.100 +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 6379 is used +# servers = ["tcp://localhost:6379"] + + +# # Read metrics from one or many RethinkDB servers +# [[inputs.rethinkdb]] +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port add password. ie, +# ## rethinkdb://user:auth_key@10.10.3.30:28105, +# ## rethinkdb://10.10.3.33:18832, +# ## 10.0.0.1:10000, etc. +# servers = ["127.0.0.1:28015"] + + +# # Read metrics one or many Riak servers +# [[inputs.riak]] +# # Specify a list of one or more riak http servers +# servers = ["http://localhost:8098"] + + +# # Reads oids value from one or many snmp agents +# [[inputs.snmp]] +# ## Use 'oids.txt' file to translate oids to names +# ## To generate 'oids.txt' you need to run: +# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt +# ## Or if you have an other MIB folder with custom MIBs +# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt +# snmptranslate_file = "/tmp/oids.txt" +# [[inputs.snmp.host]] +# address = "192.168.2.2:161" +# # SNMP community +# community = "public" # default public +# # SNMP version (1, 2 or 3) +# # Version 3 not supported yet +# version = 2 # default 2 +# # SNMP response timeout +# timeout = 2.0 # default 2.0 +# # SNMP request retries +# retries = 2 # default 2 +# # Which get/bulk do you want to collect for this host +# collect = ["mybulk", "sysservices", "sysdescr"] +# # Simple list of OIDs to get, in addition to "collect" +# get_oids = [] +# +# [[inputs.snmp.host]] +# address = "192.168.2.3:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# collect = ["mybulk"] +# get_oids = [ +# "ifNumber", +# ".1.3.6.1.2.1.1.3.0", +# ] +# +# [[inputs.snmp.get]] +# name = "ifnumber" +# oid = "ifNumber" +# +# [[inputs.snmp.get]] +# name = "interface_speed" +# oid = "ifSpeed" +# instance = "0" +# +# [[inputs.snmp.get]] +# name = "sysuptime" +# oid = ".1.3.6.1.2.1.1.3.0" +# unit = "second" +# +# [[inputs.snmp.bulk]] +# name = "mybulk" +# max_repetition = 127 +# oid = ".1.3.6.1.2.1.1" +# +# [[inputs.snmp.bulk]] +# name = "ifoutoctets" +# max_repetition = 127 +# oid = "ifOutOctets" +# +# [[inputs.snmp.host]] +# address = "192.168.2.13:161" +# #address = "127.0.0.1:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# #collect = ["mybulk", "sysservices", "sysdescr", "systype"] +# collect = ["sysuptime" ] +# [[inputs.snmp.host.table]] +# name = "iftable3" +# include_instances = ["enp5s0", "eth1"] +# +# # SNMP TABLEs +# # table without mapping neither subtables +# [[inputs.snmp.table]] +# name = "iftable1" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# +# # table without mapping but with subtables +# [[inputs.snmp.table]] +# name = "iftable2" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# sub_tables = [".1.3.6.1.2.1.2.2.1.13"] +# +# # table with mapping but without subtables +# [[inputs.snmp.table]] +# name = "iftable3" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty. 
get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty, get all subtables +# +# # table with both mapping and subtables +# [[inputs.snmp.table]] +# name = "iftable4" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty get all subtables +# # sub_tables could be not "real subtables" +# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] + + +# # Read metrics from Microsoft SQL Server +# [[inputs.sqlserver]] +# ## Specify instances to monitor with a list of connection strings. +# ## All connection parameters are optional. +# ## By default, the host is localhost, listening on default port, TCP 1433. +# ## for Windows, the user is the currently running AD user (SSO). +# ## See https://github.com/denisenkom/go-mssqldb for detailed connection +# ## parameters. +# # servers = [ +# # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", +# # ] + + +# # Inserts sine and cosine waves for demonstration purposes +# [[inputs.trig]] +# ## Set the amplitude +# amplitude = 10.0 + + +# # Read Twemproxy stats data +# [[inputs.twemproxy]] +# ## Twemproxy stats address and port (no scheme) +# addr = "localhost:22222" +# ## Monitor pool name +# pools = ["redis_pool", "mc_pool"] + + +# # Read metrics of ZFS from arcstats, zfetchstats and vdev_cache_stats +# [[inputs.zfs]] +# ## ZFS kstat path +# ## If not specified, then default is: +# kstatPath = "/proc/spl/kstat/zfs" +# +# ## By default, telegraf gather all zfs stats +# ## If not specified, then default is: +# kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] +# +# ## By default, don't gather zpool stats +# poolMetrics = false + + +# # Reads 'mntr' stats from one or many zookeeper servers +# [[inputs.zookeeper]] +# ## An array of address to gather stats about. Specify an ip or hostname +# ## with port. ie localhost:2181, 10.0.0.1:2181, etc. +# +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 2181 is used +# servers = [":2181"] + + + ############################################################################### -# SERVICE INPUTS # +# SERVICE INPUT PLUGINS # ############################################################################### + +# # Generic UDP listener +# [[inputs.udp_listener]] +# ## Address and port to host UDP listener on +# service_address = ":8092" +# +# ## Number of UDP messages allowed to queue up. Once filled, the +# ## UDP listener will start dropping packets. +# allowed_pending_messages = 10000 +# +# ## UDP packet size for the server to listen for. This will depend +# ## on the size of the packets that the client is sending, which is +# ## usually 1500 bytes, but can be as large as 65,535 bytes. +# udp_packet_size = 1500 +# +# ## Data format to consume. 
+# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # A Github Webhook Event collector +# [[inputs.github_webhooks]] +# ## Address and port to host Webhook listener on +# service_address = ":1618" + + +# # Read metrics from Kafka topic(s) +# [[inputs.kafka_consumer]] +# ## topic(s) to consume +# topics = ["telegraf"] +# ## an array of Zookeeper connection strings +# zookeeper_peers = ["localhost:2181"] +# ## Zookeeper Chroot +# zookeeper_chroot = "/" +# ## the name of the consumer group +# consumer_group = "telegraf_metrics_consumers" +# ## Offset (must be either "oldest" or "newest") +# offset = "oldest" +# +# ## Data format to consume. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from MQTT topic(s) +# [[inputs.mqtt_consumer]] +# servers = ["localhost:1883"] +# ## MQTT QoS, must be 0, 1, or 2 +# qos = 0 +# +# ## Topics to subscribe to +# topics = [ +# "telegraf/host01/cpu", +# "telegraf/+/mem", +# "sensors/#", +# ] +# +# # if true, messages that can't be delivered while the subscriber is offline +# # will be delivered when it comes back (such as on service restart). +# # NOTE: if true, client_id MUST be set +# persistent_session = false +# # If empty, a random client ID will be generated. +# client_id = "" +# +# ## username and password to connect MQTT server. +# # username = "telegraf" +# # password = "metricsmetricsmetricsmetrics" +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to consume. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from NATS subject(s) +# [[inputs.nats_consumer]] +# ## urls of NATS servers +# servers = ["nats://localhost:4222"] +# ## Use Transport Layer Security +# secure = false +# ## subject(s) to consume +# subjects = ["telegraf"] +# ## name a queue group +# queue_group = "telegraf_consumers" +# +# ## Data format to consume. 
+# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Statsd Server +# [[inputs.statsd]] +# ## Address and port to host UDP listener on +# service_address = ":8125" +# ## Delete gauges every interval (default=false) +# delete_gauges = false +# ## Delete counters every interval (default=false) +# delete_counters = false +# ## Delete sets every interval (default=false) +# delete_sets = false +# ## Delete timings & histograms every interval (default=true) +# delete_timings = true +# ## Percentiles to calculate for timing & histogram stats +# percentiles = [90] +# +# ## separator to use between elements of a statsd metric +# metric_separator = "_" +# +# ## Parses tags in the datadog statsd format +# ## http://docs.datadoghq.com/guides/dogstatsd/ +# parse_data_dog_tags = false +# +# ## Statsd data translation templates, more info can be read here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite +# # templates = [ +# # "cpu.* measurement*" +# # ] +# +# ## Number of UDP messages allowed to queue up, once filled, +# ## the statsd server will start dropping packets +# allowed_pending_messages = 10000 +# +# ## Number of timing/histogram values to track per-measurement in the +# ## calculation of percentiles. Raising this limit increases the accuracy +# ## of percentiles but also increases the memory usage and cpu time. +# percentile_limit = 1000 +# +# ## UDP packet size for the server to listen for. This will depend on the size +# ## of the packets that the client is sending, which is usually 1500 bytes. +# udp_packet_size = 1500 + + +# # Generic TCP listener +# [[inputs.tcp_listener]] +# ## Address and port to host TCP listener on +# service_address = ":8094" +# +# ## Number of TCP messages allowed to queue up. Once filled, the +# ## TCP listener will start dropping packets. +# allowed_pending_messages = 10000 +# +# ## Maximum number of concurrent TCP connections to allow +# max_tcp_connections = 250 +# +# ## Data format to consume. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + diff --git a/internal/config/config.go b/internal/config/config.go index b15c5e651..715fa777c 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -22,6 +22,15 @@ import ( "github.com/influxdata/toml/ast" ) +var ( + // Default input plugins + inputDefaults = []string{"cpu", "mem", "swap", "system", "kernel", + "processes", "disk", "diskio"} + + // Default output plugins + outputDefaults = []string{"influxdb"} +) + // Config specifies the URL/user/password for the database that telegraf // will be logging to, as well as all the plugins that the user has // specified @@ -135,21 +144,23 @@ func (c *Config) ListTags() string { } var header = `# Telegraf Configuration - +# # Telegraf is entirely plugin driven. All metrics are gathered from the # declared inputs, and sent to the declared outputs. - +# # Plugins must be declared in here to be active. # To deactivate a plugin, comment out the name and any variables. - +# # Use 'telegraf -config telegraf.conf -test' to see what metrics a config # file would generate. + # Global tags can be specified here in key="value" format. 
[global_tags] # dc = "us-east-1" # will tag all metrics with dc=us-east-1 # rack = "1a" + # Configuration for telegraf agent [agent] ## Default data collection interval for all inputs @@ -188,55 +199,72 @@ var header = `# Telegraf Configuration omit_hostname = false -# -# OUTPUTS: -# - +############################################################################### +# OUTPUT PLUGINS # +############################################################################### ` -var pluginHeader = ` -# -# INPUTS: -# +var inputHeader = ` + +############################################################################### +# INPUT PLUGINS # +############################################################################### ` var serviceInputHeader = ` -# -# SERVICE INPUTS: -# + +############################################################################### +# SERVICE INPUT PLUGINS # +############################################################################### ` // PrintSampleConfig prints the sample config -func PrintSampleConfig(pluginFilters []string, outputFilters []string) { +func PrintSampleConfig(inputFilters []string, outputFilters []string) { fmt.Printf(header) - // Filter outputs - var onames []string - for oname := range outputs.Outputs { - if len(outputFilters) == 0 || sliceContains(oname, outputFilters) { - onames = append(onames, oname) + if len(outputFilters) != 0 { + printFilteredOutputs(outputFilters, false) + } else { + printFilteredOutputs(outputDefaults, false) + // Print non-default outputs, commented + var pnames []string + for pname := range outputs.Outputs { + if !sliceContains(pname, outputDefaults) { + pnames = append(pnames, pname) + } } - } - sort.Strings(onames) - - // Print Outputs - for _, oname := range onames { - creator := outputs.Outputs[oname] - output := creator() - printConfig(oname, output, "outputs") + sort.Strings(pnames) + printFilteredOutputs(pnames, true) } + fmt.Printf(inputHeader) + if len(inputFilters) != 0 { + printFilteredInputs(inputFilters, false) + } else { + printFilteredInputs(inputDefaults, false) + // Print non-default inputs, commented + var pnames []string + for pname := range inputs.Inputs { + if !sliceContains(pname, inputDefaults) { + pnames = append(pnames, pname) + } + } + sort.Strings(pnames) + printFilteredInputs(pnames, true) + } +} + +func printFilteredInputs(inputFilters []string, commented bool) { // Filter inputs var pnames []string for pname := range inputs.Inputs { - if len(pluginFilters) == 0 || sliceContains(pname, pluginFilters) { + if sliceContains(pname, inputFilters) { pnames = append(pnames, pname) } } sort.Strings(pnames) // Print Inputs - fmt.Printf(pluginHeader) servInputs := make(map[string]telegraf.ServiceInput) for _, pname := range pnames { creator := inputs.Inputs[pname] @@ -248,13 +276,34 @@ func PrintSampleConfig(pluginFilters []string, outputFilters []string) { continue } - printConfig(pname, input, "inputs") + printConfig(pname, input, "inputs", commented) } // Print Service Inputs + if len(servInputs) == 0 { + return + } fmt.Printf(serviceInputHeader) for name, input := range servInputs { - printConfig(name, input, "inputs") + printConfig(name, input, "inputs", commented) + } +} + +func printFilteredOutputs(outputFilters []string, commented bool) { + // Filter outputs + var onames []string + for oname := range outputs.Outputs { + if sliceContains(oname, outputFilters) { + onames = append(onames, oname) + } + } + sort.Strings(onames) + + // Print Outputs + for _, oname := range onames { + creator := 
outputs.Outputs[oname] + output := creator() + printConfig(oname, output, "outputs", commented) } } @@ -263,13 +312,26 @@ type printer interface { SampleConfig() string } -func printConfig(name string, p printer, op string) { - fmt.Printf("\n# %s\n[[%s.%s]]", p.Description(), op, name) +func printConfig(name string, p printer, op string, commented bool) { + comment := "" + if commented { + comment = "# " + } + fmt.Printf("\n%s# %s\n%s[[%s.%s]]", comment, p.Description(), comment, + op, name) + config := p.SampleConfig() if config == "" { - fmt.Printf("\n # no configuration\n") + fmt.Printf("\n%s # no configuration\n\n", comment) } else { - fmt.Printf(config) + lines := strings.Split(config, "\n") + for i, line := range lines { + if i == 0 || i == len(lines)-1 { + fmt.Print("\n") + continue + } + fmt.Print(comment + line + "\n") + } } } @@ -285,7 +347,7 @@ func sliceContains(name string, list []string) bool { // PrintInputConfig prints the config usage of a single input. func PrintInputConfig(name string) error { if creator, ok := inputs.Inputs[name]; ok { - printConfig(name, creator(), "inputs") + printConfig(name, creator(), "inputs", false) } else { return errors.New(fmt.Sprintf("Input %s not found", name)) } @@ -295,7 +357,7 @@ func PrintInputConfig(name string) error { // PrintOutputConfig prints the config usage of a single output. func PrintOutputConfig(name string) error { if creator, ok := outputs.Outputs[name]; ok { - printConfig(name, creator(), "outputs") + printConfig(name, creator(), "outputs", false) } else { return errors.New(fmt.Sprintf("Output %s not found", name)) } @@ -327,42 +389,42 @@ func (c *Config) LoadDirectory(path string) error { func (c *Config) LoadConfig(path string) error { tbl, err := config.ParseFile(path) if err != nil { - return err + return fmt.Errorf("Error parsing %s, %s", path, err) } for name, val := range tbl.Fields { subTable, ok := val.(*ast.Table) if !ok { - return errors.New("invalid configuration") + return fmt.Errorf("%s: invalid configuration", path) } switch name { case "agent": if err = config.UnmarshalTable(subTable, c.Agent); err != nil { log.Printf("Could not parse [agent] config\n") - return err + return fmt.Errorf("Error parsing %s, %s", path, err) } case "global_tags", "tags": if err = config.UnmarshalTable(subTable, c.Tags); err != nil { log.Printf("Could not parse [global_tags] config\n") - return err + return fmt.Errorf("Error parsing %s, %s", path, err) } case "outputs": for pluginName, pluginVal := range subTable.Fields { switch pluginSubTable := pluginVal.(type) { case *ast.Table: if err = c.addOutput(pluginName, pluginSubTable); err != nil { - return err + return fmt.Errorf("Error parsing %s, %s", path, err) } case []*ast.Table: for _, t := range pluginSubTable { if err = c.addOutput(pluginName, t); err != nil { - return err + return fmt.Errorf("Error parsing %s, %s", path, err) } } default: - return fmt.Errorf("Unsupported config format: %s", - pluginName) + return fmt.Errorf("Unsupported config format: %s, file %s", + pluginName, path) } } case "inputs", "plugins": @@ -370,24 +432,24 @@ func (c *Config) LoadConfig(path string) error { switch pluginSubTable := pluginVal.(type) { case *ast.Table: if err = c.addInput(pluginName, pluginSubTable); err != nil { - return err + return fmt.Errorf("Error parsing %s, %s", path, err) } case []*ast.Table: for _, t := range pluginSubTable { if err = c.addInput(pluginName, t); err != nil { - return err + return fmt.Errorf("Error parsing %s, %s", path, err) } } default: - return 
fmt.Errorf("Unsupported config format: %s", - pluginName) + return fmt.Errorf("Unsupported config format: %s, file %s", + pluginName, path) } } // Assume it's an input input for legacy config file support if no other // identifiers are present default: if err = c.addInput(name, subTable); err != nil { - return err + return fmt.Errorf("Error parsing %s, %s", path, err) } } } diff --git a/plugins/inputs/disque/disque.go b/plugins/inputs/disque/disque.go index 822e5924f..d726590b4 100644 --- a/plugins/inputs/disque/disque.go +++ b/plugins/inputs/disque/disque.go @@ -24,9 +24,8 @@ type Disque struct { var sampleConfig = ` ## An array of URI to gather stats about. Specify an ip or hostname - ## with optional port and password. ie disque://localhost, disque://10.10.3.33:18832, - ## 10.0.0.1:10000, etc. - + ## with optional port and password. + ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. ## If no servers are specified, then localhost is used as the host. servers = ["localhost"] ` diff --git a/plugins/inputs/dns_query/dns_query.go b/plugins/inputs/dns_query/dns_query.go index 397482a98..2231f2921 100644 --- a/plugins/inputs/dns_query/dns_query.go +++ b/plugins/inputs/dns_query/dns_query.go @@ -35,7 +35,8 @@ var sampleConfig = ` ## Domains or subdomains to query. "."(root) is default domains = ["."] # optional - ## Query record type. Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. Default is "NS" + ## Query record type. Default is "A" + ## Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. record_type = "A" # optional ## Dns server port. 53 is default diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index 9fd9491ca..d2e09ccd0 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -22,7 +22,7 @@ const sampleConfig = ` ## measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ## Data format to consume. This can be "json", "influx", "graphite" or "nagios + ## Data format to consume. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 07c87199f..a2cda43d6 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -56,7 +56,7 @@ var sampleConfig = ` ## Offset (must be either "oldest" or "newest") offset = "oldest" - ## Data format to consume. This can be "json", "influx" or "graphite" + ## Data format to consume. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index ccb76daae..b096a20d9 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -34,7 +34,16 @@ var sampleConfig = ` # A list of Mesos masters, default value is localhost:5050. masters = ["localhost:5050"] # Metrics groups to be collected, by default, all enabled. 
- master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"] + master_collections = [ + "resources", + "master", + "system", + "slaves", + "frameworks", + "messages", + "evqueue", + "registrar", + ] ` // SampleConfig returns a sample configuration block diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 50a20740a..c64d2139b 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -78,7 +78,7 @@ var sampleConfig = ` ## Use SSL but skip chain & host verification # insecure_skip_verify = false - ## Data format to consume. This can be "json", "influx" or "graphite" + ## Data format to consume. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index 235601100..232d5740f 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -55,7 +55,7 @@ var sampleConfig = ` ## name a queue group queue_group = "telegraf_consumers" - ## Data format to consume. This can be "json", "influx" or "graphite" + ## Data format to consume. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index d8d0d1978..da8ee8001 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -26,7 +26,8 @@ var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_rese var sampleConfig = ` ## specify address via a url matching: - ## postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] + ## postgres://[pqgotest[:password]]@localhost[/dbname]\ + ## ?sslmode=[disable|verify-ca|verify-full] ## or a simple string: ## host=localhost user=pqotest password=... sslmode=... dbname=app_production ## diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index 67097db4b..4ebf752ff 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -38,38 +38,41 @@ type query []struct { var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true} var sampleConfig = ` - # specify address via a url matching: - # postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] - # or a simple string: - # host=localhost user=pqotest password=... sslmode=... dbname=app_production + ## specify address via a url matching: + ## postgres://[pqgotest[:password]]@localhost[/dbname]\ + ## ?sslmode=[disable|verify-ca|verify-full] + ## or a simple string: + ## host=localhost user=pqotest password=... sslmode=... dbname=app_production # - # All connection parameters are optional. # - # Without the dbname parameter, the driver will default to a database - # with the same name as the user. This dbname is just for instantiating a - # connection with the server and doesn't restrict the databases we are trying - # to grab metrics for. + ## All connection parameters are optional. 
#
+  ## Without the dbname parameter, the driver will default to a database
+  ## with the same name as the user. This dbname is just for instantiating a
+  ## connection with the server and doesn't restrict the databases we are trying
+  ## to grab metrics for.
   #
   address = "host=localhost user=postgres sslmode=disable"
-  # A list of databases to pull metrics about. If not specified, metrics for all
-  # databases are gathered.
-  # databases = ["app_production", "testing"]
+  ## A list of databases to pull metrics about. If not specified, metrics for all
+  ## databases are gathered.
+  ## databases = ["app_production", "testing"]
   #
-  # Define the toml config where the sql queries are stored
-  # New queries can be added, if the withdbname is set to true and there is no databases defined
-  # in the 'databases field', the sql query is ended by a 'is not null' in order to make the query
-  # succeed.
-  # Example :
-  # The sqlquery : "SELECT * FROM pg_stat_database where datname" become "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
-  # because the databases variable was set to ['postgres', 'pgbench' ] and the withdbname was true.
-  # Be careful that if the withdbname is set to false you d'ont have to define the where clause (aka with the dbname)
-  # the tagvalue field is used to define custom tags (separated by comas)
+  ## Define the toml config where the sql queries are stored
+  ## New queries can be added, if the withdbname is set to true and there is no
+  ## databases defined in the 'databases field', the sql query is ended by a
+  ## 'is not null' in order to make the query succeed.
+  ## Example:
+  ## The sqlquery "SELECT * FROM pg_stat_database where datname" becomes
+  ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
+  ## because the databases variable was set to ['postgres', 'pgbench'] and
+  ## withdbname was true. Note that if withdbname is set to false you must
+  ## not define the where clause (aka with the dbname). The tagvalue field
+  ## is used to define custom tags (separated by commas).
   #
-  # Structure :
-  # [[inputs.postgresql_extensible.query]]
-  #   sqlquery string
-  #   version string
-  #   withdbname boolean
-  #   tagvalue string (coma separated)
+  ## Structure:
+  ## [[inputs.postgresql_extensible.query]]
+  ##   sqlquery string
+  ##   version string
+  ##   withdbname boolean
+  ##   tagvalue string (comma separated)
   [[inputs.postgresql_extensible.query]]
     sqlquery="SELECT * FROM pg_stat_database"
     version=901
diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go
index 0281cc24a..460a79faf 100644
--- a/plugins/inputs/prometheus/prometheus.go
+++ b/plugins/inputs/prometheus/prometheus.go
@@ -26,10 +26,10 @@ var sampleConfig = `
 	## An array of urls to scrape metrics from.
urls = ["http://localhost:9100/metrics"] - ### Use SSL but skip chain & host verification - # insecure_skip_verify = false - ### Use bearer token for authorization - # bearer_token = /path/to/bearer/token + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false + ## Use bearer token for authorization + # bearer_token = /path/to/bearer/token ` func (p *Prometheus) SampleConfig() string { diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index a56e53ff7..4c2de93c9 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -178,7 +178,6 @@ var sampleConfig = ` max_repetition = 127 oid = "ifOutOctets" - [[inputs.snmp.host]] address = "192.168.2.13:161" #address = "127.0.0.1:161" @@ -219,10 +218,8 @@ var sampleConfig = ` # if empty get all instances mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" # if empty get all subtables - # sub_tables could be not "real subtables" + # sub_tables could be not "real subtables" sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] - - ` // SampleConfig returns sample configuration message diff --git a/plugins/inputs/tcp_listener/tcp_listener.go b/plugins/inputs/tcp_listener/tcp_listener.go index dd239fedf..a1b991058 100644 --- a/plugins/inputs/tcp_listener/tcp_listener.go +++ b/plugins/inputs/tcp_listener/tcp_listener.go @@ -53,7 +53,7 @@ const sampleConfig = ` ## Maximum number of concurrent TCP connections to allow max_tcp_connections = 250 - ## Data format to consume. This can be "json", "influx" or "graphite" + ## Data format to consume. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/udp_listener/udp_listener.go b/plugins/inputs/udp_listener/udp_listener.go index 9b0a65d6f..794f1791d 100644 --- a/plugins/inputs/udp_listener/udp_listener.go +++ b/plugins/inputs/udp_listener/udp_listener.go @@ -48,7 +48,7 @@ const sampleConfig = ` ## usually 1500 bytes, but can be as large as 65,535 bytes. udp_packet_size = 1500 - ## Data format to consume. This can be "json", "influx" or "graphite" + ## Data format to consume. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index c9531b2a5..bf9353d6e 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -89,7 +89,7 @@ var sampleConfig = ` ## Use SSL but skip chain & host verification # insecure_skip_verify = false - ## Data format to output. This can be "influx" or "graphite" + ## Data format to output. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go index 743c0f03f..1d47642b2 100644 --- a/plugins/outputs/file/file.go +++ b/plugins/outputs/file/file.go @@ -23,7 +23,7 @@ var sampleConfig = ` ## Files to write to, "stdout" is a specially handled file. files = ["stdout", "/tmp/metrics.out"] - ## Data format to output. This can be "influx" or "graphite" + ## Data format to output. 
## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 3cecfeeab..1fafa1353 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -59,16 +59,27 @@ var sampleConfig = ` ## ie, if this tag exists, it's value will be used as the routing key routing_tag = "host" - ## CompressionCodec represents the various compression codecs recognized by Kafka in messages. + ## CompressionCodec represents the various compression codecs recognized by + ## Kafka in messages. ## 0 : No compression ## 1 : Gzip compression ## 2 : Snappy compression compression_codec = 0 - ## RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements it must see before responding - ## 0 : the producer never waits for an acknowledgement from the broker. This option provides the lowest latency but the weakest durability guarantees (some data will be lost when a server fails). - ## 1 : the producer gets an acknowledgement after the leader replica has received the data. This option provides better durability as the client waits until the server acknowledges the request as successful (only messages that were written to the now-dead leader but not yet replicated will be lost). - ## -1 : the producer gets an acknowledgement after all in-sync replicas have received the data. This option provides the best durability, we guarantee that no messages will be lost as long as at least one in sync replica remains. + ## RequiredAcks is used in Produce Requests to tell the broker how many + ## replica acknowledgements it must see before responding + ## 0 : the producer never waits for an acknowledgement from the broker. + ## This option provides the lowest latency but the weakest durability + ## guarantees (some data will be lost when a server fails). + ## 1 : the producer gets an acknowledgement after the leader replica has + ## received the data. This option provides better durability as the + ## client waits until the server acknowledges the request as successful + ## (only messages that were written to the now-dead leader but not yet + ## replicated will be lost). + ## -1: the producer gets an acknowledgement after all in-sync replicas have + ## received the data. This option provides the best durability, we + ## guarantee that no messages will be lost as long as at least one in + ## sync replica remains. required_acks = -1 ## The total number of times to retry sending a message @@ -81,7 +92,7 @@ var sampleConfig = ` ## Use SSL but skip chain & host verification # insecure_skip_verify = false - ## Data format to output. This can be "influx" or "graphite" + ## Data format to output. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index f13500db9..c57ee8cd0 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -32,7 +32,7 @@ var sampleConfig = ` ## Use SSL but skip chain & host verification # insecure_skip_verify = false - ## Data format to output. This can be "influx" or "graphite" + ## Data format to output. 
## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md diff --git a/plugins/outputs/nsq/nsq.go b/plugins/outputs/nsq/nsq.go index 75b998484..fd4053222 100644 --- a/plugins/outputs/nsq/nsq.go +++ b/plugins/outputs/nsq/nsq.go @@ -24,7 +24,7 @@ var sampleConfig = ` ## NSQ topic for producer messages topic = "telegraf" - ## Data format to output. This can be "influx" or "graphite" + ## Data format to output. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md From f5246eb1678a44ec7b959ac30c525848684ddc68 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 1 Apr 2016 11:45:09 -0600 Subject: [PATCH 11/13] Update changelog with config file PR --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f43aca161..1095364f5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ ## v0.12.0 [unreleased] ### Features +- [#948](https://github.com/influxdata/telegraf/pull/948): Cleanup config file and make default package version include all plugins (but commented). - [#927](https://github.com/influxdata/telegraf/pull/927): Adds parsing of tags to the statsd input when using DataDog's dogstatsd extension - [#863](https://github.com/influxdata/telegraf/pull/863): AMQP output: allow external auth. Thanks @ekini! - [#707](https://github.com/influxdata/telegraf/pull/707): Improved prometheus plugin. Thanks @titilambert! From 9211d22b2b7dfd5fa12280b4e6c719395bb6fe5e Mon Sep 17 00:00:00 2001 From: Rubycut Date: Fri, 1 Apr 2016 17:59:09 +0200 Subject: [PATCH 12/13] Add writing in documentation. closes #950 --- plugins/inputs/nginx/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/inputs/nginx/README.md b/plugins/inputs/nginx/README.md index 8c64f6311..918ee08ad 100644 --- a/plugins/inputs/nginx/README.md +++ b/plugins/inputs/nginx/README.md @@ -18,6 +18,7 @@ - reading - requests - waiting + - writing ### Tags: From 8e041420cd9c34ae8a91e73ae62dab4d76937d0e Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 1 Apr 2016 13:53:34 -0600 Subject: [PATCH 13/13] config: parse environment variables in the config file closes #663 --- CHANGELOG.md | 1 + docs/CONFIGURATION.md | 6 +++ etc/telegraf.conf | 48 +++++++++++-------- internal/config/config.go | 47 ++++++++++++++++-- internal/config/config_test.go | 44 +++++++++++++++++ .../testdata/single_plugin_env_vars.toml | 11 +++++ 6 files changed, 132 insertions(+), 25 deletions(-) create mode 100644 internal/config/testdata/single_plugin_env_vars.toml diff --git a/CHANGELOG.md b/CHANGELOG.md index 1095364f5..305fa6d03 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ ## v0.12.0 [unreleased] ### Features +- [#951](https://github.com/influxdata/telegraf/pull/951): Parse environment variables in the config file. - [#948](https://github.com/influxdata/telegraf/pull/948): Cleanup config file and make default package version include all plugins (but commented). - [#927](https://github.com/influxdata/telegraf/pull/927): Adds parsing of tags to the statsd input when using DataDog's dogstatsd extension - [#863](https://github.com/influxdata/telegraf/pull/863): AMQP output: allow external auth. Thanks @ekini! 
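To make the new #951 entry concrete before the implementation lands below: a minimal, self-contained sketch of the substitution behaviour, using the same $\w+ pattern the patch compiles in internal/config/config.go. The patch itself uses FindAll plus bytes.Replace; ReplaceAllFunc here is a compact equivalent for this sketch, and the sample TOML and variable names are illustrative only. As in the patched parseFile, variables that are unset (or empty) are deliberately left in place.

package main

import (
	"fmt"
	"os"
	"regexp"
	"strings"
)

// envVarRe matches a '$' followed by word characters, the same pattern the
// patch compiles in internal/config/config.go.
var envVarRe = regexp.MustCompile(`\$\w+`)

// expandEnvVars substitutes each $VAR with its environment value; like the
// patched parseFile, it leaves unset (or empty) variables untouched.
func expandEnvVars(contents []byte) []byte {
	return envVarRe.ReplaceAllFunc(contents, func(match []byte) []byte {
		name := strings.TrimPrefix(string(match), "$")
		if val := os.Getenv(name); val != "" {
			return []byte(val)
		}
		return match
	})
}

func main() {
	os.Setenv("MY_TEST_SERVER", "192.168.1.1")

	cfg := []byte(`[[inputs.memcached]]
  servers = ["$MY_TEST_SERVER"]  # quoted: substitutes into a TOML string
  interval = "$TEST_INTERVAL"    # unset here, so left exactly as written`)

	fmt.Printf("%s\n", expandEnvVars(cfg))
}

Leaving unset variables verbatim means a typo in a variable name tends to surface as a parse or validation error rather than silently becoming an empty value.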
diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 810dc9470..0afaa120f 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -9,6 +9,12 @@ To generate a file with specific inputs and outputs, you can use the -input-filter and -output-filter flags: `telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka` +## Environment Variables + +Environment variables can be used anywhere in the config file, simply prepend +them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), +for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) + ## `[global_tags]` Configuration Global tags can be specific in the `[global_tags]` section of the config file in diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 43d647beb..633483e22 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -8,12 +8,18 @@ # # Use 'telegraf -config telegraf.conf -test' to see what metrics a config # file would generate. +# +# Environment variables can be used anywhere in this config file, simply prepend +# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), +# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) # Global tags can be specified here in key="value" format. [global_tags] # dc = "us-east-1" # will tag all metrics with dc=us-east-1 # rack = "1a" + ## Environment variables can be used as tags, and throughout the config file + # user = "$USER" # Configuration for telegraf agent @@ -1114,27 +1120,6 @@ # SERVICE INPUT PLUGINS # ############################################################################### -# # Generic UDP listener -# [[inputs.udp_listener]] -# ## Address and port to host UDP listener on -# service_address = ":8092" -# -# ## Number of UDP messages allowed to queue up. Once filled, the -# ## UDP listener will start dropping packets. -# allowed_pending_messages = 10000 -# -# ## UDP packet size for the server to listen for. This will depend -# ## on the size of the packets that the client is sending, which is -# ## usually 1500 bytes, but can be as large as 65,535 bytes. -# udp_packet_size = 1500 -# -# ## Data format to consume. -# ## Each data format has it's own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" - - # # A Github Webhook Event collector # [[inputs.github_webhooks]] # ## Address and port to host Webhook listener on @@ -1277,3 +1262,24 @@ # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" + +# # Generic UDP listener +# [[inputs.udp_listener]] +# ## Address and port to host UDP listener on +# service_address = ":8092" +# +# ## Number of UDP messages allowed to queue up. Once filled, the +# ## UDP listener will start dropping packets. +# allowed_pending_messages = 10000 +# +# ## UDP packet size for the server to listen for. This will depend +# ## on the size of the packets that the client is sending, which is +# ## usually 1500 bytes, but can be as large as 65,535 bytes. +# udp_packet_size = 1500 +# +# ## Data format to consume. 
+# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + diff --git a/internal/config/config.go b/internal/config/config.go index 715fa777c..1e07234e8 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1,11 +1,14 @@ package config import ( + "bytes" "errors" "fmt" "io/ioutil" "log" + "os" "path/filepath" + "regexp" "sort" "strings" "time" @@ -19,6 +22,7 @@ import ( "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/config" + "github.com/influxdata/toml" "github.com/influxdata/toml/ast" ) @@ -29,6 +33,9 @@ var ( // Default output plugins outputDefaults = []string{"influxdb"} + + // envVarRe is a regex to find environment variables in the config file + envVarRe = regexp.MustCompile(`\$\w+`) ) // Config specifies the URL/user/password for the database that telegraf @@ -153,12 +160,18 @@ var header = `# Telegraf Configuration # # Use 'telegraf -config telegraf.conf -test' to see what metrics a config # file would generate. +# +# Environment variables can be used anywhere in this config file, simply prepend +# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), +# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) # Global tags can be specified here in key="value" format. [global_tags] # dc = "us-east-1" # will tag all metrics with dc=us-east-1 # rack = "1a" + ## Environment variables can be used as tags, and throughout the config file + # user = "$USER" # Configuration for telegraf agent @@ -264,8 +277,12 @@ func printFilteredInputs(inputFilters []string, commented bool) { } sort.Strings(pnames) - // Print Inputs + // cache service inputs to print them at the end servInputs := make(map[string]telegraf.ServiceInput) + // for alphabetical looping: + servInputNames := []string{} + + // Print Inputs for _, pname := range pnames { creator := inputs.Inputs[pname] input := creator() @@ -273,6 +290,7 @@ func printFilteredInputs(inputFilters []string, commented bool) { switch p := input.(type) { case telegraf.ServiceInput: servInputs[pname] = p + servInputNames = append(servInputNames, pname) continue } @@ -283,9 +301,10 @@ func printFilteredInputs(inputFilters []string, commented bool) { if len(servInputs) == 0 { return } + sort.Strings(servInputNames) fmt.Printf(serviceInputHeader) - for name, input := range servInputs { - printConfig(name, input, "inputs", commented) + for _, name := range servInputNames { + printConfig(name, servInputs[name], "inputs", commented) } } @@ -387,7 +406,7 @@ func (c *Config) LoadDirectory(path string) error { // LoadConfig loads the given config file and applies it to c func (c *Config) LoadConfig(path string) error { - tbl, err := config.ParseFile(path) + tbl, err := parseFile(path) if err != nil { return fmt.Errorf("Error parsing %s, %s", path, err) } @@ -456,6 +475,26 @@ func (c *Config) LoadConfig(path string) error { return nil } +// parseFile loads a TOML configuration from a provided path and +// returns the AST produced from the TOML parser. When loading the file, it +// will find environment variables and replace them. 
+func parseFile(fpath string) (*ast.Table, error) { + contents, err := ioutil.ReadFile(fpath) + if err != nil { + return nil, err + } + + env_vars := envVarRe.FindAll(contents, -1) + for _, env_var := range env_vars { + env_val := os.Getenv(strings.TrimPrefix(string(env_var), "$")) + if env_val != "" { + contents = bytes.Replace(contents, env_var, []byte(env_val), 1) + } + } + + return toml.Parse(contents) +} + func (c *Config) addOutput(name string, table *ast.Table) error { if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) { return nil diff --git a/internal/config/config_test.go b/internal/config/config_test.go index f0add8b98..d78a8d6b8 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -1,6 +1,7 @@ package config import ( + "os" "testing" "time" @@ -10,9 +11,52 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/memcached" "github.com/influxdata/telegraf/plugins/inputs/procstat" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/stretchr/testify/assert" ) +func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { + c := NewConfig() + err := os.Setenv("MY_TEST_SERVER", "192.168.1.1") + assert.NoError(t, err) + err = os.Setenv("TEST_INTERVAL", "10s") + assert.NoError(t, err) + c.LoadConfig("./testdata/single_plugin_env_vars.toml") + + memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) + memcached.Servers = []string{"192.168.1.1"} + + mConfig := &internal_models.InputConfig{ + Name: "memcached", + Filter: internal_models.Filter{ + NameDrop: []string{"metricname2"}, + NamePass: []string{"metricname1"}, + FieldDrop: []string{"other", "stuff"}, + FieldPass: []string{"some", "strings"}, + TagDrop: []internal_models.TagFilter{ + internal_models.TagFilter{ + Name: "badtag", + Filter: []string{"othertag"}, + }, + }, + TagPass: []internal_models.TagFilter{ + internal_models.TagFilter{ + Name: "goodtag", + Filter: []string{"mytag"}, + }, + }, + IsActive: true, + }, + Interval: 10 * time.Second, + } + mConfig.Tags = make(map[string]string) + + assert.Equal(t, memcached, c.Inputs[0].Input, + "Testdata did not produce a correct memcached struct.") + assert.Equal(t, mConfig, c.Inputs[0].Config, + "Testdata did not produce correct memcached metadata.") +} + func TestConfig_LoadSingleInput(t *testing.T) { c := NewConfig() c.LoadConfig("./testdata/single_plugin.toml") diff --git a/internal/config/testdata/single_plugin_env_vars.toml b/internal/config/testdata/single_plugin_env_vars.toml new file mode 100644 index 000000000..6600a77b3 --- /dev/null +++ b/internal/config/testdata/single_plugin_env_vars.toml @@ -0,0 +1,11 @@ +[[inputs.memcached]] + servers = ["$MY_TEST_SERVER"] + namepass = ["metricname1"] + namedrop = ["metricname2"] + fieldpass = ["some", "strings"] + fielddrop = ["other", "stuff"] + interval = "$TEST_INTERVAL" + [inputs.memcached.tagpass] + goodtag = ["mytag"] + [inputs.memcached.tagdrop] + badtag = ["othertag"]
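A closing note on the quoting rule the docs and the testdata file above rely on: strings keep quotes around the variable, while numbers and booleans stay bare. The sketch below, assuming only the influxdata/toml parser that parseFile now calls, shows what each case looks like after substitution; the table literals are illustrative, not part of the patch.

package main

import (
	"fmt"

	"github.com/influxdata/toml"
)

func main() {
	// What parseFile would hand the parser with MY_TEST_SERVER=192.168.1.1
	// and TEST_INTERVAL=10s exported: bare values stay typed, quoted ones
	// stay strings.
	substituted := []byte("servers = [\"192.168.1.1\"]\ninterval = \"10s\"\n")
	if _, err := toml.Parse(substituted); err != nil {
		fmt.Println("unexpected parse error:", err)
	}

	// A quoted variable that was never set survives substitution verbatim.
	// It is still valid TOML, so the mistake only surfaces downstream when
	// the value is interpreted (here, as a duration).
	unset := []byte("interval = \"$TEST_INTERVAL\"\n")
	if _, err := toml.Parse(unset); err == nil {
		fmt.Println("parses fine; bad value is caught later:", "$TEST_INTERVAL")
	}
}

The quoted-but-unset case is the subtle one: it remains valid TOML and only fails when "$TEST_INTERVAL" is later parsed as a duration, which is exactly why the test above sets both variables before loading the config.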