Use sarama's built-in support for consumer groups (#6172)
parent 4d73290e13
commit aea09b3a20
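As an aside (not part of the commit), the sketch below shows the sarama consumer-group API this change migrates the plugin to, in place of bsm/sarama-cluster. It is a minimal, hedged illustration: the broker address, group name, topic, and handler name are placeholders, not values from the diff.

```go
package main

import (
	"context"
	"log"

	"github.com/Shopify/sarama"
)

// exampleHandler plays the role the plugin's ConsumerGroupHandler plays:
// it implements sarama.ConsumerGroupHandler.
type exampleHandler struct{}

func (exampleHandler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (exampleHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (exampleHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		log.Printf("topic=%s offset=%d value=%s", msg.Topic, msg.Offset, msg.Value)
		session.MarkMessage(msg, "") // commit the offset once the message is handled
	}
	return nil
}

func main() {
	config := sarama.NewConfig()
	config.Version = sarama.V1_0_0_0 // consumer groups need a broker version >= 0.10.2
	config.Consumer.Offsets.Initial = sarama.OffsetOldest

	group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "example_group", config)
	if err != nil {
		log.Fatal(err)
	}
	defer group.Close()

	// Consume returns on every rebalance, so it is called in a loop,
	// mirroring the goroutine the plugin starts in Start().
	ctx := context.Background()
	for {
		if err := group.Consume(ctx, []string{"telegraf"}, exampleHandler{}); err != nil {
			log.Println(err)
		}
	}
}
```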
@@ -61,6 +61,14 @@
   revision = "1f7cd6cfe0adea687ad44a512dfe76140f804318"
   version = "v10.12.0"
 
+[[projects]]
+  digest = "1:82041ab48e5c76da656b723fdc13a2b9ec716cdc736f82adaac77f5c39d4fca8"
+  name = "github.com/DataDog/zstd"
+  packages = ["."]
+  pruneopts = ""
+  revision = "2347a397da4ee9c6b8226d4aff82c302d0e52773"
+  version = "v1.4.1"
+
 [[projects]]
   branch = "master"
   digest = "1:298712a3ee36b59c3ca91f4183bd75d174d5eaa8b4aed5072831f126e2e752f6"
@@ -81,12 +89,12 @@
   version = "v0.4.9"
 
 [[projects]]
-  digest = "1:213b41361ad1cb4768add9d26c2e27794c65264eefdb24ed6ea34cdfeeff3f3c"
+  digest = "1:5dd52495eaf9fad11f4742f341166aa9eb68f70061fc1a9b546f9481b284b6d8"
   name = "github.com/Shopify/sarama"
   packages = ["."]
   pruneopts = ""
-  revision = "a6144ae922fd99dd0ea5046c8137acfb7fab0914"
-  version = "v1.18.0"
+  revision = "46c83074a05474240f9620fb7c70fb0d80ca401a"
+  version = "v1.23.1"
 
 [[projects]]
   digest = "1:f82b8ac36058904227087141017bb82f4b0fc58272990a4cdae3e2d6d222644e"
@@ -195,14 +203,6 @@
   pruneopts = ""
   revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
 
-[[projects]]
-  digest = "1:c5978131c797af795972c27c25396c81d1bf53b7b6e8e3e0259e58375765c071"
-  name = "github.com/bsm/sarama-cluster"
-  packages = ["."]
-  pruneopts = ""
-  revision = "cf455bc755fe41ac9bb2861e7a961833d9c2ecc3"
-  version = "v2.1.13"
-
 [[projects]]
   digest = "1:e5691038f8e87e7da05280095d968e50c17d624e25cca095d4e4cd947a805563"
   name = "github.com/caio/go-tdigest"
@@ -640,6 +640,14 @@
   pruneopts = ""
   revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00"
 
+[[projects]]
+  digest = "1:0038a7f43b51c8b2a8cd03b5372e73f8eadfe156484c2ae8185ae836f8ebc2cd"
+  name = "github.com/hashicorp/go-uuid"
+  packages = ["."]
+  pruneopts = ""
+  revision = "4f571afc59f3043a65f8fe6bf46d887b10a01d43"
+  version = "v1.0.1"
+
 [[projects]]
   digest = "1:f72168ea995f398bab88e84bd1ff58a983466ba162fb8d50d47420666cd57fad"
   name = "github.com/hashicorp/serf"
@@ -710,6 +718,17 @@
   revision = "8faa4453fc7051d1076053f8854077753ab912f2"
   version = "v3.4.0"
 
+[[projects]]
+  digest = "1:d45477e90c25c8c6d7d4237281167aa56079382fc042db4b44a8328071649bfa"
+  name = "github.com/jcmturner/gofork"
+  packages = [
+    "encoding/asn1",
+    "x/crypto/pbkdf2",
+  ]
+  pruneopts = ""
+  revision = "dc7c13fece037a4a36e2b3c69db4991498d30692"
+  version = "v1.0.0"
+
 [[projects]]
   digest = "1:6f49eae0c1e5dab1dafafee34b207aeb7a42303105960944828c2079b92fc88e"
   name = "github.com/jmespath/go-jmespath"
@@ -1523,6 +1542,72 @@
   revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
   version = "v0.9.1"
 
+[[projects]]
+  digest = "1:4777ba481cc12866b89aafb0a67529e7ac48b9aea06a25f3737b2cf5a3ffda12"
+  name = "gopkg.in/jcmturner/aescts.v1"
+  packages = ["."]
+  pruneopts = ""
+  revision = "f6abebb3171c4c1b1fea279cb7c7325020a26290"
+  version = "v1.0.1"
+
+[[projects]]
+  digest = "1:84c5b1392ef65ad1bb64da4b4d0beb2f204eefc769d6d96082347bb7057cb7b1"
+  name = "gopkg.in/jcmturner/dnsutils.v1"
+  packages = ["."]
+  pruneopts = ""
+  revision = "13eeb8d49ffb74d7a75784c35e4d900607a3943c"
+  version = "v1.0.1"
+
+[[projects]]
+  digest = "1:502ab576ba8c47c4de77fe3f2b2386adc1a1447bb5afae2ac7bf0edd2b6f7c52"
+  name = "gopkg.in/jcmturner/gokrb5.v7"
+  packages = [
+    "asn1tools",
+    "client",
+    "config",
+    "credentials",
+    "crypto",
+    "crypto/common",
+    "crypto/etype",
+    "crypto/rfc3961",
+    "crypto/rfc3962",
+    "crypto/rfc4757",
+    "crypto/rfc8009",
+    "gssapi",
+    "iana",
+    "iana/addrtype",
+    "iana/adtype",
+    "iana/asnAppTag",
+    "iana/chksumtype",
+    "iana/errorcode",
+    "iana/etypeID",
+    "iana/flags",
+    "iana/keyusage",
+    "iana/msgtype",
+    "iana/nametype",
+    "iana/patype",
+    "kadmin",
+    "keytab",
+    "krberror",
+    "messages",
+    "pac",
+    "types",
+  ]
+  pruneopts = ""
+  revision = "363118e62befa8a14ff01031c025026077fe5d6d"
+  version = "v7.3.0"
+
+[[projects]]
+  digest = "1:f9956ccc103c6208cd50c71ee5191b6fdcc635972c12624ef949c9b20b2bb9d1"
+  name = "gopkg.in/jcmturner/rpc.v1"
+  packages = [
+    "mstypes",
+    "ndr",
+  ]
+  pruneopts = ""
+  revision = "99a8ce2fbf8b8087b6ed12a37c61b10f04070043"
+  version = "v1.1.0"
+
 [[projects]]
   digest = "1:367baf06b7dbd0ef0bbdd785f6a79f929c96b0c18e9d3b29c0eed1ac3f5db133"
   name = "gopkg.in/ldap.v2"
@@ -1598,7 +1683,6 @@
     "github.com/aws/aws-sdk-go/service/cloudwatch",
     "github.com/aws/aws-sdk-go/service/dynamodb",
     "github.com/aws/aws-sdk-go/service/kinesis",
-    "github.com/bsm/sarama-cluster",
     "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout",
     "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis",
     "github.com/couchbase/go-couchbase",
@@ -18,10 +18,6 @@
   name = "github.com/aws/aws-sdk-go"
   version = "1.15.54"
 
-[[constraint]]
-  name = "github.com/bsm/sarama-cluster"
-  version = "2.1.13"
-
 [[constraint]]
   name = "github.com/couchbase/go-couchbase"
   branch = "master"
Makefile
@@ -1,9 +1,8 @@
-ifeq ($(SHELL), cmd)
-	VERSION := $(shell git describe --exact-match --tags 2>nil)
-	HOME := $(HOMEPATH)
-else ifeq ($(SHELL), sh.exe)
+ifeq ($(OS), Windows_NT)
 	VERSION := $(shell git describe --exact-match --tags 2>nil)
 	HOME := $(HOMEPATH)
+	CGO_ENABLED ?= 0
+	export CGO_ENABLED
 else
 	VERSION := $(shell git describe --exact-match --tags 2>/dev/null)
 endif
@@ -48,7 +47,6 @@ install: telegraf
 	mkdir -p $(DESTDIR)$(PREFIX)/bin/
 	cp telegraf $(DESTDIR)$(PREFIX)/bin/
 
-
 .PHONY: test
 test:
 	go test -short ./...
@@ -14,7 +14,6 @@ following works:
 - github.com/aws/aws-sdk-go [Apache License 2.0](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
 - github.com/Azure/go-autorest [Apache License 2.0](https://github.com/Azure/go-autorest/blob/master/LICENSE)
 - github.com/beorn7/perks [MIT License](https://github.com/beorn7/perks/blob/master/LICENSE)
-- github.com/bsm/sarama-cluster [MIT License](https://github.com/bsm/sarama-cluster/blob/master/LICENSE)
 - github.com/cenkalti/backoff [MIT License](https://github.com/cenkalti/backoff/blob/master/LICENSE)
 - github.com/cisco-ie/nx-telemetry-proto [Apache License 2.0](https://github.com/cisco-ie/nx-telemetry-proto/blob/master/LICENSE)
 - github.com/couchbase/go-couchbase [MIT License](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
@@ -139,4 +138,4 @@ following works:
 - gopkg.in/yaml.v2 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v2.2.2/LICENSE)
 
 ## telegraf used and modified code from these projects
 - github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/LICENSE)
@@ -10,11 +10,13 @@ and use the old zookeeper connection method.
 
 ```toml
 [[inputs.kafka_consumer]]
-  ## kafka servers
+  ## Kafka brokers.
   brokers = ["localhost:9092"]
-  ## topic(s) to consume
+
+  ## Topics to consume.
   topics = ["telegraf"]
-  ## Add topic as tag if topic_tag is not empty
+
+  ## When set this tag will be added to all metrics with the topic as the value.
   # topic_tag = ""
 
   ## Optional Client id
@@ -37,10 +39,11 @@ and use the old zookeeper connection method.
   # sasl_username = "kafka"
   # sasl_password = "secret"
 
-  ## the name of the consumer group
-  consumer_group = "telegraf_metrics_consumers"
-  ## Offset (must be either "oldest" or "newest")
-  offset = "oldest"
+  ## Name of the consumer group.
+  # consumer_group = "telegraf_metrics_consumers"
+
+  ## Initial offset position; one of "oldest" or "newest".
+  # offset = "oldest"
 
   ## Maximum length of a message to consume, in bytes (default 0/unlimited);
   ## larger messages are dropped
@@ -8,61 +8,20 @@ import (
 	"sync"
 
 	"github.com/Shopify/sarama"
-	cluster "github.com/bsm/sarama-cluster"
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/internal/tls"
 	"github.com/influxdata/telegraf/plugins/inputs"
 	"github.com/influxdata/telegraf/plugins/parsers"
 )
 
-const (
-	defaultMaxUndeliveredMessages = 1000
-)
-
-type empty struct{}
-type semaphore chan empty
-
-type Consumer interface {
-	Errors() <-chan error
-	Messages() <-chan *sarama.ConsumerMessage
-	MarkOffset(msg *sarama.ConsumerMessage, metadata string)
-	Close() error
-}
-
-type Kafka struct {
-	ConsumerGroup          string   `toml:"consumer_group"`
-	ClientID               string   `toml:"client_id"`
-	Topics                 []string `toml:"topics"`
-	Brokers                []string `toml:"brokers"`
-	MaxMessageLen          int      `toml:"max_message_len"`
-	Version                string   `toml:"version"`
-	MaxUndeliveredMessages int      `toml:"max_undelivered_messages"`
-	Offset                 string   `toml:"offset"`
-	SASLUsername           string   `toml:"sasl_username"`
-	SASLPassword           string   `toml:"sasl_password"`
-	TopicTag               string   `toml:"topic_tag"`
-
-	tls.ClientConfig
-
-	cluster Consumer
-	parser  parsers.Parser
-	wg      *sync.WaitGroup
-	cancel  context.CancelFunc
-
-	// Unconfirmed messages
-	messages map[telegraf.TrackingID]*sarama.ConsumerMessage
-
-	// doNotCommitMsgs tells the parser not to call CommitUpTo on the consumer
-	// this is mostly for test purposes, but there may be a use-case for it later.
-	doNotCommitMsgs bool
-}
-
-var sampleConfig = `
-  ## kafka servers
+const sampleConfig = `
+  ## Kafka brokers.
   brokers = ["localhost:9092"]
-  ## topic(s) to consume
+
+  ## Topics to consume.
   topics = ["telegraf"]
-  ## Add topic as tag if topic_tag is not empty
+
+  ## When set this tag will be added to all metrics with the topic as the value.
   # topic_tag = ""
 
   ## Optional Client id
@@ -85,10 +44,12 @@ var sampleConfig = `
   # sasl_username = "kafka"
   # sasl_password = "secret"
 
-  ## the name of the consumer group
-  consumer_group = "telegraf_metrics_consumers"
-  ## Offset (must be either "oldest" or "newest")
-  offset = "oldest"
+  ## Name of the consumer group.
+  # consumer_group = "telegraf_metrics_consumers"
+
+  ## Initial offset position; one of "oldest" or "newest".
+  # offset = "oldest"
 
   ## Maximum length of a message to consume, in bytes (default 0/unlimited);
   ## larger messages are dropped
   max_message_len = 1000000
@@ -110,22 +71,77 @@ var sampleConfig = `
   data_format = "influx"
 `
 
-func (k *Kafka) SampleConfig() string {
+const (
+	defaultMaxUndeliveredMessages = 1000
+	defaultMaxMessageLen          = 1000000
+	defaultConsumerGroup          = "telegraf_metrics_consumers"
+)
+
+type empty struct{}
+type semaphore chan empty
+
+type KafkaConsumer struct {
+	Brokers                []string `toml:"brokers"`
+	ClientID               string   `toml:"client_id"`
+	ConsumerGroup          string   `toml:"consumer_group"`
+	MaxMessageLen          int      `toml:"max_message_len"`
+	MaxUndeliveredMessages int      `toml:"max_undelivered_messages"`
+	Offset                 string   `toml:"offset"`
+	Topics                 []string `toml:"topics"`
+	TopicTag               string   `toml:"topic_tag"`
+	Version                string   `toml:"version"`
+	SASLPassword           string   `toml:"sasl_password"`
+	SASLUsername           string   `toml:"sasl_username"`
+
+	tls.ClientConfig
+
+	ConsumerCreator ConsumerGroupCreator `toml:"-"`
+	consumer        ConsumerGroup
+	config          *sarama.Config
+
+	parser parsers.Parser
+	wg     sync.WaitGroup
+	cancel context.CancelFunc
+}
+
+type ConsumerGroup interface {
+	Consume(ctx context.Context, topics []string, handler sarama.ConsumerGroupHandler) error
+	Errors() <-chan error
+	Close() error
+}
+
+type ConsumerGroupCreator interface {
+	Create(brokers []string, group string, config *sarama.Config) (ConsumerGroup, error)
+}
+
+type SaramaCreator struct{}
+
+func (*SaramaCreator) Create(brokers []string, group string, config *sarama.Config) (ConsumerGroup, error) {
+	return sarama.NewConsumerGroup(brokers, group, config)
+}
+
+func (k *KafkaConsumer) SampleConfig() string {
 	return sampleConfig
 }
 
-func (k *Kafka) Description() string {
-	return "Read metrics from Kafka topic(s)"
+func (k *KafkaConsumer) Description() string {
+	return "Read metrics from Kafka topics"
 }
 
-func (k *Kafka) SetParser(parser parsers.Parser) {
+func (k *KafkaConsumer) SetParser(parser parsers.Parser) {
 	k.parser = parser
 }
 
-func (k *Kafka) Start(acc telegraf.Accumulator) error {
-	var clusterErr error
-
-	config := cluster.NewConfig()
+func (k *KafkaConsumer) Init() error {
+	if k.MaxUndeliveredMessages == 0 {
+		k.MaxUndeliveredMessages = defaultMaxUndeliveredMessages
+	}
+	if k.ConsumerGroup == "" {
+		k.ConsumerGroup = defaultConsumerGroup
+	}
+
+	config := sarama.NewConfig()
+	config.Consumer.Return.Errors = true
 
 	if k.Version != "" {
 		version, err := sarama.ParseKafkaVersion(k.Version)
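Aside, not part of the diff: the `version` setting above is validated with sarama.ParseKafkaVersion in Init(). A minimal sketch of that call, with "1.0.0" and "100" as arbitrary example inputs:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	// Valid version strings are parsed into a sarama.KafkaVersion.
	if v, err := sarama.ParseKafkaVersion("1.0.0"); err == nil {
		cfg.Version = v // equivalent to sarama.V1_0_0_0
	}
	// Invalid strings return an error, which Init() now surfaces to the user.
	if _, err := sarama.ParseKafkaVersion("100"); err != nil {
		fmt.Println("invalid version string rejected:", err)
	}
}
```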
@@ -135,172 +151,255 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error {
 		config.Version = version
 	}
 
-	config.Consumer.Return.Errors = true
-
 	tlsConfig, err := k.ClientConfig.TLSConfig()
 	if err != nil {
 		return err
 	}
 
+	if tlsConfig != nil {
+		config.Net.TLS.Config = tlsConfig
+		config.Net.TLS.Enable = true
+	}
+
+	if k.SASLUsername != "" && k.SASLPassword != "" {
+		config.Net.SASL.User = k.SASLUsername
+		config.Net.SASL.Password = k.SASLPassword
+		config.Net.SASL.Enable = true
+	}
+
 	if k.ClientID != "" {
 		config.ClientID = k.ClientID
 	} else {
 		config.ClientID = "Telegraf"
 	}
 
-	if tlsConfig != nil {
-		log.Printf("D! TLS Enabled")
-		config.Net.TLS.Config = tlsConfig
-		config.Net.TLS.Enable = true
-	}
-	if k.SASLUsername != "" && k.SASLPassword != "" {
-		log.Printf("D! Using SASL auth with username '%s',",
-			k.SASLUsername)
-		config.Net.SASL.User = k.SASLUsername
-		config.Net.SASL.Password = k.SASLPassword
-		config.Net.SASL.Enable = true
-	}
-
 	switch strings.ToLower(k.Offset) {
 	case "oldest", "":
 		config.Consumer.Offsets.Initial = sarama.OffsetOldest
 	case "newest":
 		config.Consumer.Offsets.Initial = sarama.OffsetNewest
 	default:
-		log.Printf("I! WARNING: Kafka consumer invalid offset '%s', using 'oldest'",
-			k.Offset)
-		config.Consumer.Offsets.Initial = sarama.OffsetOldest
+		return fmt.Errorf("invalid offset %q", k.Offset)
 	}
 
-	if k.cluster == nil {
-		k.cluster, clusterErr = cluster.NewConsumer(
-			k.Brokers,
-			k.ConsumerGroup,
-			k.Topics,
-			config,
-		)
-
-		if clusterErr != nil {
-			log.Printf("E! Error when creating Kafka Consumer, brokers: %v, topics: %v",
-				k.Brokers, k.Topics)
-			return clusterErr
-		}
+	if k.ConsumerCreator == nil {
+		k.ConsumerCreator = &SaramaCreator{}
+	}
+
+	k.config = config
+	return nil
+}
+
+func (k *KafkaConsumer) Start(acc telegraf.Accumulator) error {
+	var err error
+	k.consumer, err = k.ConsumerCreator.Create(
+		k.Brokers,
+		k.ConsumerGroup,
+		k.config,
+	)
+	if err != nil {
+		return err
 	}
 
 	ctx, cancel := context.WithCancel(context.Background())
 	k.cancel = cancel
 
 	// Start consumer goroutine
-	k.wg = &sync.WaitGroup{}
 	k.wg.Add(1)
 	go func() {
 		defer k.wg.Done()
-		k.receiver(ctx, acc)
+		for ctx.Err() == nil {
+			handler := NewConsumerGroupHandler(acc, k.MaxUndeliveredMessages, k.parser)
+			handler.MaxMessageLen = k.MaxMessageLen
+			handler.TopicTag = k.TopicTag
+			err := k.consumer.Consume(ctx, k.Topics, handler)
+			if err != nil {
+				acc.AddError(err)
+			}
+		}
+		err = k.consumer.Close()
+		if err != nil {
+			acc.AddError(err)
+		}
+	}()
+
+	k.wg.Add(1)
+	go func() {
+		defer k.wg.Done()
+		for err := range k.consumer.Errors() {
+			acc.AddError(err)
+		}
 	}()
 
-	log.Printf("I! Started the kafka consumer service, brokers: %v, topics: %v",
-		k.Brokers, k.Topics)
 	return nil
 }
 
-// receiver() reads all incoming messages from the consumer, and parses them into
-// influxdb metric points.
-func (k *Kafka) receiver(ctx context.Context, ac telegraf.Accumulator) {
-	k.messages = make(map[telegraf.TrackingID]*sarama.ConsumerMessage)
-
-	acc := ac.WithTracking(k.MaxUndeliveredMessages)
-	sem := make(semaphore, k.MaxUndeliveredMessages)
+func (k *KafkaConsumer) Gather(acc telegraf.Accumulator) error {
+	return nil
+}
+
+func (k *KafkaConsumer) Stop() {
+	k.cancel()
+	k.wg.Wait()
+}
+
+// Message is an aggregate type binding the Kafka message and the session so
+// that offsets can be updated.
+type Message struct {
+	message *sarama.ConsumerMessage
+	session sarama.ConsumerGroupSession
+}
+
+func NewConsumerGroupHandler(acc telegraf.Accumulator, maxUndelivered int, parser parsers.Parser) *ConsumerGroupHandler {
+	handler := &ConsumerGroupHandler{
+		acc:         acc.WithTracking(maxUndelivered),
+		sem:         make(chan empty, maxUndelivered),
+		undelivered: make(map[telegraf.TrackingID]Message, maxUndelivered),
+		parser:      parser,
+	}
+	return handler
+}
+
+// ConsumerGroupHandler is a sarama.ConsumerGroupHandler implementation.
+type ConsumerGroupHandler struct {
+	MaxMessageLen int
+	TopicTag      string
+
+	acc    telegraf.TrackingAccumulator
+	sem    semaphore
+	parser parsers.Parser
+	wg     sync.WaitGroup
+	cancel context.CancelFunc
+
+	mu          sync.Mutex
+	undelivered map[telegraf.TrackingID]Message
+}
+
+// Setup is called once when a new session is opened. It setups up the handler
+// and begins processing delivered messages.
+func (h *ConsumerGroupHandler) Setup(sarama.ConsumerGroupSession) error {
+	h.undelivered = make(map[telegraf.TrackingID]Message)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	h.cancel = cancel
+
+	h.wg.Add(1)
+	go func() {
+		defer h.wg.Done()
+		h.run(ctx)
+	}()
+	return nil
+}
+
+// Run processes any delivered metrics during the lifetime of the session.
+func (h *ConsumerGroupHandler) run(ctx context.Context) error {
 	for {
 		select {
 		case <-ctx.Done():
-			return
-		case track := <-acc.Delivered():
-			<-sem
-			k.onDelivery(track)
-		case err := <-k.cluster.Errors():
-			acc.AddError(err)
-		case sem <- empty{}:
-			select {
-			case <-ctx.Done():
-				return
-			case track := <-acc.Delivered():
-				// Once for the delivered message, once to leave the case
-				<-sem
-				<-sem
-				k.onDelivery(track)
-			case err := <-k.cluster.Errors():
-				<-sem
-				acc.AddError(err)
-			case msg := <-k.cluster.Messages():
-				err := k.onMessage(acc, msg)
-				if err != nil {
-					acc.AddError(err)
-					<-sem
-				}
-			}
+			return nil
+		case track := <-h.acc.Delivered():
+			h.onDelivery(track)
 		}
 	}
 }
 
-func (k *Kafka) markOffset(msg *sarama.ConsumerMessage) {
-	if !k.doNotCommitMsgs {
-		k.cluster.MarkOffset(msg, "")
-	}
-}
-
-func (k *Kafka) onMessage(acc telegraf.TrackingAccumulator, msg *sarama.ConsumerMessage) error {
-	if k.MaxMessageLen != 0 && len(msg.Value) > k.MaxMessageLen {
-		k.markOffset(msg)
-		return fmt.Errorf("Message longer than max_message_len (%d > %d)",
-			len(msg.Value), k.MaxMessageLen)
-	}
-
-	metrics, err := k.parser.Parse(msg.Value)
-	if err != nil {
-		return err
-	}
-	if len(k.TopicTag) > 0 {
-		for _, metric := range metrics {
-			metric.AddTag(k.TopicTag, msg.Topic)
-		}
-	}
-	id := acc.AddTrackingMetricGroup(metrics)
-	k.messages[id] = msg
-
-	return nil
-}
-
-func (k *Kafka) onDelivery(track telegraf.DeliveryInfo) {
-	msg, ok := k.messages[track.ID()]
+func (h *ConsumerGroupHandler) onDelivery(track telegraf.DeliveryInfo) {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+
+	msg, ok := h.undelivered[track.ID()]
 	if !ok {
 		log.Printf("E! [inputs.kafka_consumer] Could not mark message delivered: %d", track.ID())
 		return
 	}
 
 	if track.Delivered() {
-		k.markOffset(msg)
+		msg.session.MarkMessage(msg.message, "")
 	}
-	delete(k.messages, track.ID())
+
+	delete(h.undelivered, track.ID())
+	<-h.sem
 }
 
-func (k *Kafka) Stop() {
-	k.cancel()
-	k.wg.Wait()
-
-	if err := k.cluster.Close(); err != nil {
-		log.Printf("E! [inputs.kafka_consumer] Error closing consumer: %v", err)
+// Reserve blocks until there is an available slot for a new message.
+func (h *ConsumerGroupHandler) Reserve(ctx context.Context) error {
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case h.sem <- empty{}:
+		return nil
 	}
 }
 
-func (k *Kafka) Gather(acc telegraf.Accumulator) error {
+func (h *ConsumerGroupHandler) release() {
+	<-h.sem
+}
+
+// Handle processes a message and if successful saves it to be acknowledged
+// after delivery.
+func (h *ConsumerGroupHandler) Handle(session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error {
+	if h.MaxMessageLen != 0 && len(msg.Value) > h.MaxMessageLen {
+		session.MarkMessage(msg, "")
+		h.release()
+		return fmt.Errorf("message exceeds max_message_len (actual %d, max %d)",
+			len(msg.Value), h.MaxMessageLen)
+	}
+
+	metrics, err := h.parser.Parse(msg.Value)
+	if err != nil {
+		h.release()
+		return err
+	}
+
+	if len(h.TopicTag) > 0 {
+		for _, metric := range metrics {
+			metric.AddTag(h.TopicTag, msg.Topic)
+		}
+	}
+
+	id := h.acc.AddTrackingMetricGroup(metrics)
+	h.mu.Lock()
+	h.undelivered[id] = Message{session: session, message: msg}
+	h.mu.Unlock()
+	return nil
+}
+
+// ConsumeClaim is called once each claim in a goroutine and must be
+// thread-safe. Should run until the claim is closed.
+func (h *ConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
+	ctx := session.Context()
+
+	for {
+		err := h.Reserve(ctx)
+		if err != nil {
+			return nil
+		}
+
+		select {
+		case <-ctx.Done():
+			return nil
+		case msg, ok := <-claim.Messages():
+			if !ok {
+				return nil
+			}
+			err := h.Handle(session, msg)
+			if err != nil {
+				h.acc.AddError(err)
+			}
+		}
+	}
+}
+
+// Cleanup stops the internal goroutine and is called after all ConsumeClaim
+// functions have completed.
+func (h *ConsumerGroupHandler) Cleanup(sarama.ConsumerGroupSession) error {
+	h.cancel()
+	h.wg.Wait()
 	return nil
 }
 
 func init() {
 	inputs.Add("kafka_consumer", func() telegraf.Input {
-		return &Kafka{
-			MaxUndeliveredMessages: defaultMaxUndeliveredMessages,
-		}
+		return &KafkaConsumer{}
 	})
 }
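For context on the Reserve/onDelivery pairing in the hunk above: the handler caps in-flight messages at max_undelivered_messages with a buffered-channel semaphore. The sketch below illustrates that pattern in isolation; it is not code from the commit, and all names in it are illustrative.

```go
package main

import (
	"context"
	"fmt"
)

type empty struct{}
type semaphore chan empty

// reserve blocks until a slot is free or the context is cancelled,
// mirroring ConsumerGroupHandler.Reserve.
func reserve(ctx context.Context, sem semaphore) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case sem <- empty{}:
		return nil
	}
}

// release frees a slot once a message has been delivered downstream,
// mirroring the `<-h.sem` in onDelivery/release.
func release(sem semaphore) { <-sem }

func main() {
	const maxUndelivered = 2
	sem := make(semaphore, maxUndelivered)
	ctx := context.Background()

	// Fill both slots, as if two messages were parsed but not yet delivered.
	for i := 0; i < maxUndelivered; i++ {
		if err := reserve(ctx, sem); err != nil {
			return
		}
		fmt.Println("message", i, "in flight")
	}

	// A delivery frees a slot, so the consumer may take the next message.
	release(sem)
	fmt.Println("slot available again:", reserve(ctx, sem) == nil)
}
```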
@@ -1,94 +0,0 @@
-package kafka_consumer
-
-import (
-	"fmt"
-	"testing"
-	"time"
-
-	"github.com/Shopify/sarama"
-	"github.com/influxdata/telegraf/testutil"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-
-	"github.com/influxdata/telegraf/plugins/parsers"
-)
-
-func TestReadsMetricsFromKafka(t *testing.T) {
-	if testing.Short() {
-		t.Skip("Skipping integration test in short mode")
-	}
-
-	brokerPeers := []string{testutil.GetLocalHost() + ":9092"}
-	testTopic := fmt.Sprintf("telegraf_test_topic_%d", time.Now().Unix())
-
-	// Send a Kafka message to the kafka host
-	msg := "cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257\n"
-	producer, err := sarama.NewSyncProducer(brokerPeers, nil)
-	require.NoError(t, err)
-	_, _, err = producer.SendMessage(
-		&sarama.ProducerMessage{
-			Topic: testTopic,
-			Value: sarama.StringEncoder(msg),
-		})
-	require.NoError(t, err)
-	defer producer.Close()
-
-	// Start the Kafka Consumer
-	k := &Kafka{
-		ConsumerGroup: "telegraf_test_consumers",
-		Topics:        []string{testTopic},
-		Brokers:       brokerPeers,
-		Offset:        "oldest",
-	}
-	p, _ := parsers.NewInfluxParser()
-	k.SetParser(p)
-
-	// Verify that we can now gather the sent message
-	var acc testutil.Accumulator
-
-	// Sanity check
-	assert.Equal(t, 0, len(acc.Metrics), "There should not be any points")
-	if err := k.Start(&acc); err != nil {
-		t.Fatal(err.Error())
-	} else {
-		defer k.Stop()
-	}
-
-	waitForPoint(&acc, t)
-
-	// Gather points
-	err = acc.GatherError(k.Gather)
-	require.NoError(t, err)
-	if len(acc.Metrics) == 1 {
-		point := acc.Metrics[0]
-		assert.Equal(t, "cpu_load_short", point.Measurement)
-		assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields)
-		assert.Equal(t, map[string]string{
-			"host":      "server01",
-			"direction": "in",
-			"region":    "us-west",
-		}, point.Tags)
-		assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix())
-	} else {
-		t.Errorf("No points found in accumulator, expected 1")
-	}
-}
-
-// Waits for the metric that was sent to the kafka broker to arrive at the kafka
-// consumer
-func waitForPoint(acc *testutil.Accumulator, t *testing.T) {
-	// Give the kafka container up to 2 seconds to get the point to the consumer
-	ticker := time.NewTicker(5 * time.Millisecond)
-	counter := 0
-	for {
-		select {
-		case <-ticker.C:
-			counter++
-			if counter > 1000 {
-				t.Fatal("Waited for 5s, point never arrived to consumer")
-			} else if acc.NFields() == 1 {
-				return
-			}
-		}
-	}
-}
@@ -2,219 +2,344 @@ package kafka_consumer
 
 import (
 	"context"
-	"strings"
 	"testing"
+	"time"
 
 	"github.com/Shopify/sarama"
 	"github.com/influxdata/telegraf"
-	"github.com/influxdata/telegraf/plugins/parsers"
+	"github.com/influxdata/telegraf/plugins/parsers/value"
 	"github.com/influxdata/telegraf/testutil"
-	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
-const (
-	testMsg         = "cpu_load_short,host=server01 value=23422.0 1422568543702900257\n"
-	testMsgGraphite = "cpu.load.short.graphite 23422 1454780029"
-	testMsgJSON     = "{\"a\": 5, \"b\": {\"c\": 6}}\n"
-	invalidMsg      = "cpu_load_short,host=server01 1422568543702900257\n"
-)
-
-type TestConsumer struct {
-	errors   chan error
-	messages chan *sarama.ConsumerMessage
-}
-
-func (c *TestConsumer) Errors() <-chan error {
-	return c.errors
-}
-
-func (c *TestConsumer) Messages() <-chan *sarama.ConsumerMessage {
-	return c.messages
-}
-
-func (c *TestConsumer) MarkOffset(msg *sarama.ConsumerMessage, metadata string) {
-}
-
-func (c *TestConsumer) Close() error {
-	return nil
-}
-
-func (c *TestConsumer) Inject(msg *sarama.ConsumerMessage) {
-	c.messages <- msg
-}
-
-func newTestKafka() (*Kafka, *TestConsumer) {
-	consumer := &TestConsumer{
-		errors:   make(chan error),
-		messages: make(chan *sarama.ConsumerMessage, 1000),
-	}
-	k := Kafka{
-		cluster:                consumer,
-		ConsumerGroup:          "test",
-		Topics:                 []string{"telegraf"},
-		Brokers:                []string{"localhost:9092"},
-		Offset:                 "oldest",
-		MaxUndeliveredMessages: defaultMaxUndeliveredMessages,
-		doNotCommitMsgs:        true,
-		messages:               make(map[telegraf.TrackingID]*sarama.ConsumerMessage),
-	}
-	return &k, consumer
-}
-
-func newTestKafkaWithTopicTag() (*Kafka, *TestConsumer) {
-	consumer := &TestConsumer{
-		errors:   make(chan error),
-		messages: make(chan *sarama.ConsumerMessage, 1000),
-	}
-	k := Kafka{
-		cluster:                consumer,
-		ConsumerGroup:          "test",
-		Topics:                 []string{"telegraf"},
-		Brokers:                []string{"localhost:9092"},
-		Offset:                 "oldest",
-		MaxUndeliveredMessages: defaultMaxUndeliveredMessages,
-		doNotCommitMsgs:        true,
-		messages:               make(map[telegraf.TrackingID]*sarama.ConsumerMessage),
-		TopicTag:               "topic",
-	}
-	return &k, consumer
-}
-
-// Test that the parser parses kafka messages into points
-func TestRunParser(t *testing.T) {
-	k, consumer := newTestKafka()
-	acc := testutil.Accumulator{}
-	ctx := context.Background()
-
-	k.parser, _ = parsers.NewInfluxParser()
-	go k.receiver(ctx, &acc)
-	consumer.Inject(saramaMsg(testMsg))
-	acc.Wait(1)
-
-	assert.Equal(t, acc.NFields(), 1)
-}
-
-// Test that the parser parses kafka messages into points
-// and adds the topic tag
-func TestRunParserWithTopic(t *testing.T) {
-	k, consumer := newTestKafkaWithTopicTag()
-	acc := testutil.Accumulator{}
-	ctx := context.Background()
-
-	k.parser, _ = parsers.NewInfluxParser()
-	go k.receiver(ctx, &acc)
-	consumer.Inject(saramaMsgWithTopic(testMsg, "test_topic"))
-	acc.Wait(1)
-
-	assert.Equal(t, acc.NFields(), 1)
-	assert.True(t, acc.HasTag("cpu_load_short", "topic"))
-}
-
-// Test that the parser ignores invalid messages
-func TestRunParserInvalidMsg(t *testing.T) {
-	k, consumer := newTestKafka()
-	acc := testutil.Accumulator{}
-	ctx := context.Background()
-
-	k.parser, _ = parsers.NewInfluxParser()
-	go k.receiver(ctx, &acc)
-	consumer.Inject(saramaMsg(invalidMsg))
-	acc.WaitError(1)
-
-	assert.Equal(t, acc.NFields(), 0)
-}
-
-// Test that overlong messages are dropped
-func TestDropOverlongMsg(t *testing.T) {
-	const maxMessageLen = 64 * 1024
-	k, consumer := newTestKafka()
-	k.MaxMessageLen = maxMessageLen
-	acc := testutil.Accumulator{}
-	ctx := context.Background()
-	overlongMsg := strings.Repeat("v", maxMessageLen+1)
-
-	go k.receiver(ctx, &acc)
-	consumer.Inject(saramaMsg(overlongMsg))
-	acc.WaitError(1)
-
-	assert.Equal(t, acc.NFields(), 0)
-}
-
-// Test that the parser parses kafka messages into points
-func TestRunParserAndGather(t *testing.T) {
-	k, consumer := newTestKafka()
-	acc := testutil.Accumulator{}
-	ctx := context.Background()
-
-	k.parser, _ = parsers.NewInfluxParser()
-	go k.receiver(ctx, &acc)
-	consumer.Inject(saramaMsg(testMsg))
-	acc.Wait(1)
-
-	acc.GatherError(k.Gather)
-
-	assert.Equal(t, acc.NFields(), 1)
-	acc.AssertContainsFields(t, "cpu_load_short",
-		map[string]interface{}{"value": float64(23422)})
-}
-
-// Test that the parser parses kafka messages into points
-func TestRunParserAndGatherGraphite(t *testing.T) {
-	k, consumer := newTestKafka()
-	acc := testutil.Accumulator{}
-	ctx := context.Background()
-
-	k.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil)
-	go k.receiver(ctx, &acc)
-	consumer.Inject(saramaMsg(testMsgGraphite))
-	acc.Wait(1)
-
-	acc.GatherError(k.Gather)
-
-	assert.Equal(t, acc.NFields(), 1)
-	acc.AssertContainsFields(t, "cpu_load_short_graphite",
-		map[string]interface{}{"value": float64(23422)})
-}
-
-// Test that the parser parses kafka messages into points
-func TestRunParserAndGatherJSON(t *testing.T) {
-	k, consumer := newTestKafka()
-	acc := testutil.Accumulator{}
-	ctx := context.Background()
-
-	k.parser, _ = parsers.NewParser(&parsers.Config{
-		DataFormat: "json",
-		MetricName: "kafka_json_test",
-	})
-	go k.receiver(ctx, &acc)
-	consumer.Inject(saramaMsg(testMsgJSON))
-	acc.Wait(1)
-
-	acc.GatherError(k.Gather)
-
-	assert.Equal(t, acc.NFields(), 2)
-	acc.AssertContainsFields(t, "kafka_json_test",
-		map[string]interface{}{
-			"a":   float64(5),
-			"b_c": float64(6),
-		})
-}
-
-func saramaMsg(val string) *sarama.ConsumerMessage {
-	return &sarama.ConsumerMessage{
-		Key:       nil,
-		Value:     []byte(val),
-		Offset:    0,
-		Partition: 0,
-	}
-}
-
-func saramaMsgWithTopic(val string, topic string) *sarama.ConsumerMessage {
-	return &sarama.ConsumerMessage{
-		Key:       nil,
-		Value:     []byte(val),
-		Offset:    0,
-		Partition: 0,
-		Topic:     topic,
-	}
-}
+type FakeConsumerGroup struct {
+	brokers []string
+	group   string
+	config  *sarama.Config
+
+	handler sarama.ConsumerGroupHandler
+	errors  chan error
+}
+
+func (g *FakeConsumerGroup) Consume(ctx context.Context, topics []string, handler sarama.ConsumerGroupHandler) error {
+	g.handler = handler
+	g.handler.Setup(nil)
+	return nil
+}
+
+func (g *FakeConsumerGroup) Errors() <-chan error {
+	return g.errors
+}
+
+func (g *FakeConsumerGroup) Close() error {
+	close(g.errors)
+	return nil
+}
+
+type FakeCreator struct {
+	ConsumerGroup *FakeConsumerGroup
+}
+
+func (c *FakeCreator) Create(brokers []string, group string, config *sarama.Config) (ConsumerGroup, error) {
+	c.ConsumerGroup.brokers = brokers
+	c.ConsumerGroup.group = group
+	c.ConsumerGroup.config = config
+	return c.ConsumerGroup, nil
+}
+
+func TestInit(t *testing.T) {
+	tests := []struct {
+		name      string
+		plugin    *KafkaConsumer
+		initError bool
+		check     func(t *testing.T, plugin *KafkaConsumer)
+	}{
+		{
+			name:   "default config",
+			plugin: &KafkaConsumer{},
+			check: func(t *testing.T, plugin *KafkaConsumer) {
+				require.Equal(t, plugin.ConsumerGroup, defaultConsumerGroup)
+				require.Equal(t, plugin.MaxUndeliveredMessages, defaultMaxUndeliveredMessages)
+				require.Equal(t, plugin.config.ClientID, "Telegraf")
+				require.Equal(t, plugin.config.Consumer.Offsets.Initial, sarama.OffsetOldest)
+			},
+		},
+		{
+			name: "parses valid version string",
+			plugin: &KafkaConsumer{
+				Version: "1.0.0",
+			},
+			check: func(t *testing.T, plugin *KafkaConsumer) {
+				require.Equal(t, plugin.config.Version, sarama.V1_0_0_0)
+			},
+		},
+		{
+			name: "invalid version string",
+			plugin: &KafkaConsumer{
+				Version: "100",
+			},
+			initError: true,
+		},
+		{
+			name: "custom client_id",
+			plugin: &KafkaConsumer{
+				ClientID: "custom",
+			},
+			check: func(t *testing.T, plugin *KafkaConsumer) {
+				require.Equal(t, plugin.config.ClientID, "custom")
+			},
+		},
+		{
+			name: "custom offset",
+			plugin: &KafkaConsumer{
+				Offset: "newest",
+			},
+			check: func(t *testing.T, plugin *KafkaConsumer) {
+				require.Equal(t, plugin.config.Consumer.Offsets.Initial, sarama.OffsetNewest)
+			},
+		},
+		{
+			name: "invalid offset",
+			plugin: &KafkaConsumer{
+				Offset: "middle",
+			},
+			initError: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			cg := &FakeConsumerGroup{}
+			tt.plugin.ConsumerCreator = &FakeCreator{ConsumerGroup: cg}
+			err := tt.plugin.Init()
+			if tt.initError {
+				require.Error(t, err)
+				return
+			}
+
+			tt.check(t, tt.plugin)
+		})
+	}
+}
+
+func TestStartStop(t *testing.T) {
+	cg := &FakeConsumerGroup{errors: make(chan error)}
+	plugin := &KafkaConsumer{
+		ConsumerCreator: &FakeCreator{ConsumerGroup: cg},
+	}
+	err := plugin.Init()
+	require.NoError(t, err)
+
+	var acc testutil.Accumulator
+	err = plugin.Start(&acc)
+	require.NoError(t, err)
+
+	plugin.Stop()
+}
+
+type FakeConsumerGroupSession struct {
+	ctx context.Context
+}
+
+func (s *FakeConsumerGroupSession) Claims() map[string][]int32 {
+	panic("not implemented")
+}
+
+func (s *FakeConsumerGroupSession) MemberID() string {
+	panic("not implemented")
+}
+
+func (s *FakeConsumerGroupSession) GenerationID() int32 {
+	panic("not implemented")
+}
+
+func (s *FakeConsumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) {
+	panic("not implemented")
+}
+
+func (s *FakeConsumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) {
+	panic("not implemented")
+}
+
+func (s *FakeConsumerGroupSession) MarkMessage(msg *sarama.ConsumerMessage, metadata string) {
+}
+
+func (s *FakeConsumerGroupSession) Context() context.Context {
+	return s.ctx
+}
+
+type FakeConsumerGroupClaim struct {
+	messages chan *sarama.ConsumerMessage
+}
+
+func (c *FakeConsumerGroupClaim) Topic() string {
+	panic("not implemented")
+}
+
+func (c *FakeConsumerGroupClaim) Partition() int32 {
+	panic("not implemented")
+}
+
+func (c *FakeConsumerGroupClaim) InitialOffset() int64 {
+	panic("not implemented")
+}
+
+func (c *FakeConsumerGroupClaim) HighWaterMarkOffset() int64 {
+	panic("not implemented")
+}
+
+func (c *FakeConsumerGroupClaim) Messages() <-chan *sarama.ConsumerMessage {
+	return c.messages
+}
+
+func TestConsumerGroupHandler_Lifecycle(t *testing.T) {
+	acc := &testutil.Accumulator{}
+	parser := &value.ValueParser{MetricName: "cpu", DataType: "int"}
+	cg := NewConsumerGroupHandler(acc, 1, parser)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	session := &FakeConsumerGroupSession{
+		ctx: ctx,
+	}
+	var claim FakeConsumerGroupClaim
+	var err error
+
+	err = cg.Setup(session)
+	require.NoError(t, err)
+
+	cancel()
+	err = cg.ConsumeClaim(session, &claim)
+	require.NoError(t, err)
+
+	err = cg.Cleanup(session)
+	require.NoError(t, err)
+}
+
+func TestConsumerGroupHandler_ConsumeClaim(t *testing.T) {
+	acc := &testutil.Accumulator{}
+	parser := &value.ValueParser{MetricName: "cpu", DataType: "int"}
+	cg := NewConsumerGroupHandler(acc, 1, parser)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	session := &FakeConsumerGroupSession{ctx: ctx}
+	claim := &FakeConsumerGroupClaim{
+		messages: make(chan *sarama.ConsumerMessage, 1),
+	}
+
+	err := cg.Setup(session)
+	require.NoError(t, err)
+
+	claim.messages <- &sarama.ConsumerMessage{
+		Topic: "telegraf",
+		Value: []byte("42"),
+	}
+
+	go func() {
+		err = cg.ConsumeClaim(session, claim)
+		require.NoError(t, err)
+	}()
+
+	acc.Wait(1)
+	cancel()
+
+	err = cg.Cleanup(session)
+	require.NoError(t, err)
+
+	expected := []telegraf.Metric{
+		testutil.MustMetric(
+			"cpu",
+			map[string]string{},
+			map[string]interface{}{
+				"value": 42,
+			},
+			time.Now(),
+		),
+	}
+
+	testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
+}
+
+func TestConsumerGroupHandler_Handle(t *testing.T) {
+	tests := []struct {
+		name          string
+		maxMessageLen int
+		topicTag      string
+		msg           *sarama.ConsumerMessage
+		expected      []telegraf.Metric
+	}{
+		{
+			name: "happy path",
+			msg: &sarama.ConsumerMessage{
+				Topic: "telegraf",
+				Value: []byte("42"),
+			},
+			expected: []telegraf.Metric{
+				testutil.MustMetric(
+					"cpu",
+					map[string]string{},
+					map[string]interface{}{
+						"value": 42,
+					},
+					time.Now(),
+				),
+			},
+		},
+		{
+			name:          "message to long",
+			maxMessageLen: 4,
+			msg: &sarama.ConsumerMessage{
+				Topic: "telegraf",
+				Value: []byte("12345"),
+			},
+			expected: []telegraf.Metric{},
+		},
+		{
+			name: "parse error",
+			msg: &sarama.ConsumerMessage{
+				Topic: "telegraf",
+				Value: []byte("not an integer"),
+			},
+			expected: []telegraf.Metric{},
+		},
+		{
+			name:     "add topic tag",
+			topicTag: "topic",
+			msg: &sarama.ConsumerMessage{
+				Topic: "telegraf",
+				Value: []byte("42"),
+			},
+			expected: []telegraf.Metric{
+				testutil.MustMetric(
+					"cpu",
+					map[string]string{
+						"topic": "telegraf",
+					},
+					map[string]interface{}{
+						"value": 42,
+					},
+					time.Now(),
+				),
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			acc := &testutil.Accumulator{}
+			parser := &value.ValueParser{MetricName: "cpu", DataType: "int"}
+			cg := NewConsumerGroupHandler(acc, 1, parser)
+			cg.MaxMessageLen = tt.maxMessageLen
+			cg.TopicTag = tt.topicTag
+
+			ctx := context.Background()
+			session := &FakeConsumerGroupSession{ctx: ctx}
+
+			cg.Reserve(ctx)
+			cg.Handle(session, tt.msg)
+
+			testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
+		})
+	}
+}
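The tests above depend on the ConsumerCreator seam: the plugin asks a small creator interface for its consumer group instead of calling sarama.NewConsumerGroup directly, so a fake can be injected. The sketch below shows that constructor-injection pattern on its own; it is not code from the commit, and every name in it is illustrative.

```go
package main

import "fmt"

// Group is a stand-in for the plugin's ConsumerGroup interface.
type Group interface {
	Close() error
}

// Creator mirrors ConsumerGroupCreator: the single place a real group is built.
type Creator interface {
	Create(brokers []string, group string) (Group, error)
}

type fakeGroup struct{}

func (fakeGroup) Close() error { return nil }

// fakeCreator records its arguments and hands back the fake group,
// much like FakeCreator does in the test file above.
type fakeCreator struct {
	brokers []string
	group   string
}

var _ Creator = (*fakeCreator)(nil) // compile-time check that the fake satisfies Creator

func (c *fakeCreator) Create(brokers []string, group string) (Group, error) {
	c.brokers = brokers
	c.group = group
	return fakeGroup{}, nil
}

func main() {
	creator := &fakeCreator{}
	g, err := creator.Create([]string{"localhost:9092"}, "example_group")
	fmt.Println(g != nil, err == nil, creator.group) // true true example_group
}
```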