parent a3feddd8ed
commit 970bfce997
@@ -1,12 +1,20 @@
 ## v0.2.3 [unreleased]
 
 ### Release Notes
+- **breaking change** The `kafka` plugin has been renamed to `kafka_consumer`,
+and most of the config option names have changed.
+This only affects the kafka consumer _plugin_ (not the
+output). There were a number of problems with the kafka plugin that led to it
+only collecting data once at startup, so the kafka plugin was basically non-
+functional.
 - Riemann output added
 
 ### Features
 - [#379](https://github.com/influxdb/telegraf/pull/379): Riemann output, thanks @allenj!
+- [#375](https://github.com/influxdb/telegraf/pull/375): kafka_consumer service plugin.
 
 ### Bugfixes
+- [#371](https://github.com/influxdb/telegraf/issues/371): Kafka consumer plugin not functioning.
 
 ## v0.2.2 [2015-11-18]
 
@@ -164,7 +164,6 @@ Telegraf currently has support for collecting metrics from:
 * haproxy
 * httpjson (generic JSON-emitting http service plugin)
 * jolokia (remote JMX with JSON over HTTP)
-* kafka_consumer
 * leofs
 * lustre2
 * memcached
@@ -197,6 +196,7 @@ Telegraf currently has support for collecting metrics from:
 Telegraf can collect metrics via the following services:
 
 * statsd
+* kafka_consumer
 
 We'll be adding support for many more over the coming months. Read on if you
 want to add support for another service or third-party API.
@@ -8,7 +8,7 @@ import (
 
 	"github.com/influxdb/telegraf/plugins"
 	"github.com/influxdb/telegraf/plugins/exec"
-	"github.com/influxdb/telegraf/plugins/kafka_consumer"
+	"github.com/influxdb/telegraf/plugins/memcached"
 	"github.com/influxdb/telegraf/plugins/procstat"
 	"github.com/naoina/toml"
 	"github.com/naoina/toml/ast"
@@ -205,17 +205,14 @@ func TestConfig_parsePlugin(t *testing.T) {
 		pluginConfigurationFieldsSet: make(map[string][]string),
 	}
 
-	subtbl := tbl.Fields["kafka"].(*ast.Table)
-	err = c.parsePlugin("kafka", subtbl)
+	subtbl := tbl.Fields["memcached"].(*ast.Table)
+	err = c.parsePlugin("memcached", subtbl)
 
-	kafka := plugins.Plugins["kafka"]().(*kafka_consumer.Kafka)
-	kafka.ConsumerGroupName = "telegraf_metrics_consumers"
-	kafka.Topic = "topic_with_metrics"
-	kafka.ZookeeperPeers = []string{"test.example.com:2181"}
-	kafka.BatchSize = 1000
+	memcached := plugins.Plugins["memcached"]().(*memcached.Memcached)
+	memcached.Servers = []string{"localhost"}
 
-	kConfig := &ConfiguredPlugin{
-		Name: "kafka",
+	mConfig := &ConfiguredPlugin{
+		Name: "memcached",
 		Drop: []string{"other", "stuff"},
 		Pass: []string{"some", "strings"},
 		TagDrop: []TagFilter{
@@ -233,10 +230,10 @@ func TestConfig_parsePlugin(t *testing.T) {
 		Interval: 5 * time.Second,
 	}
 
-	assert.Equal(t, kafka, c.plugins["kafka"],
-		"Testdata did not produce a correct kafka struct.")
-	assert.Equal(t, kConfig, c.pluginConfigurations["kafka"],
-		"Testdata did not produce correct kafka metadata.")
+	assert.Equal(t, memcached, c.plugins["memcached"],
+		"Testdata did not produce a correct memcached struct.")
+	assert.Equal(t, mConfig, c.pluginConfigurations["memcached"],
+		"Testdata did not produce correct memcached metadata.")
 }
 
 func TestConfig_LoadDirectory(t *testing.T) {
@@ -249,14 +246,11 @@ func TestConfig_LoadDirectory(t *testing.T) {
 		t.Error(err)
 	}
 
-	kafka := plugins.Plugins["kafka"]().(*kafka_consumer.Kafka)
-	kafka.ConsumerGroupName = "telegraf_metrics_consumers"
-	kafka.Topic = "topic_with_metrics"
-	kafka.ZookeeperPeers = []string{"test.example.com:2181"}
-	kafka.BatchSize = 10000
+	memcached := plugins.Plugins["memcached"]().(*memcached.Memcached)
+	memcached.Servers = []string{"192.168.1.1"}
 
-	kConfig := &ConfiguredPlugin{
-		Name: "kafka",
+	mConfig := &ConfiguredPlugin{
+		Name: "memcached",
 		Drop: []string{"other", "stuff"},
 		Pass: []string{"some", "strings"},
 		TagDrop: []TagFilter{
@@ -296,10 +290,10 @@ func TestConfig_LoadDirectory(t *testing.T) {
 
 	pConfig := &ConfiguredPlugin{Name: "procstat"}
 
-	assert.Equal(t, kafka, c.plugins["kafka"],
-		"Merged Testdata did not produce a correct kafka struct.")
-	assert.Equal(t, kConfig, c.pluginConfigurations["kafka"],
-		"Merged Testdata did not produce correct kafka metadata.")
+	assert.Equal(t, memcached, c.plugins["memcached"],
+		"Merged Testdata did not produce a correct memcached struct.")
+	assert.Equal(t, mConfig, c.pluginConfigurations["memcached"],
+		"Merged Testdata did not produce correct memcached metadata.")
 
 	assert.Equal(t, ex, c.plugins["exec"],
 		"Merged Testdata did not produce a correct exec struct.")
@@ -29,8 +29,9 @@ type InfluxDB struct {
 }
 
 var sampleConfig = `
-# The full HTTP or UDP endpoint URL for your InfluxDB instance
-# Multiple urls can be specified for InfluxDB cluster support.
+# The full HTTP or UDP endpoint URL for your InfluxDB instance.
+# Multiple urls can be specified, but it is assumed they are part of the same
+# cluster; this means that only ONE of the urls will be written to each interval.
 # urls = ["udp://localhost:8089"] # UDP endpoint example
 urls = ["http://localhost:8086"] # required
 # The target database for metrics (telegraf will create it if not exists)
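
Note: the reworded comment above describes write behavior, not replication. Per flush interval the output is expected to pick a single URL from `urls`. A rough sketch of that selection, assuming a simple random pick; the real logic lives in the influxdb output and may differ:

// Hypothetical sketch (assumes "math/rand"); pickTarget and the
// random-choice strategy are assumptions, not the verbatim telegraf code.
func pickTarget(urls []string) string {
	return urls[rand.Intn(len(urls))]
}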
@@ -1,36 +1,51 @@
 package kafka_consumer
 
 import (
-	"os"
-	"os/signal"
-	"time"
+	"log"
+	"strings"
+	"sync"
 
-	"github.com/Shopify/sarama"
 	"github.com/influxdb/influxdb/models"
 	"github.com/influxdb/telegraf/plugins"
 
+	"github.com/Shopify/sarama"
 	"github.com/wvanbergen/kafka/consumergroup"
 )
 
 type Kafka struct {
-	ConsumerGroupName string
-	Topic             string
+	ConsumerGroup  string
+	Topics         []string
 	ZookeeperPeers []string
 	Consumer       *consumergroup.ConsumerGroup
-	BatchSize      int
+	PointBuffer    int
+	Offset         string
+
+	sync.Mutex
+
+	// channel for all incoming kafka messages
+	in <-chan *sarama.ConsumerMessage
+	// channel for all kafka consumer errors
+	errs <-chan *sarama.ConsumerError
+	// channel for all incoming parsed kafka points
+	pointChan chan models.Point
+	done      chan struct{}
+
+	// doNotCommitMsgs tells the parser not to call CommitUpto on the consumer;
+	// this is mostly for test purposes, but there may be a use-case for it later.
+	doNotCommitMsgs bool
 }
 
 var sampleConfig = `
-# topic to consume
-topic = "topic_with_metrics"
+# topic(s) to consume
+topics = ["telegraf"]
 
-# the name of the consumer group
-consumerGroupName = "telegraf_metrics_consumers"
-
 # an array of Zookeeper connection strings
-zookeeperPeers = ["localhost:2181"]
-# Batch size of points sent to InfluxDB
-batchSize = 1000
+zookeeper_peers = ["localhost:2181"]
+# the name of the consumer group
+consumer_group = "telegraf_metrics_consumers"
+# Maximum number of points to buffer between collection intervals
+point_buffer = 100000
+# Offset (must be either "oldest" or "newest")
+offset = "oldest"
 `
 
 func (k *Kafka) SampleConfig() string {
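
Note: the new `in`, `errs`, `pointChan`, and `done` fields exist so the consumer can run continuously between collection intervals; the Start/Stop methods that drive them appear in the next hunk. A minimal sketch of the service-plugin shape this implies follows. The interface name and exact definition in the plugins package are assumptions here, not the verbatim telegraf declaration:

// Sketch only: inferred shape of a Telegraf service plugin, i.e. a regular
// collector (Gather) plus background lifecycle hooks.
type ServicePlugin interface {
	SampleConfig() string
	Description() string
	Gather(acc plugins.Accumulator) error
	Start() error // begin consuming in the background
	Stop()        // tear down the background consumer
}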
@@ -38,127 +53,114 @@ func (k *Kafka) SampleConfig() string {
 }
 
 func (k *Kafka) Description() string {
-	return "read metrics from a Kafka topic"
+	return "Read line-protocol metrics from Kafka topic(s)"
 }
 
-type Metric struct {
-	Measurement string                 `json:"measurement"`
-	Values      map[string]interface{} `json:"values"`
-	Tags        map[string]string      `json:"tags"`
-	Time        time.Time              `json:"time"`
-}
-
-func (k *Kafka) Gather(acc plugins.Accumulator) error {
+func (k *Kafka) Start() error {
+	k.Lock()
+	defer k.Unlock()
 	var consumerErr error
-	metricQueue := make(chan []byte, 200)
 
-	if k.Consumer == nil {
+	config := consumergroup.NewConfig()
+	switch strings.ToLower(k.Offset) {
+	case "oldest", "":
+		config.Offsets.Initial = sarama.OffsetOldest
+	case "newest":
+		config.Offsets.Initial = sarama.OffsetNewest
+	default:
+		log.Printf("WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n",
+			k.Offset)
+		config.Offsets.Initial = sarama.OffsetOldest
+	}
+
+	if k.Consumer == nil || k.Consumer.Closed() {
 		k.Consumer, consumerErr = consumergroup.JoinConsumerGroup(
-			k.ConsumerGroupName,
-			[]string{k.Topic},
+			k.ConsumerGroup,
+			k.Topics,
 			k.ZookeeperPeers,
-			nil,
+			config,
 		)
 
 		if consumerErr != nil {
 			return consumerErr
 		}
 
-		c := make(chan os.Signal, 1)
-		halt := make(chan bool, 1)
-		signal.Notify(c, os.Interrupt)
-		go func() {
-			<-c
-			halt <- true
-			emitMetrics(k, acc, metricQueue)
-			k.Consumer.Close()
-		}()
-
-		go readFromKafka(k.Consumer.Messages(),
-			metricQueue,
-			k.BatchSize,
-			k.Consumer.CommitUpto,
-			halt)
+		// Setup message and error channels
+		k.in = k.Consumer.Messages()
+		k.errs = k.Consumer.Errors()
 	}
 
-	return emitMetrics(k, acc, metricQueue)
+	k.done = make(chan struct{})
+	if k.PointBuffer == 0 {
+		k.PointBuffer = 100000
+	}
+	k.pointChan = make(chan models.Point, k.PointBuffer)
+
+	// Start the kafka message reader
+	go k.parser()
+	log.Printf("Started the kafka consumer service, peers: %v, topics: %v\n",
+		k.ZookeeperPeers, k.Topics)
+	return nil
 }
 
-func emitMetrics(k *Kafka, acc plugins.Accumulator, metricConsumer <-chan []byte) error {
-	timeout := time.After(1 * time.Second)
-
-	for {
-		select {
-		case batch := <-metricConsumer:
-			var points []models.Point
-			var err error
-			if points, err = models.ParsePoints(batch); err != nil {
-				return err
-			}
-
-			for _, point := range points {
-				acc.AddFields(point.Name(), point.Fields(), point.Tags(), point.Time())
-			}
-		case <-timeout:
-			return nil
-		}
-	}
-}
+// parser() reads all incoming messages from the consumer, and parses them into
+// influxdb metric points.
+func (k *Kafka) parser() {
+	for {
+		select {
+		case <-k.done:
+			return
+		case err := <-k.errs:
+			log.Printf("Kafka Consumer Error: %s\n", err.Error())
+		case msg := <-k.in:
+			points, err := models.ParsePoints(msg.Value)
+			if err != nil {
+				log.Printf("Could not parse kafka message: %s, error: %s",
+					string(msg.Value), err.Error())
+			}
+
+			for _, point := range points {
+				select {
+				case k.pointChan <- point:
+					continue
+				default:
+					log.Printf("Kafka Consumer buffer is full, dropping a point." +
+						" You may want to increase the point_buffer setting")
+				}
+			}
+
+			if !k.doNotCommitMsgs {
+				// TODO(cam) this locking can be removed if this PR gets merged:
+				// https://github.com/wvanbergen/kafka/pull/84
+				k.Lock()
+				k.Consumer.CommitUpto(msg)
+				k.Unlock()
+			}
+		}
+	}
+}
+
+func (k *Kafka) Stop() {
+	k.Lock()
+	defer k.Unlock()
+	close(k.done)
+	if err := k.Consumer.Close(); err != nil {
+		log.Printf("Error closing kafka consumer: %s\n", err.Error())
+	}
+}
+
+func (k *Kafka) Gather(acc plugins.Accumulator) error {
+	k.Lock()
+	defer k.Unlock()
+	npoints := len(k.pointChan)
+	for i := 0; i < npoints; i++ {
+		point := <-k.pointChan
+		acc.AddFields(point.Name(), point.Fields(), point.Tags(), point.Time())
+	}
+	return nil
+}
 
-const millisecond = 1000000 * time.Nanosecond
-
-type ack func(*sarama.ConsumerMessage) error
-
-func readFromKafka(
-	kafkaMsgs <-chan *sarama.ConsumerMessage,
-	metricProducer chan<- []byte,
-	maxBatchSize int,
-	ackMsg ack,
-	halt <-chan bool,
-) {
-	batch := make([]byte, 0)
-	currentBatchSize := 0
-	timeout := time.After(500 * millisecond)
-	var msg *sarama.ConsumerMessage
-
-	for {
-		select {
-		case msg = <-kafkaMsgs:
-			if currentBatchSize != 0 {
-				batch = append(batch, '\n')
-			}
-
-			batch = append(batch, msg.Value...)
-			currentBatchSize++
-
-			if currentBatchSize == maxBatchSize {
-				metricProducer <- batch
-				currentBatchSize = 0
-				batch = make([]byte, 0)
-				ackMsg(msg)
-			}
-		case <-timeout:
-			if currentBatchSize != 0 {
-				metricProducer <- batch
-				currentBatchSize = 0
-				batch = make([]byte, 0)
-				ackMsg(msg)
-			}
-
-			timeout = time.After(500 * millisecond)
-		case <-halt:
-			if currentBatchSize != 0 {
-				metricProducer <- batch
-				ackMsg(msg)
-			}
-
-			return
-		}
-	}
-}
-
 func init() {
-	plugins.Add("kafka", func() plugins.Plugin {
+	plugins.Add("kafka_consumer", func() plugins.Plugin {
 		return &Kafka{}
 	})
 }
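
Note: two small channel idioms carry this rewrite. The parser's select-with-default makes the `pointChan` send non-blocking, so a full `point_buffer` drops points instead of stalling the Kafka consumer, and Gather snapshots `len(k.pointChan)` before draining, so points that arrive mid-gather wait for the next interval. A self-contained sketch of both patterns, not part of the commit itself:

package main

import "fmt"

func main() {
	buf := make(chan int, 3) // stands in for k.pointChan with point_buffer = 3

	// Non-blocking send: the default case fires when buf is full,
	// mirroring the "buffer is full, dropping a point" branch in parser().
	for i := 0; i < 5; i++ {
		select {
		case buf <- i:
		default:
			fmt.Println("buffer full, dropping", i)
		}
	}

	// Snapshot drain: read len(buf) once, then receive exactly that many,
	// mirroring Gather(); concurrent sends are left for the next round.
	n := len(buf)
	for i := 0; i < n; i++ {
		fmt.Println("gathered", <-buf)
	}
}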
@@ -15,36 +15,48 @@ func TestReadsMetricsFromKafka(t *testing.T) {
 	if testing.Short() {
 		t.Skip("Skipping integration test in short mode")
 	}
-	var zkPeers, brokerPeers []string
 
-	zkPeers = []string{testutil.GetLocalHost() + ":2181"}
-	brokerPeers = []string{testutil.GetLocalHost() + ":9092"}
-	k := &Kafka{
-		ConsumerGroupName: "telegraf_test_consumers",
-		Topic:             fmt.Sprintf("telegraf_test_topic_%d", time.Now().Unix()),
-		ZookeeperPeers:    zkPeers,
-	}
+	brokerPeers := []string{testutil.GetLocalHost() + ":9092"}
+	zkPeers := []string{testutil.GetLocalHost() + ":2181"}
+	testTopic := fmt.Sprintf("telegraf_test_topic_%d", time.Now().Unix())
 
+	// Send a Kafka message to the kafka host
 	msg := "cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257"
 	producer, err := sarama.NewSyncProducer(brokerPeers, nil)
 	require.NoError(t, err)
-	_, _, err = producer.SendMessage(&sarama.ProducerMessage{Topic: k.Topic, Value: sarama.StringEncoder(msg)})
+	_, _, err = producer.SendMessage(
+		&sarama.ProducerMessage{
+			Topic: testTopic,
+			Value: sarama.StringEncoder(msg),
+		})
 	require.NoError(t, err)
+	defer producer.Close()
 
-	producer.Close()
+	// Start the Kafka Consumer
+	k := &Kafka{
+		ConsumerGroup:  "telegraf_test_consumers",
+		Topics:         []string{testTopic},
+		ZookeeperPeers: zkPeers,
+		PointBuffer:    100000,
+		Offset:         "oldest",
+	}
+	if err := k.Start(); err != nil {
+		t.Fatal(err.Error())
+	} else {
+		defer k.Stop()
+	}
 
+	waitForPoint(k, t)
+
+	// Verify that we can now gather the sent message
 	var acc testutil.Accumulator
 
 	// Sanity check
-	assert.Equal(t, 0, len(acc.Points), "there should not be any points")
+	assert.Equal(t, 0, len(acc.Points), "There should not be any points")
 
+	// Gather points
 	err = k.Gather(&acc)
 	require.NoError(t, err)
-	assert.Equal(t, 1, len(acc.Points), "there should be a single point")
-
+	if len(acc.Points) == 1 {
 		point := acc.Points[0]
 		assert.Equal(t, "cpu_load_short", point.Measurement)
 		assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields)
@@ -54,4 +66,26 @@ func TestReadsMetricsFromKafka(t *testing.T) {
 			"region": "us-west",
 		}, point.Tags)
 		assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix())
+	} else {
+		t.Errorf("No points found in accumulator, expected 1")
+	}
+}
+
+// Waits for the metric that was sent to the kafka broker to arrive at the kafka
+// consumer
+func waitForPoint(k *Kafka, t *testing.T) {
+	// Give the kafka container up to 5 seconds to get the point to the consumer
+	ticker := time.NewTicker(5 * time.Millisecond)
+	counter := 0
+	for {
+		select {
+		case <-ticker.C:
+			counter++
+			if counter > 1000 {
+				t.Fatal("Waited for 5s, point never arrived to consumer")
+			} else if len(k.pointChan) == 1 {
+				return
+			}
+		}
+	}
+}
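
Note: waitForPoint above bounds its wait by counting 5ms ticks (1000 ticks, roughly 5 seconds). An equivalent formulation with an explicit deadline, shown only for comparison and not part of this commit:

// Sketch: the same 5-second bound expressed as an absolute deadline.
func waitForPointDeadline(k *Kafka, t *testing.T) {
	deadline := time.Now().Add(5 * time.Second)
	for time.Now().Before(deadline) {
		if len(k.pointChan) == 1 {
			return
		}
		time.Sleep(5 * time.Millisecond)
	}
	t.Fatal("point never arrived to consumer")
}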
@@ -1,92 +1,91 @@
 package kafka_consumer
 
 import (
-	"strings"
 	"testing"
 	"time"
 
-	"github.com/Shopify/sarama"
+	"github.com/influxdb/influxdb/models"
 	"github.com/influxdb/telegraf/testutil"
 
+	"github.com/Shopify/sarama"
 	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 )
 
-const testMsg = "cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257"
+const (
+	testMsg     = "cpu_load_short,host=server01 value=23422.0 1422568543702900257"
+	invalidMsg  = "cpu_load_short,host=server01 1422568543702900257"
+	pointBuffer = 5
+)
 
-func TestReadFromKafkaBatchesMsgsOnBatchSize(t *testing.T) {
-	halt := make(chan bool, 1)
-	metricChan := make(chan []byte, 1)
-	kafkaChan := make(chan *sarama.ConsumerMessage, 10)
-	for i := 0; i < 10; i++ {
-		kafkaChan <- saramaMsg(testMsg)
-	}
-
-	expectedBatch := strings.Repeat(testMsg+"\n", 9) + testMsg
-	readFromKafka(kafkaChan, metricChan, 10, func(msg *sarama.ConsumerMessage) error {
-		batch := <-metricChan
-		assert.Equal(t, expectedBatch, string(batch))
-
-		halt <- true
-
-		return nil
-	}, halt)
-}
-
-func TestReadFromKafkaBatchesMsgsOnTimeout(t *testing.T) {
-	halt := make(chan bool, 1)
-	metricChan := make(chan []byte, 1)
-	kafkaChan := make(chan *sarama.ConsumerMessage, 10)
-	for i := 0; i < 3; i++ {
-		kafkaChan <- saramaMsg(testMsg)
-	}
-
-	expectedBatch := strings.Repeat(testMsg+"\n", 2) + testMsg
-	readFromKafka(kafkaChan, metricChan, 10, func(msg *sarama.ConsumerMessage) error {
-		batch := <-metricChan
-		assert.Equal(t, expectedBatch, string(batch))
-
-		halt <- true
-
-		return nil
-	}, halt)
-}
-
-func TestEmitMetricsSendMetricsToAcc(t *testing.T) {
-	k := &Kafka{}
-	var acc testutil.Accumulator
-	testChan := make(chan []byte, 1)
-	testChan <- []byte(testMsg)
-
-	err := emitMetrics(k, &acc, testChan)
-	require.NoError(t, err)
-
-	assert.Equal(t, 1, len(acc.Points), "there should be a single point")
-
-	point := acc.Points[0]
-	assert.Equal(t, "cpu_load_short", point.Measurement)
-	assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields)
-	assert.Equal(t, map[string]string{
-		"host":      "server01",
-		"direction": "in",
-		"region":    "us-west",
-	}, point.Tags)
-
-	if time.Unix(0, 1422568543702900257).Unix() != point.Time.Unix() {
-		t.Errorf("Expected: %v, received %v\n",
-			time.Unix(0, 1422568543702900257).Unix(),
-			point.Time.Unix())
-	}
-}
-
-func TestEmitMetricsTimesOut(t *testing.T) {
-	k := &Kafka{}
-	var acc testutil.Accumulator
-	testChan := make(chan []byte)
-
-	err := emitMetrics(k, &acc, testChan)
-	require.NoError(t, err)
-
-	assert.Equal(t, 0, len(acc.Points), "there should not be a any points")
+func NewTestKafka() (*Kafka, chan *sarama.ConsumerMessage) {
+	in := make(chan *sarama.ConsumerMessage, pointBuffer)
+	k := Kafka{
+		ConsumerGroup:   "test",
+		Topics:          []string{"telegraf"},
+		ZookeeperPeers:  []string{"localhost:2181"},
+		PointBuffer:     pointBuffer,
+		Offset:          "oldest",
+		in:              in,
+		doNotCommitMsgs: true,
+		errs:            make(chan *sarama.ConsumerError, pointBuffer),
+		done:            make(chan struct{}),
+		pointChan:       make(chan models.Point, pointBuffer),
+	}
+	return &k, in
+}
+
+// Test that the parser parses kafka messages into points
+func TestRunParser(t *testing.T) {
+	k, in := NewTestKafka()
+	defer close(k.done)
+
+	go k.parser()
+	in <- saramaMsg(testMsg)
+	time.Sleep(time.Millisecond)
+
+	assert.Equal(t, len(k.pointChan), 1)
+}
+
+// Test that the parser ignores invalid messages
+func TestRunParserInvalidMsg(t *testing.T) {
+	k, in := NewTestKafka()
+	defer close(k.done)
+
+	go k.parser()
+	in <- saramaMsg(invalidMsg)
+	time.Sleep(time.Millisecond)
+
+	assert.Equal(t, len(k.pointChan), 0)
+}
+
+// Test that points are dropped when we hit the buffer limit
+func TestRunParserRespectsBuffer(t *testing.T) {
+	k, in := NewTestKafka()
+	defer close(k.done)
+
+	go k.parser()
+	for i := 0; i < pointBuffer+1; i++ {
+		in <- saramaMsg(testMsg)
+	}
+	time.Sleep(time.Millisecond)
+
+	assert.Equal(t, len(k.pointChan), 5)
+}
+
+// Test that parsed kafka messages can then be gathered into an accumulator
+func TestRunParserAndGather(t *testing.T) {
+	k, in := NewTestKafka()
+	defer close(k.done)
+
+	go k.parser()
+	in <- saramaMsg(testMsg)
+	time.Sleep(time.Millisecond)
+
+	acc := testutil.Accumulator{}
+	k.Gather(&acc)
+
+	assert.Equal(t, len(acc.Points), 1)
+	assert.True(t, acc.CheckValue("cpu_load_short", 23422.0))
 }
 
 func saramaMsg(val string) *sarama.ConsumerMessage {
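
Note: the saramaMsg helper these tests rely on is cut off at the hunk boundary above; presumably it just wraps a line-protocol string in a consumer message, along these lines (an assumption, since the body is outside this diff):

// Presumed body of saramaMsg; only its signature appears in the hunk.
func saramaMsg(val string) *sarama.ConsumerMessage {
	return &sarama.ConsumerMessage{
		Key:   nil,
		Value: []byte(val),
	}
}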
@@ -183,8 +183,6 @@ func (s *Statsd) Gather(acc plugins.Accumulator) error {
 }
 
 func (s *Statsd) Start() error {
-	log.Println("Starting up the statsd service")
-
 	// Make data structures
 	s.done = make(chan struct{})
 	s.in = make(chan string, s.AllowedPendingMessages)
@@ -197,6 +195,7 @@ func (s *Statsd) Start() error {
 	go s.udpListen()
 	// Start the line parser
 	go s.parser()
+	log.Printf("Started the statsd service on %s\n", s.ServiceAddress)
 	return nil
 }
@@ -1,12 +1,9 @@
-[kafka]
-topic = "topic_with_metrics"
-consumerGroupName = "telegraf_metrics_consumers"
-zookeeperPeers = ["test.example.com:2181"]
-batchSize = 1000
+[memcached]
+servers = ["localhost"]
 pass = ["some", "strings"]
 drop = ["other", "stuff"]
 interval = "5s"
-[kafka.tagpass]
+[memcached.tagpass]
 goodtag = ["mytag"]
-[kafka.tagdrop]
+[memcached.tagdrop]
 badtag = ["othertag"]
@@ -1,10 +1,9 @@
-[kafka]
-zookeeperPeers = ["test.example.com:2181"]
-batchSize = 10000
+[memcached]
+servers = ["192.168.1.1"]
 pass = ["some", "strings"]
 drop = ["other", "stuff"]
 interval = "5s"
-[kafka.tagpass]
+[memcached.tagpass]
 goodtag = ["mytag"]
-[kafka.tagdrop]
+[memcached.tagdrop]
 badtag = ["othertag"]
@@ -171,18 +171,17 @@ urls = ["http://localhost/server-status?auto"]
 # no configuration
 
 # read metrics from a Kafka topic
-[kafka]
-# topic to consume
-topic = "topic_with_metrics"
+[kafka_consumer]
+# topic(s) to consume
+topics = ["telegraf"]
 
-# the name of the consumer group
-consumerGroupName = "telegraf_metrics_consumers"
-
 # an array of Zookeeper connection strings
-zookeeperPeers = ["localhost:2181"]
-# Batch size of points sent to InfluxDB
-batchSize = 1000
+zookeeper_peers = ["localhost:2181"]
+# the name of the consumer group
+consumer_group = "telegraf_metrics_consumers"
+# Maximum number of points to buffer between collection intervals
+point_buffer = 100000
+# Offset (must be either "oldest" or "newest")
+offset = "oldest"
 
 # Read metrics from a LeoFS Server via SNMP
 [leofs]