package kafka_consumer

import (
	"fmt"
	"testing"
	"time"

	"github.com/Shopify/sarama"
	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/influxdata/telegraf/plugins/parsers"
)
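// TestReadsMetricsFromKafka is an integration test. It assumes a Kafka broker
// reachable at testutil.GetLocalHost():9092 and a ZooKeeper instance at
// testutil.GetLocalHost():2181 (for example, locally running Docker
// containers). It is skipped when the tests are run with -short.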
func TestReadsMetricsFromKafka(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	brokerPeers := []string{testutil.GetLocalHost() + ":9092"}
	zkPeers := []string{testutil.GetLocalHost() + ":2181"}
	testTopic := fmt.Sprintf("telegraf_test_topic_%d", time.Now().Unix())

	// Send a Kafka message to the kafka host
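	// (the payload is a single metric in InfluxDB line protocol with a fixed
	// timestamp; the assertions at the end of the test expect exactly these
	// values)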
	msg := "cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257"
	producer, err := sarama.NewSyncProducer(brokerPeers, nil)
	require.NoError(t, err)
	_, _, err = producer.SendMessage(
		&sarama.ProducerMessage{
			Topic: testTopic,
			Value: sarama.StringEncoder(msg),
		})
	require.NoError(t, err)
	defer producer.Close()

	// Start the Kafka Consumer
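	// (Offset "oldest" makes the consumer read the topic from the beginning, so
	// the message produced above is received even though it was sent before the
	// consumer started; PointBuffer bounds how many parsed points the consumer
	// will buffer internally)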
	k := &Kafka{
		ConsumerGroup:  "telegraf_test_consumers",
		Topics:         []string{testTopic},
		ZookeeperPeers: zkPeers,
		PointBuffer:    100000,
		Offset:         "oldest",
	}
	p, _ := parsers.NewInfluxParser()
	k.SetParser(p)
	if err := k.Start(); err != nil {
		t.Fatal(err.Error())
	} else {
		defer k.Stop()
	}

	waitForPoint(k, t)

	// Verify that we can now gather the sent message
	var acc testutil.Accumulator
	// Sanity check
	assert.Equal(t, 0, len(acc.Metrics), "There should not be any points")

	// Gather points
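	// (a single Gather call should hand the point buffered by the consumer over
	// to the accumulator)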
	err = k.Gather(&acc)
	require.NoError(t, err)
	if len(acc.Metrics) == 1 {
		point := acc.Metrics[0]
		assert.Equal(t, "cpu_load_short", point.Measurement)
		assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields)
		assert.Equal(t, map[string]string{
			"host":      "server01",
			"direction": "in",
			"region":    "us-west",
		}, point.Tags)
		assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix())
	} else {
		t.Errorf("expected 1 point in the accumulator, got %d", len(acc.Metrics))
	}
}

// waitForPoint waits for the metric that was sent to the kafka broker to
// arrive at the kafka consumer.
func waitForPoint(k *Kafka, t *testing.T) {
	// Give the kafka container up to 5 seconds (1000 polls at 5ms intervals) to
	// get the point to the consumer.
	ticker := time.NewTicker(5 * time.Millisecond)
	defer ticker.Stop()
	counter := 0
	for {
		select {
		case <-ticker.C:
			counter++
			if counter > 1000 {
				t.Fatal("Waited for 5s, point never arrived to consumer")
			} else if len(k.metricC) == 1 {
				return
			}
		}
	}
}