2015-08-26 17:02:10 +00:00
|
|
|
package kafka
|
|
|
|
|
|
|
|
import (
|
2016-01-11 12:20:51 +00:00
|
|
|
"crypto/tls"
|
2015-08-26 17:02:10 +00:00
|
|
|
"fmt"
|
2016-02-03 19:59:34 +00:00
|
|
|
|
2016-01-27 21:21:36 +00:00
|
|
|
"github.com/influxdata/telegraf"
|
2016-02-03 19:59:34 +00:00
|
|
|
"github.com/influxdata/telegraf/internal"
|
2016-01-27 23:15:14 +00:00
|
|
|
"github.com/influxdata/telegraf/plugins/outputs"
|
2016-02-10 22:50:07 +00:00
|
|
|
"github.com/influxdata/telegraf/plugins/serializers"
|
2016-02-03 19:59:34 +00:00
|
|
|
|
|
|
|
"github.com/Shopify/sarama"
|
2015-08-26 17:02:10 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// Kafka is a telegraf output plugin that publishes serialized metrics to a
// Kafka cluster using sarama's synchronous producer.
type Kafka struct {
	// Kafka brokers to send metrics to
	Brokers []string
	// Kafka topic
	Topic string
	// Routing Key Tag
	RoutingTag string `toml:"routing_tag"`
	// Compression Codec Tag
	// (forwarded to sarama.CompressionCodec: 0 none, 1 gzip, 2 snappy)
	CompressionCodec int
	// RequiredAcks Tag
	// (forwarded to sarama.RequiredAcks: 0, 1, or -1)
	RequiredAcks int
	// MaxRetry Tag
	// (maximum number of producer send retries)
	MaxRetry int

	// Legacy SSL config options
	// TLS client certificate
	Certificate string
	// TLS client key
	Key string
	// TLS certificate authority
	CA string

	// Path to CA file
	SSLCA string `toml:"ssl_ca"`
	// Path to host cert file
	SSLCert string `toml:"ssl_cert"`
	// Path to cert key file
	SSLKey string `toml:"ssl_key"`

	// Skip SSL verification
	InsecureSkipVerify bool

	// NOTE(review): tlsConfig is not referenced by any method visible in this
	// file (Connect builds its own *tls.Config) — confirm whether it is dead.
	tlsConfig tls.Config
	// producer is created in Connect and closed in Close.
	producer sarama.SyncProducer

	// serializer converts telegraf metrics to wire-format lines (set via
	// SetSerializer before Write is called).
	serializer serializers.Serializer
}
|
|
|
|
|
|
|
|
// sampleConfig is the TOML configuration snippet returned by SampleConfig;
// it is user-facing text, so its contents must not be reworded casually.
var sampleConfig = `
  ## URLs of kafka brokers
  brokers = ["localhost:9092"]
  ## Kafka topic for producer messages
  topic = "telegraf"
  ## Telegraf tag to use as a routing key
  ##  ie, if this tag exists, it's value will be used as the routing key
  routing_tag = "host"

  ## CompressionCodec represents the various compression codecs recognized by Kafka in messages.
  ##  0 : No compression
  ##  1 : Gzip compression
  ##  2 : Snappy compression
  compression_codec = 0

  ##  RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements it must see before responding
  ##   0 : the producer never waits for an acknowledgement from the broker. This option provides the lowest latency but the weakest durability guarantees (some data will be lost when a server fails).
  ##   1 : the producer gets an acknowledgement after the leader replica has received the data. This option provides better durability as the client waits until the server acknowledges the request as successful (only messages that were written to the now-dead leader but not yet replicated will be lost).
  ##   -1 : the producer gets an acknowledgement after all in-sync replicas have received the data. This option provides the best durability, we guarantee that no messages will be lost as long as at least one in sync replica remains.
  required_acks = -1

  ##  The total number of times to retry sending a message
  max_retry = 3

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## Data format to output. This can be "influx" or "graphite"
  ## Each data format has it's own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"
`
|
|
|
|
|
2016-02-10 22:50:07 +00:00
|
|
|
// SetSerializer installs the serializer used by Write to convert metrics
// into the configured output data format.
func (k *Kafka) SetSerializer(serializer serializers.Serializer) {
	k.serializer = serializer
}
|
|
|
|
|
2016-02-03 19:59:34 +00:00
|
|
|
func (k *Kafka) Connect() error {
|
|
|
|
config := sarama.NewConfig()
|
2016-03-31 09:14:20 +00:00
|
|
|
|
2016-03-31 15:27:14 +00:00
|
|
|
config.Producer.RequiredAcks = sarama.RequiredAcks(k.RequiredAcks)
|
|
|
|
config.Producer.Compression = sarama.CompressionCodec(k.CompressionCodec)
|
|
|
|
config.Producer.Retry.Max = k.MaxRetry
|
2016-01-11 12:20:51 +00:00
|
|
|
|
2016-02-03 19:59:34 +00:00
|
|
|
// Legacy support ssl config
|
|
|
|
if k.Certificate != "" {
|
|
|
|
k.SSLCert = k.Certificate
|
|
|
|
k.SSLCA = k.CA
|
|
|
|
k.SSLKey = k.Key
|
2016-01-11 12:20:51 +00:00
|
|
|
}
|
|
|
|
|
2016-02-03 19:59:34 +00:00
|
|
|
tlsConfig, err := internal.GetTLSConfig(
|
|
|
|
k.SSLCert, k.SSLKey, k.SSLCA, k.InsecureSkipVerify)
|
2016-01-11 12:20:51 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if tlsConfig != nil {
|
|
|
|
config.Net.TLS.Config = tlsConfig
|
|
|
|
config.Net.TLS.Enable = true
|
|
|
|
}
|
|
|
|
|
|
|
|
producer, err := sarama.NewSyncProducer(k.Brokers, config)
|
2015-08-26 17:02:10 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
k.producer = producer
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Close shuts down the underlying sarama producer, flushing any buffered
// messages, and returns its error.
func (k *Kafka) Close() error {
	return k.producer.Close()
}
|
|
|
|
|
|
|
|
// SampleConfig returns the example TOML configuration for this plugin.
func (k *Kafka) SampleConfig() string {
	return sampleConfig
}
|
|
|
|
|
|
|
|
// Description returns a one-line summary of the plugin for telegraf's docs.
func (k *Kafka) Description() string {
	return "Configuration for the Kafka server to send metrics to"
}
|
|
|
|
|
2016-01-27 23:15:14 +00:00
|
|
|
func (k *Kafka) Write(metrics []telegraf.Metric) error {
|
|
|
|
if len(metrics) == 0 {
|
2015-08-26 17:02:10 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-02-10 22:50:07 +00:00
|
|
|
for _, metric := range metrics {
|
|
|
|
values, err := k.serializer.Serialize(metric)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2015-08-26 17:02:10 +00:00
|
|
|
}
|
2016-02-10 22:50:07 +00:00
|
|
|
|
|
|
|
var pubErr error
|
|
|
|
for _, value := range values {
|
|
|
|
m := &sarama.ProducerMessage{
|
|
|
|
Topic: k.Topic,
|
|
|
|
Value: sarama.StringEncoder(value),
|
|
|
|
}
|
|
|
|
if h, ok := metric.Tags()[k.RoutingTag]; ok {
|
|
|
|
m.Key = sarama.StringEncoder(h)
|
|
|
|
}
|
|
|
|
|
|
|
|
_, _, pubErr = k.producer.SendMessage(m)
|
2015-08-26 17:02:10 +00:00
|
|
|
}
|
|
|
|
|
2016-02-10 22:50:07 +00:00
|
|
|
if pubErr != nil {
|
|
|
|
return fmt.Errorf("FAILED to send kafka message: %s\n", pubErr)
|
2015-08-26 17:02:10 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// init registers the plugin with telegraf's output registry under the
// name "kafka".
func init() {
	outputs.Add("kafka", func() telegraf.Output {
		return &Kafka{}
	})
}
|