# Kafka Producer Output Plugin

This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.html) acting as a Kafka Producer.

```
[[outputs.kafka]]
  ## URLs of kafka brokers
  brokers = ["localhost:9092"]
  ## Kafka topic for producer messages
  topic = "telegraf"

  ## Optional topic suffix configuration.
  ## If the section is omitted, no suffix is used.
  ## The following topic suffix methods are supported:
  ##   measurement - suffix equals separator + measurement's name
  ##   tags        - suffix equals separator + specified tags' values,
  ##                 interleaved with the separator

  ## Suffix equals "_" + measurement's name
  # [outputs.kafka.topic_suffix]
  #   method = "measurement"
  #   separator = "_"

  ## Suffix equals "__" + measurement's "foo" tag value.
  ## If there is no such tag, the suffix equals an empty string.
  # [outputs.kafka.topic_suffix]
  #   method = "tags"
  #   keys = ["foo"]
  #   separator = "__"

  ## Suffix equals "_" + measurement's "foo" and "bar"
  ## tag values, separated by "_". If such tags don't exist,
  ## their values are treated as empty strings.
  # [outputs.kafka.topic_suffix]
  #   method = "tags"
  #   keys = ["foo", "bar"]
  #   separator = "_"

  ## Telegraf tag to use as a routing key,
  ## i.e. if this tag exists, its value will be used as the routing key.
  routing_tag = "host"

  ## CompressionCodec represents the various compression codecs recognized by
  ## Kafka in messages.
  ##   0 : No compression
  ##   1 : Gzip compression
  ##   2 : Snappy compression
  compression_codec = 0

  ## RequiredAcks is used in Produce Requests to tell the broker how many
  ## replica acknowledgements it must see before responding.
  ##    0 : the producer never waits for an acknowledgement from the broker.
  ##        This option provides the lowest latency but the weakest durability
  ##        guarantees (some data will be lost when a server fails).
  ##    1 : the producer gets an acknowledgement after the leader replica has
  ##        received the data. This option provides better durability, as the
  ##        client waits until the server acknowledges the request as successful
  ##        (only messages that were written to the now-dead leader but not yet
  ##        replicated will be lost).
  ##   -1 : the producer gets an acknowledgement after all in-sync replicas have
  ##        received the data. This option provides the best durability; no
  ##        messages will be lost as long as at least one in-sync replica
  ##        remains.
  required_acks = -1

  ## The total number of times to retry sending a message
  max_retry = 3

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## Optional SASL Config
  # sasl_username = "kafka"
  # sasl_password = "secret"

  data_format = "influx"
```
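To make the suffix methods above concrete, here is a minimal sketch that routes each measurement to its own topic; the broker URL is the sample's placeholder. With `method = "measurement"` and `separator = "_"`, a metric named `cpu` is published to the topic `telegraf_cpu`.

```
[[outputs.kafka]]
  brokers = ["localhost:9092"]
  ## Base topic; the suffix is appended per metric.
  topic = "telegraf"
  data_format = "influx"

  ## A metric named "cpu" ends up in the topic "telegraf_cpu".
  [outputs.kafka.topic_suffix]
    method = "measurement"
    separator = "_"
```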
### Required parameters:

* `brokers`: List of strings specifying the cluster of `kafka` brokers to write to. On each flush interval, Telegraf will randomly choose one of the URLs to write to. Each URL should include only host and port, e.g. `["{host}:{port}","{host2}:{port2}"]`.
* `topic`: The `kafka` topic to publish to.

### Optional parameters:

* `routing_tag`: If this tag exists, its value will be used as the routing key.
* `compression_codec`: What level of compression to use: `0` -> no compression, `1` -> gzip compression, `2` -> snappy compression.
* `required_acks`: How many `acks` are required from the `kafka` broker cluster before a write is considered successful.
* `max_retry`: Maximum number of times to retry a failed write.
* `ssl_ca`: Path to the SSL CA certificate.
* `ssl_cert`: Path to the SSL client certificate.
* `ssl_key`: Path to the SSL client key.
* `insecure_skip_verify`: Use SSL but skip chain & host verification (default: false).
* `sasl_username`, `sasl_password`: Optional SASL credentials for authenticating to the brokers (a combined TLS/SASL example follows this list).
* `data_format`: [About Telegraf data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md)
* `topic_suffix`: Which, if any, method of calculating the `kafka` topic suffix to use. For examples, please refer to the sample configuration above.
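Bringing the optional transport settings together, the following is a sketch of a TLS- and SASL-enabled producer, assuming the brokers expose a TLS listener on port 9093; the hostnames, certificate paths, and credentials are placeholders, not working values.

```
[[outputs.kafka]]
  ## Placeholder broker addresses; Telegraf picks one at random per flush.
  brokers = ["broker-1.example.com:9093", "broker-2.example.com:9093"]
  topic = "telegraf"

  ## Trade a little CPU for smaller messages on the wire.
  compression_codec = 2   # snappy

  ## Wait for all in-sync replicas before considering a write successful.
  required_acks = -1
  max_retry = 3

  ## TLS material for authenticating the brokers (placeholder paths).
  ssl_ca = "/etc/telegraf/ca.pem"
  ssl_cert = "/etc/telegraf/cert.pem"
  ssl_key = "/etc/telegraf/key.pem"

  ## SASL credentials (placeholders).
  sasl_username = "kafka"
  sasl_password = "secret"

  data_format = "influx"
```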