package kinesis

import (
	"log"
	"os"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kinesis"
	"github.com/satori/go.uuid"

	"github.com/influxdata/telegraf"
	internalaws "github.com/influxdata/telegraf/internal/config/aws"
	"github.com/influxdata/telegraf/plugins/outputs"
	"github.com/influxdata/telegraf/plugins/serializers"
)

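// KinesisOutput writes Telegraf metrics to an AWS Kinesis stream using the
// PutRecords API. The credential fields mirror Telegraf's shared AWS
// credential options (static keys, an assumed role, a named profile, a shared
// credentials file, environment variables, or the EC2 instance profile).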
type KinesisOutput struct {
	Region    string `toml:"region"`
	AccessKey string `toml:"access_key"`
	SecretKey string `toml:"secret_key"`
	RoleARN   string `toml:"role_arn"`
	Profile   string `toml:"profile"`
	Filename  string `toml:"shared_credential_file"`
	Token     string `toml:"token"`

	StreamName         string `toml:"streamname"`
	PartitionKey       string `toml:"partitionkey"`
	RandomPartitionKey bool   `toml:"use_random_partitionkey"`
	Debug              bool   `toml:"debug"`

	svc        *kinesis.Kinesis
	serializer serializers.Serializer
}

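// sampleConfig is the commented example configuration returned by
// SampleConfig. A filled-in [[outputs.kinesis]] section might look roughly
// like the following (the region, stream, and partition key values below are
// illustrative only):
//
//	[[outputs.kinesis]]
//	  region = "us-west-2"
//	  streamname = "telegraf-metrics"
//	  partitionkey = "telegraf"
//	  use_random_partitionkey = false
//	  data_format = "influx"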
var sampleConfig = `
  ## Amazon REGION of kinesis endpoint.
  region = "ap-southeast-2"

  ## Amazon Credentials
  ## Credentials are loaded in the following order
  ## 1) Assumed credentials via STS if role_arn is specified
  ## 2) explicit credentials from 'access_key' and 'secret_key'
  ## 3) shared profile from 'profile'
  ## 4) environment variables
  ## 5) shared credentials file
  ## 6) EC2 Instance Profile
  #access_key = ""
  #secret_key = ""
  #token = ""
  #role_arn = ""
  #profile = ""
  #shared_credential_file = ""

  ## Kinesis StreamName must exist prior to starting telegraf.
  streamname = "StreamName"

  ## PartitionKey as used for sharding data.
  partitionkey = "PartitionKey"
  ## If set, the partitionKey will be a random UUID on every put.
  ## This allows for scaling across multiple shards in a stream.
  ## This will cause issues with ordering.
  use_random_partitionkey = false

  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"

  ## debug will show upstream aws messages.
  debug = false
`

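// SampleConfig returns the sample TOML configuration for this output.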
func (k *KinesisOutput) SampleConfig() string {
	return sampleConfig
}

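// Description returns a one-line description of the Kinesis output plugin.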
func (k *KinesisOutput) Description() string {
	return "Configuration for the AWS Kinesis output."
}

// checkstream reports whether the StreamName s exists in the slice of stream
// names returned from the ListStreams API request.
func checkstream(l []*string, s string) bool {
	for _, stream := range l {
		if *stream == s {
			return true
		}
	}
	return false
}

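// Connect builds a Kinesis client from the configured AWS credentials and
// verifies that the configured StreamName exists; if it does not, the process
// exits.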
func (k *KinesisOutput) Connect() error {
	// We first attempt to create a session to Kinesis using an IAM role; if
	// that fails, we fall through to environment variables and then to shared
	// credentials.
	if k.Debug {
		log.Printf("E! kinesis: Establishing a connection to Kinesis in %+v", k.Region)
	}

	credentialConfig := &internalaws.CredentialConfig{
		Region:    k.Region,
		AccessKey: k.AccessKey,
		SecretKey: k.SecretKey,
		RoleARN:   k.RoleARN,
		Profile:   k.Profile,
		Filename:  k.Filename,
		Token:     k.Token,
	}
	configProvider := credentialConfig.Credentials()
	svc := kinesis.New(configProvider)

	KinesisParams := &kinesis.ListStreamsInput{
		Limit: aws.Int64(100),
	}

	resp, err := svc.ListStreams(KinesisParams)
	if err != nil {
		log.Printf("E! kinesis: Error in ListStreams API call: %+v \n", err)
	}

	if checkstream(resp.StreamNames, k.StreamName) {
		if k.Debug {
			log.Printf("E! kinesis: Stream Exists")
		}
		k.svc = svc
		return nil
	} else {
		log.Printf("E! kinesis: You have configured a StreamName %+v which does not exist. Exiting.", k.StreamName)
		os.Exit(1)
	}
	return err
}

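// Close is a no-op for the Kinesis output.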
func (k *KinesisOutput) Close() error {
	return nil
}

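// SetSerializer stores the serializer used to encode metrics before they are
// put on the stream.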
func (k *KinesisOutput) SetSerializer(serializer serializers.Serializer) {
	k.serializer = serializer
}

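// writekinesis issues a single PutRecords call for the given batch of records
// and returns how long the call took. API errors are logged but not returned
// to the caller; when Debug is set the full PutRecords response is logged as
// well.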
func writekinesis(k *KinesisOutput, r []*kinesis.PutRecordsRequestEntry) time.Duration {
	start := time.Now()
	payload := &kinesis.PutRecordsInput{
		Records:    r,
		StreamName: aws.String(k.StreamName),
	}

	if k.Debug {
		resp, err := k.svc.PutRecords(payload)
		if err != nil {
			log.Printf("E! kinesis: Unable to write to Kinesis : %+v \n", err.Error())
		}
		log.Printf("E! %+v \n", resp)
	} else {
		_, err := k.svc.PutRecords(payload)
		if err != nil {
			log.Printf("E! kinesis: Unable to write to Kinesis : %+v \n", err.Error())
		}
	}
	return time.Since(start)
}

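// Write serializes each metric with the configured serializer into a
// PutRecordsRequestEntry and flushes them to Kinesis in batches of at most
// 500 records, the PutRecords per-request limit.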
func (k *KinesisOutput) Write(metrics []telegraf.Metric) error {
	var sz uint32

	if len(metrics) == 0 {
		return nil
	}

	r := []*kinesis.PutRecordsRequestEntry{}

	for _, metric := range metrics {
		sz++

		values, err := k.serializer.Serialize(metric)
		if err != nil {
			return err
		}

		partitionKey := k.PartitionKey
		if k.RandomPartitionKey {
			u := uuid.NewV4()
			partitionKey = u.String()
		}

		d := kinesis.PutRecordsRequestEntry{
			Data:         values,
			PartitionKey: aws.String(partitionKey),
		}

		r = append(r, &d)

		if sz == 500 {
			// The maximum number of records per PutRecords request is 500.
			elapsed := writekinesis(k, r)
			log.Printf("E! Wrote a %+v point batch to Kinesis in %+v.\n", sz, elapsed)
			sz = 0
			r = nil
		}
	}

	// Flush any remaining records; skip the call when the final batch left r empty.
	if len(r) > 0 {
		writekinesis(k, r)
	}

	return nil
}

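// init registers the plugin with Telegraf's output registry under the name
// "kinesis".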
func init() {
	outputs.Add("kinesis", func() telegraf.Output {
		return &KinesisOutput{}
	})
}