renaming plugins -> inputs

plugins/outputs/all/all.go (new file, 16 lines)
@@ -0,0 +1,16 @@
package all

import (
	_ "github.com/influxdb/telegraf/plugins/outputs/amon"
	_ "github.com/influxdb/telegraf/plugins/outputs/amqp"
	_ "github.com/influxdb/telegraf/plugins/outputs/datadog"
	_ "github.com/influxdb/telegraf/plugins/outputs/influxdb"
	_ "github.com/influxdb/telegraf/plugins/outputs/kafka"
	_ "github.com/influxdb/telegraf/plugins/outputs/kinesis"
	_ "github.com/influxdb/telegraf/plugins/outputs/librato"
	_ "github.com/influxdb/telegraf/plugins/outputs/mqtt"
	_ "github.com/influxdb/telegraf/plugins/outputs/nsq"
	_ "github.com/influxdb/telegraf/plugins/outputs/opentsdb"
	_ "github.com/influxdb/telegraf/plugins/outputs/prometheus_client"
	_ "github.com/influxdb/telegraf/plugins/outputs/riemann"
)

plugins/outputs/amon/README.md (new file, 9 lines)
@@ -0,0 +1,9 @@
# Amon Output Plugin

This plugin writes to [Amon](https://www.amon.cx)
and requires a `serverkey` and `amoninstance` URL, which can be obtained [here](https://www.amon.cx/docs/monitoring/)
for the account.

If the point value being sent cannot be converted to a float64, the metric is skipped.

Metrics are grouped by converting any `_` characters to `.` in the Point Name.
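
For reference, a minimal telegraf configuration sketch for this output, assembled from the sample config embedded in amon.go below; the `[outputs.amon]` table name is an assumption based on the name the plugin registers under:

	[outputs.amon]
	  server_key = "my-server-key"                # required
	  amon_instance = "https://youramoninstance"  # required
	  # timeout = "5s"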

plugins/outputs/amon/amon.go (new file, 157 lines)
@@ -0,0 +1,157 @@
package amon

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"strings"

	"github.com/influxdb/influxdb/client/v2"
	"github.com/influxdb/telegraf/internal"
	"github.com/influxdb/telegraf/plugins/outputs"
)

type Amon struct {
	ServerKey    string
	AmonInstance string
	Timeout      internal.Duration

	client *http.Client
}

var sampleConfig = `
  # Amon Server Key
  server_key = "my-server-key" # required.

  # Amon Instance URL
  amon_instance = "https://youramoninstance" # required

  # Connection timeout.
  # timeout = "5s"
`

type TimeSeries struct {
	Series []*Metric `json:"series"`
}

type Metric struct {
	Metric string   `json:"metric"`
	Points [1]Point `json:"points"`
}

type Point [2]float64

func (a *Amon) Connect() error {
	if a.ServerKey == "" || a.AmonInstance == "" {
		return fmt.Errorf("serverkey and amon_instance are required fields for amon output")
	}
	a.client = &http.Client{
		Timeout: a.Timeout.Duration,
	}
	return nil
}

func (a *Amon) Write(points []*client.Point) error {
	if len(points) == 0 {
		return nil
	}
	ts := TimeSeries{}
	tempSeries := []*Metric{}
	metricCounter := 0

	for _, pt := range points {
		mname := strings.Replace(pt.Name(), "_", ".", -1)
		if amonPts, err := buildPoints(pt); err == nil {
			for fieldName, amonPt := range amonPts {
				metric := &Metric{
					Metric: mname + "_" + strings.Replace(fieldName, "_", ".", -1),
				}
				metric.Points[0] = amonPt
				tempSeries = append(tempSeries, metric)
				metricCounter++
			}
		} else {
			log.Printf("unable to build Metric for %s, skipping\n", pt.Name())
		}
	}

	ts.Series = make([]*Metric, metricCounter)
	copy(ts.Series, tempSeries[0:])
	tsBytes, err := json.Marshal(ts)
	if err != nil {
		return fmt.Errorf("unable to marshal TimeSeries, %s\n", err.Error())
	}
	req, err := http.NewRequest("POST", a.authenticatedUrl(), bytes.NewBuffer(tsBytes))
	if err != nil {
		return fmt.Errorf("unable to create http.Request, %s\n", err.Error())
	}
	req.Header.Add("Content-Type", "application/json")

	resp, err := a.client.Do(req)
	if err != nil {
		return fmt.Errorf("error POSTing metrics, %s\n", err.Error())
	}
	defer resp.Body.Close()

	if resp.StatusCode < 200 || resp.StatusCode > 209 {
		return fmt.Errorf("received bad status code, %d\n", resp.StatusCode)
	}

	return nil
}

func (a *Amon) SampleConfig() string {
	return sampleConfig
}

func (a *Amon) Description() string {
	return "Configuration for Amon Server to send metrics to."
}

func (a *Amon) authenticatedUrl() string {
	return fmt.Sprintf("%s/api/system/%s", a.AmonInstance, a.ServerKey)
}

func buildPoints(pt *client.Point) (map[string]Point, error) {
	pts := make(map[string]Point)
	for k, v := range pt.Fields() {
		var p Point
		if err := p.setValue(v); err != nil {
			return pts, fmt.Errorf("unable to extract value from Fields, %s", err.Error())
		}
		p[0] = float64(pt.Time().Unix())
		pts[k] = p
	}
	return pts, nil
}

func (p *Point) setValue(v interface{}) error {
	switch d := v.(type) {
	case int:
		p[1] = float64(int(d))
	case int32:
		p[1] = float64(int32(d))
	case int64:
		p[1] = float64(int64(d))
	case float32:
		p[1] = float64(d)
	case float64:
		p[1] = float64(d)
	default:
		return fmt.Errorf("undeterminable type")
	}
	return nil
}

func (a *Amon) Close() error {
	return nil
}

func init() {
	outputs.Add("amon", func() outputs.Output {
		return &Amon{}
	})
}

plugins/outputs/amon/amon_test.go (new file, 90 lines)
@@ -0,0 +1,90 @@
package amon

import (
	"fmt"
	"reflect"
	"testing"
	"time"

	"github.com/influxdb/telegraf/testutil"

	"github.com/influxdb/influxdb/client/v2"
)

func TestBuildPoint(t *testing.T) {
	var tagtests = []struct {
		ptIn  *client.Point
		outPt Point
		err   error
	}{
		{
			testutil.TestPoint(float64(0.0), "testpt"),
			Point{
				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
				0.0,
			},
			nil,
		},
		{
			testutil.TestPoint(float64(1.0), "testpt"),
			Point{
				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
				1.0,
			},
			nil,
		},
		{
			testutil.TestPoint(int(10), "testpt"),
			Point{
				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
				10.0,
			},
			nil,
		},
		{
			testutil.TestPoint(int32(112345), "testpt"),
			Point{
				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
				112345.0,
			},
			nil,
		},
		{
			testutil.TestPoint(int64(112345), "testpt"),
			Point{
				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
				112345.0,
			},
			nil,
		},
		{
			testutil.TestPoint(float32(11234.5), "testpt"),
			Point{
				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
				11234.5,
			},
			nil,
		},
		{
			testutil.TestPoint("11234.5", "testpt"),
			Point{
				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
				11234.5,
			},
			fmt.Errorf("unable to extract value from Fields, undeterminable type"),
		},
	}
	for _, tt := range tagtests {
		pt, err := buildPoints(tt.ptIn)
		if err != nil && tt.err == nil {
			t.Errorf("%s: unexpected error, %+v\n", tt.ptIn.Name(), err)
		}
		if tt.err != nil && err == nil {
			t.Errorf("%s: expected an error (%s) but none returned", tt.ptIn.Name(), tt.err.Error())
		}
		if !reflect.DeepEqual(pt["value"], tt.outPt) && tt.err == nil {
			t.Errorf("%s: \nexpected %+v\ngot %+v\n",
				tt.ptIn.Name(), tt.outPt, pt["value"])
		}
	}
}

plugins/outputs/amqp/README.md (new file, 9 lines)
@@ -0,0 +1,9 @@
# AMQP Output Plugin

This plugin writes to an AMQP exchange, using the tag defined in the configuration file
as `RoutingTag` as the routing key.

If `RoutingTag` is empty, an empty routing key is used.
Metrics are grouped into batches by `RoutingTag`.

This plugin does not bind the exchange to a queue; that must be done by the consumer.
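
A configuration sketch matching the sample config in amqp.go below; the `[outputs.amqp]` table name is an assumption based on the plugin's registration name:

	[outputs.amqp]
	  url = "amqp://localhost:5672/influxdb"
	  exchange = "telegraf"
	  routing_tag = "host"
	  # retention_policy = "default"
	  # database = "telegraf"
	  # precision = "s"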

plugins/outputs/amqp/amqp.go (new file, 161 lines)
@@ -0,0 +1,161 @@
package amqp

import (
	"bytes"
	"fmt"
	"log"
	"sync"
	"time"

	"github.com/influxdb/influxdb/client/v2"
	"github.com/influxdb/telegraf/plugins/outputs"
	"github.com/streadway/amqp"
)

type AMQP struct {
	// AMQP broker to send metrics to
	URL string
	// AMQP exchange
	Exchange string
	// Routing Key Tag
	RoutingTag string `toml:"routing_tag"`
	// InfluxDB database
	Database string
	// InfluxDB retention policy
	RetentionPolicy string
	// InfluxDB precision
	Precision string

	channel *amqp.Channel
	sync.Mutex
	headers amqp.Table
}

const (
	DefaultRetentionPolicy = "default"
	DefaultDatabase        = "telegraf"
	DefaultPrecision       = "s"
)

var sampleConfig = `
  # AMQP url
  url = "amqp://localhost:5672/influxdb"
  # AMQP exchange
  exchange = "telegraf"
  # Telegraf tag to use as a routing key
  # ie, if this tag exists, its value will be used as the routing key
  routing_tag = "host"

  # InfluxDB retention policy
  #retention_policy = "default"
  # InfluxDB database
  #database = "telegraf"
  # InfluxDB precision
  #precision = "s"
`

func (q *AMQP) Connect() error {
	q.Lock()
	defer q.Unlock()

	q.headers = amqp.Table{
		"precision":        q.Precision,
		"database":         q.Database,
		"retention_policy": q.RetentionPolicy,
	}

	connection, err := amqp.Dial(q.URL)
	if err != nil {
		return err
	}
	channel, err := connection.Channel()
	if err != nil {
		return fmt.Errorf("Failed to open a channel: %s", err)
	}

	err = channel.ExchangeDeclare(
		q.Exchange, // name
		"topic",    // type
		true,       // durable
		false,      // delete when unused
		false,      // internal
		false,      // no-wait
		nil,        // arguments
	)
	if err != nil {
		return fmt.Errorf("Failed to declare an exchange: %s", err)
	}
	q.channel = channel
	go func() {
		log.Printf("Closing: %s", <-connection.NotifyClose(make(chan *amqp.Error)))
		log.Printf("Trying to reconnect")
		for err := q.Connect(); err != nil; err = q.Connect() {
			log.Println(err)
			time.Sleep(10 * time.Second)
		}
	}()
	return nil
}

func (q *AMQP) Close() error {
	return q.channel.Close()
}

func (q *AMQP) SampleConfig() string {
	return sampleConfig
}

func (q *AMQP) Description() string {
	return "Configuration for the AMQP server to send metrics to"
}

func (q *AMQP) Write(points []*client.Point) error {
	q.Lock()
	defer q.Unlock()
	if len(points) == 0 {
		return nil
	}
	var outbuf = make(map[string][][]byte)

	for _, p := range points {
		// Combine tags from Point and BatchPoints and grab the resulting
		// line-protocol output string to write to AMQP
		var value, key string
		value = p.String()

		if q.RoutingTag != "" {
			if h, ok := p.Tags()[q.RoutingTag]; ok {
				key = h
			}
		}
		outbuf[key] = append(outbuf[key], []byte(value))
	}
	for key, buf := range outbuf {
		err := q.channel.Publish(
			q.Exchange, // exchange
			key,        // routing key
			false,      // mandatory
			false,      // immediate
			amqp.Publishing{
				Headers:     q.headers,
				ContentType: "text/plain",
				Body:        bytes.Join(buf, []byte("\n")),
			})
		if err != nil {
			return fmt.Errorf("FAILED to send amqp message: %s", err)
		}
	}
	return nil
}

func init() {
	outputs.Add("amqp", func() outputs.Output {
		return &AMQP{
			Database:        DefaultDatabase,
			Precision:       DefaultPrecision,
			RetentionPolicy: DefaultRetentionPolicy,
		}
	})
}

plugins/outputs/amqp/amqp_test.go (new file, 28 lines)
@@ -0,0 +1,28 @@
package amqp

import (
	"testing"

	"github.com/influxdb/telegraf/testutil"
	"github.com/stretchr/testify/require"
)

func TestConnectAndWrite(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	var url = "amqp://" + testutil.GetLocalHost() + ":5672/"
	q := &AMQP{
		URL:      url,
		Exchange: "telegraf_test",
	}

	// Verify that we can connect to the AMQP broker
	err := q.Connect()
	require.NoError(t, err)

	// Verify that we can successfully write data to the amqp broker
	err = q.Write(testutil.MockBatchPoints().Points())
	require.NoError(t, err)
}

plugins/outputs/datadog/README.md (new file, 9 lines)
@@ -0,0 +1,9 @@
# Datadog Output Plugin

This plugin writes to the [Datadog Metrics API](http://docs.datadoghq.com/api/#metrics)
and requires an `apikey` which can be obtained [here](https://app.datadoghq.com/account/settings#api)
for the account.

If the point value being sent cannot be converted to a float64, the metric is skipped.

Metrics are grouped by converting any `_` characters to `.` in the Point Name.
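
A configuration sketch matching the sample config in datadog.go below; the `[outputs.datadog]` table name is an assumption based on the plugin's registration name:

	[outputs.datadog]
	  apikey = "my-secret-key"  # required
	  # timeout = "5s"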

plugins/outputs/datadog/datadog.go (new file, 179 lines)
@@ -0,0 +1,179 @@
package datadog

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"net/url"
	"sort"
	"strings"

	"github.com/influxdb/influxdb/client/v2"
	"github.com/influxdb/telegraf/internal"
	"github.com/influxdb/telegraf/plugins/outputs"
)

type Datadog struct {
	Apikey  string
	Timeout internal.Duration

	apiUrl string
	client *http.Client
}

var sampleConfig = `
  # Datadog API key
  apikey = "my-secret-key" # required.

  # Connection timeout.
  # timeout = "5s"
`

type TimeSeries struct {
	Series []*Metric `json:"series"`
}

type Metric struct {
	Metric string   `json:"metric"`
	Points [1]Point `json:"points"`
	Host   string   `json:"host"`
	Tags   []string `json:"tags,omitempty"`
}

type Point [2]float64

const datadog_api = "https://app.datadoghq.com/api/v1/series"

func NewDatadog(apiUrl string) *Datadog {
	return &Datadog{
		apiUrl: apiUrl,
	}
}

func (d *Datadog) Connect() error {
	if d.Apikey == "" {
		return fmt.Errorf("apikey is a required field for datadog output")
	}
	d.client = &http.Client{
		Timeout: d.Timeout.Duration,
	}
	return nil
}

func (d *Datadog) Write(points []*client.Point) error {
	if len(points) == 0 {
		return nil
	}
	ts := TimeSeries{}
	tempSeries := []*Metric{}
	metricCounter := 0

	for _, pt := range points {
		mname := strings.Replace(pt.Name(), "_", ".", -1)
		if amonPts, err := buildPoints(pt); err == nil {
			for fieldName, amonPt := range amonPts {
				metric := &Metric{
					Metric: mname + strings.Replace(fieldName, "_", ".", -1),
				}
				metric.Points[0] = amonPt
				tempSeries = append(tempSeries, metric)
				metricCounter++
			}
		} else {
			log.Printf("unable to build Metric for %s, skipping\n", pt.Name())
		}
	}

	ts.Series = make([]*Metric, metricCounter)
	copy(ts.Series, tempSeries[0:])
	tsBytes, err := json.Marshal(ts)
	if err != nil {
		return fmt.Errorf("unable to marshal TimeSeries, %s\n", err.Error())
	}
	req, err := http.NewRequest("POST", d.authenticatedUrl(), bytes.NewBuffer(tsBytes))
	if err != nil {
		return fmt.Errorf("unable to create http.Request, %s\n", err.Error())
	}
	req.Header.Add("Content-Type", "application/json")

	resp, err := d.client.Do(req)
	if err != nil {
		return fmt.Errorf("error POSTing metrics, %s\n", err.Error())
	}
	defer resp.Body.Close()

	if resp.StatusCode < 200 || resp.StatusCode > 209 {
		return fmt.Errorf("received bad status code, %d\n", resp.StatusCode)
	}

	return nil
}

func (d *Datadog) SampleConfig() string {
	return sampleConfig
}

func (d *Datadog) Description() string {
	return "Configuration for DataDog API to send metrics to."
}

func (d *Datadog) authenticatedUrl() string {
	q := url.Values{
		"api_key": []string{d.Apikey},
	}
	return fmt.Sprintf("%s?%s", d.apiUrl, q.Encode())
}

func buildPoints(pt *client.Point) (map[string]Point, error) {
	pts := make(map[string]Point)
	for k, v := range pt.Fields() {
		var p Point
		if err := p.setValue(v); err != nil {
			return pts, fmt.Errorf("unable to extract value from Fields, %s", err.Error())
		}
		p[0] = float64(pt.Time().Unix())
		pts[k] = p
	}
	return pts, nil
}

func buildTags(ptTags map[string]string) []string {
	tags := make([]string, len(ptTags))
	index := 0
	for k, v := range ptTags {
		tags[index] = fmt.Sprintf("%s:%s", k, v)
		index += 1
	}
	sort.Strings(tags)
	return tags
}

func (p *Point) setValue(v interface{}) error {
	switch d := v.(type) {
	case int:
		p[1] = float64(int(d))
	case int32:
		p[1] = float64(int32(d))
	case int64:
		p[1] = float64(int64(d))
	case float32:
		p[1] = float64(d)
	case float64:
		p[1] = float64(d)
	default:
		return fmt.Errorf("undeterminable type")
	}
	return nil
}

func (d *Datadog) Close() error {
	return nil
}

func init() {
	outputs.Add("datadog", func() outputs.Output {
		return NewDatadog(datadog_api)
	})
}

plugins/outputs/datadog/datadog_test.go (new file, 177 lines)
@@ -0,0 +1,177 @@
package datadog

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"reflect"
	"testing"
	"time"

	"github.com/influxdb/telegraf/testutil"

	"github.com/influxdb/influxdb/client/v2"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var (
	fakeUrl    = "http://test.datadog.com"
	fakeApiKey = "123456"
)

func fakeDatadog() *Datadog {
	d := NewDatadog(fakeUrl)
	d.Apikey = fakeApiKey
	return d
}

func TestUriOverride(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		json.NewEncoder(w).Encode(`{"status":"ok"}`)
	}))
	defer ts.Close()

	d := NewDatadog(ts.URL)
	d.Apikey = "123456"
	err := d.Connect()
	require.NoError(t, err)
	err = d.Write(testutil.MockBatchPoints().Points())
	require.NoError(t, err)
}

func TestBadStatusCode(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusInternalServerError)
		json.NewEncoder(w).Encode(`{ 'errors': [
			'Something bad happened to the server.',
			'Your query made the server very sad.'
		]
		}`)
	}))
	defer ts.Close()

	d := NewDatadog(ts.URL)
	d.Apikey = "123456"
	err := d.Connect()
	require.NoError(t, err)
	err = d.Write(testutil.MockBatchPoints().Points())
	if err == nil {
		t.Errorf("error expected but none returned")
	} else {
		require.EqualError(t, fmt.Errorf("received bad status code, 500\n"), err.Error())
	}
}

func TestAuthenticatedUrl(t *testing.T) {
	d := fakeDatadog()

	authUrl := d.authenticatedUrl()
	assert.EqualValues(t, fmt.Sprintf("%s?api_key=%s", fakeUrl, fakeApiKey), authUrl)
}

func TestBuildTags(t *testing.T) {
	var tagtests = []struct {
		ptIn    map[string]string
		outTags []string
	}{
		{
			map[string]string{"one": "two", "three": "four"},
			[]string{"one:two", "three:four"},
		},
		{
			map[string]string{"aaa": "bbb"},
			[]string{"aaa:bbb"},
		},
		{
			map[string]string{},
			[]string{},
		},
	}
	for _, tt := range tagtests {
		tags := buildTags(tt.ptIn)
		if !reflect.DeepEqual(tags, tt.outTags) {
			t.Errorf("\nexpected %+v\ngot %+v\n", tt.outTags, tags)
		}
	}
}

func TestBuildPoint(t *testing.T) {
	var tagtests = []struct {
		ptIn  *client.Point
		outPt Point
		err   error
	}{
		{
			testutil.TestPoint(0.0, "test1"),
			Point{
				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
				0.0,
			},
			nil,
		},
		{
			testutil.TestPoint(1.0, "test2"),
			Point{
				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
				1.0,
			},
			nil,
		},
		{
			testutil.TestPoint(10, "test3"),
			Point{
				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
				10.0,
			},
			nil,
		},
		{
			testutil.TestPoint(int32(112345), "test4"),
			Point{
				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
				112345.0,
			},
			nil,
		},
		{
			testutil.TestPoint(int64(112345), "test5"),
			Point{
				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
				112345.0,
			},
			nil,
		},
		{
			testutil.TestPoint(float32(11234.5), "test6"),
			Point{
				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
				11234.5,
			},
			nil,
		},
		{
			testutil.TestPoint("11234.5", "test7"),
			Point{
				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
				11234.5,
			},
			fmt.Errorf("unable to extract value from Fields, undeterminable type"),
		},
	}
	for _, tt := range tagtests {
		pt, err := buildPoints(tt.ptIn)
		if err != nil && tt.err == nil {
			t.Errorf("%s: unexpected error, %+v\n", tt.ptIn.Name(), err)
		}
		if tt.err != nil && err == nil {
			t.Errorf("%s: expected an error (%s) but none returned", tt.ptIn.Name(), tt.err.Error())
		}
		if !reflect.DeepEqual(pt["value"], tt.outPt) && tt.err == nil {
			t.Errorf("%s: \nexpected %+v\ngot %+v\n",
				tt.ptIn.Name(), tt.outPt, pt["value"])
		}
	}
}

plugins/outputs/influxdb/README.md (new file, 12 lines)
@@ -0,0 +1,12 @@
# InfluxDB Output Plugin

This plugin writes to [InfluxDB](https://www.influxdb.com) via HTTP or UDP.

Required parameters:

* `urls`: List of strings; this is for InfluxDB clustering support. On each
  flush interval, Telegraf will randomly choose one of the urls to write to.
  Each URL should start with either `http://` or `udp://`.
* `database`: The name of the database to write to.

See the example configuration after this README.
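
A configuration sketch matching the sample config in influxdb.go below; the `[outputs.influxdb]` table name is an assumption based on the plugin's registration name:

	[outputs.influxdb]
	  urls = ["http://localhost:8086"]  # or ["udp://localhost:8089"] for UDP
	  database = "telegraf"
	  precision = "s"
	  # timeout = "5s"
	  # username = "telegraf"
	  # password = "metricsmetricsmetricsmetrics"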

plugins/outputs/influxdb/influxdb.go (new file, 162 lines)
@@ -0,0 +1,162 @@
package influxdb

import (
	"errors"
	"fmt"
	"log"
	"math/rand"
	"net/url"
	"strings"
	"time"

	"github.com/influxdb/influxdb/client/v2"
	"github.com/influxdb/telegraf/internal"
	"github.com/influxdb/telegraf/plugins/outputs"
)

type InfluxDB struct {
	// URL is only for backwards compatibility
	URL        string
	URLs       []string `toml:"urls"`
	Username   string
	Password   string
	Database   string
	UserAgent  string
	Precision  string
	Timeout    internal.Duration
	UDPPayload int `toml:"udp_payload"`

	conns []client.Client
}

var sampleConfig = `
  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
  # Multiple urls can be specified but it is assumed that they are part of the same
  # cluster, this means that only ONE of the urls will be written to each interval.
  # urls = ["udp://localhost:8089"] # UDP endpoint example
  urls = ["http://localhost:8086"] # required
  # The target database for metrics (telegraf will create it if not exists)
  database = "telegraf" # required
  # Precision of writes, valid values are n, u, ms, s, m, and h
  # note: using second precision greatly helps InfluxDB compression
  precision = "s"

  # Connection timeout (for the connection with InfluxDB), formatted as a string.
  # If not provided, will default to 0 (no timeout)
  # timeout = "5s"
  # username = "telegraf"
  # password = "metricsmetricsmetricsmetrics"
  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
  # user_agent = "telegraf"
  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
  # udp_payload = 512
`

func (i *InfluxDB) Connect() error {
	var urls []string
	for _, u := range i.URLs {
		urls = append(urls, u)
	}

	// Backward-compatibility with single Influx URL config files
	// This could eventually be removed in favor of specifying the urls as a list
	if i.URL != "" {
		urls = append(urls, i.URL)
	}

	var conns []client.Client
	for _, u := range urls {
		switch {
		case strings.HasPrefix(u, "udp"):
			parsed_url, err := url.Parse(u)
			if err != nil {
				return err
			}

			if i.UDPPayload == 0 {
				i.UDPPayload = client.UDPPayloadSize
			}
			c, err := client.NewUDPClient(client.UDPConfig{
				Addr:        parsed_url.Host,
				PayloadSize: i.UDPPayload,
			})
			if err != nil {
				return err
			}
			conns = append(conns, c)
		default:
			// If URL doesn't start with "udp", assume HTTP client
			c, err := client.NewHTTPClient(client.HTTPConfig{
				Addr:      u,
				Username:  i.Username,
				Password:  i.Password,
				UserAgent: i.UserAgent,
				Timeout:   i.Timeout.Duration,
			})
			if err != nil {
				return err
			}

			// Create Database if it doesn't exist
			_, e := c.Query(client.Query{
				Command: fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s", i.Database),
			})

			if e != nil {
				log.Println("Database creation failed: " + e.Error())
			}

			conns = append(conns, c)
		}
	}

	i.conns = conns
	rand.Seed(time.Now().UnixNano())
	return nil
}

func (i *InfluxDB) Close() error {
	// InfluxDB client does not provide a Close() function
	return nil
}

func (i *InfluxDB) SampleConfig() string {
	return sampleConfig
}

func (i *InfluxDB) Description() string {
	return "Configuration for influxdb server to send metrics to"
}

// Choose a random server in the cluster to write to until a successful write
// occurs, logging each unsuccessful. If all servers fail, return error.
func (i *InfluxDB) Write(points []*client.Point) error {
	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
		Database:  i.Database,
		Precision: i.Precision,
	})

	for _, point := range points {
		bp.AddPoint(point)
	}

	// This will get set to nil if a successful write occurs
	err := errors.New("Could not write to any InfluxDB server in cluster")

	p := rand.Perm(len(i.conns))
	for _, n := range p {
		if e := i.conns[n].Write(bp); e != nil {
			log.Println("ERROR: " + e.Error())
		} else {
			err = nil
			break
		}
	}
	return err
}

func init() {
	outputs.Add("influxdb", func() outputs.Output {
		return &InfluxDB{}
	})
}

plugins/outputs/influxdb/influxdb_test.go (new file, 41 lines)
@@ -0,0 +1,41 @@
package influxdb

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/influxdb/telegraf/testutil"

	"github.com/stretchr/testify/require"
)

func TestUDPInflux(t *testing.T) {
	i := InfluxDB{
		URLs: []string{"udp://localhost:8089"},
	}

	err := i.Connect()
	require.NoError(t, err)
	err = i.Write(testutil.MockBatchPoints().Points())
	require.NoError(t, err)
}

func TestHTTPInflux(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprintln(w, `{"results":[{}]}`)
	}))
	defer ts.Close()

	i := InfluxDB{
		URLs: []string{ts.URL},
	}

	err := i.Connect()
	require.NoError(t, err)
	err = i.Write(testutil.MockBatchPoints().Points())
	require.NoError(t, err)
}

plugins/outputs/kafka/kafka.go (new file, 85 lines)
@@ -0,0 +1,85 @@
package kafka

import (
	"errors"
	"fmt"

	"github.com/Shopify/sarama"
	"github.com/influxdb/influxdb/client/v2"
	"github.com/influxdb/telegraf/plugins/outputs"
)

type Kafka struct {
	// Kafka brokers to send metrics to
	Brokers []string
	// Kafka topic
	Topic string
	// Routing Key Tag
	RoutingTag string `toml:"routing_tag"`

	producer sarama.SyncProducer
}

var sampleConfig = `
  # URLs of kafka brokers
  brokers = ["localhost:9092"]
  # Kafka topic for producer messages
  topic = "telegraf"
  # Telegraf tag to use as a routing key
  # ie, if this tag exists, its value will be used as the routing key
  routing_tag = "host"
`

func (k *Kafka) Connect() error {
	producer, err := sarama.NewSyncProducer(k.Brokers, nil)
	if err != nil {
		return err
	}
	k.producer = producer
	return nil
}

func (k *Kafka) Close() error {
	return k.producer.Close()
}

func (k *Kafka) SampleConfig() string {
	return sampleConfig
}

func (k *Kafka) Description() string {
	return "Configuration for the Kafka server to send metrics to"
}

func (k *Kafka) Write(points []*client.Point) error {
	if len(points) == 0 {
		return nil
	}

	for _, p := range points {
		// Combine tags from Point and BatchPoints and grab the resulting
		// line-protocol output string to write to Kafka
		value := p.String()

		m := &sarama.ProducerMessage{
			Topic: k.Topic,
			Value: sarama.StringEncoder(value),
		}
		if h, ok := p.Tags()[k.RoutingTag]; ok {
			m.Key = sarama.StringEncoder(h)
		}

		_, _, err := k.producer.SendMessage(m)
		if err != nil {
			return errors.New(fmt.Sprintf("FAILED to send kafka message: %s\n",
				err))
		}
	}
	return nil
}

func init() {
	outputs.Add("kafka", func() outputs.Output {
		return &Kafka{}
	})
}

plugins/outputs/kafka/kafka_test.go (new file, 28 lines)
@@ -0,0 +1,28 @@
package kafka

import (
	"testing"

	"github.com/influxdb/telegraf/testutil"
	"github.com/stretchr/testify/require"
)

func TestConnectAndWrite(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	brokers := []string{testutil.GetLocalHost() + ":9092"}
	k := &Kafka{
		Brokers: brokers,
		Topic:   "Test",
	}

	// Verify that we can connect to the Kafka broker
	err := k.Connect()
	require.NoError(t, err)

	// Verify that we can successfully write data to the kafka broker
	err = k.Write(testutil.MockBatchPoints().Points())
	require.NoError(t, err)
}

plugins/outputs/kinesis/README.md (new file, 61 lines)
@@ -0,0 +1,61 @@
## Amazon Kinesis Output for Telegraf

This is an experimental plugin that is still in the early stages of development. It will batch up all of the Points
in one Put request to Kinesis, which considerably reduces the number of API requests.

## About Kinesis

This is not the place to document all of the various Kinesis terms; however, it
may be useful for users to review Amazon's official documentation, which is available
[here](http://docs.aws.amazon.com/kinesis/latest/dev/key-concepts.html).

## Amazon Authentication

This plugin uses a credential chain for authentication with the Kinesis API endpoint. The plugin
will attempt to authenticate in the following order:
1. [IAM Role](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)
2. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk)
3. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk)

## Config

For this output plugin to function correctly, the following variables must be configured
(a combined example appears after this README):

* region
* streamname
* partitionkey

### region

The region is the Amazon region that you wish to connect to. Examples include, but are not limited to:
* us-west-1
* us-west-2
* us-east-1
* ap-southeast-1
* ap-southeast-2

### streamname

The streamname is used by the plugin to ensure that data is sent to the correct Kinesis stream. It is important to
note that the stream *MUST* be pre-configured for this plugin to function correctly. If the stream does not exist,
the plugin will cause telegraf to exit with an exit code of 1.

### partitionkey

This is used to group data within a stream. Currently this plugin only supports a single partitionkey.
Manually configuring different hosts, or groups of hosts, with manually selected partitionkeys might be a workable
solution to scale out.

### format

The format configuration value allows the format of the Point written to Kinesis to be changed.
Right now there are two supported formats: string and custom.

#### string

String is defined using the default Point.String() value and translated to []byte for the Kinesis stream.

#### custom

Custom is a string defined by a number of values in the FormatMetric() function.
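
A combined configuration sketch based on the variables above and the sample config in kinesis.go below; the `[outputs.kinesis]` table name is an assumption based on the plugin's registration name:

	[outputs.kinesis]
	  region = "ap-southeast-2"
	  streamname = "StreamName"      # must already exist
	  partitionkey = "PartitionKey"
	  format = "string"              # or "custom"
	  debug = false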

plugins/outputs/kinesis/kinesis.go (new file, 179 lines)
@@ -0,0 +1,179 @@
package kinesis

import (
	"errors"
	"fmt"
	"log"
	"os"
	"sync/atomic"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesis"

	"github.com/influxdb/influxdb/client/v2"
	"github.com/influxdb/telegraf/plugins/outputs"
)

type KinesisOutput struct {
	Region       string `toml:"region"`
	StreamName   string `toml:"streamname"`
	PartitionKey string `toml:"partitionkey"`
	Format       string `toml:"format"`
	Debug        bool   `toml:"debug"`
	svc          *kinesis.Kinesis
}

var sampleConfig = `
  # Amazon REGION of kinesis endpoint.
  region = "ap-southeast-2"
  # Kinesis StreamName must exist prior to starting telegraf.
  streamname = "StreamName"
  # PartitionKey as used for sharding data.
  partitionkey = "PartitionKey"
  # format of the Data payload in the kinesis PutRecord, supported
  # String and Custom.
  format = "string"
  # debug will show upstream aws messages.
  debug = false
`

func (k *KinesisOutput) SampleConfig() string {
	return sampleConfig
}

func (k *KinesisOutput) Description() string {
	return "Configuration for the AWS Kinesis output."
}

func checkstream(l []*string, s string) bool {
	// Check if the StreamName exists in the slice returned from the ListStreams API request.
	for _, stream := range l {
		if *stream == s {
			return true
		}
	}
	return false
}

func (k *KinesisOutput) Connect() error {
	// We attempt first to create a session to Kinesis using an IAM role, if that fails it will fall through to using
	// environment variables, and then Shared Credentials.
	if k.Debug {
		log.Printf("kinesis: Establishing a connection to Kinesis in %+v", k.Region)
	}
	Config := &aws.Config{
		Region: aws.String(k.Region),
		Credentials: credentials.NewChainCredentials(
			[]credentials.Provider{
				&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())},
				&credentials.EnvProvider{},
				&credentials.SharedCredentialsProvider{},
			}),
	}
	svc := kinesis.New(session.New(Config))

	KinesisParams := &kinesis.ListStreamsInput{
		Limit: aws.Int64(100),
	}

	resp, err := svc.ListStreams(KinesisParams)

	if err != nil {
		log.Printf("kinesis: Error in ListStreams API call : %+v \n", err)
	}

	if checkstream(resp.StreamNames, k.StreamName) {
		if k.Debug {
			log.Printf("kinesis: Stream Exists")
		}
		k.svc = svc
		return nil
	} else {
		log.Printf("kinesis : You have configured a StreamName %+v which does not exist. exiting.", k.StreamName)
		os.Exit(1)
	}
	return err
}

func (k *KinesisOutput) Close() error {
	return errors.New("Error")
}

func FormatMetric(k *KinesisOutput, point *client.Point) (string, error) {
	if k.Format == "string" {
		return point.String(), nil
	} else {
		m := fmt.Sprintf("%+v,%+v,%+v",
			point.Name(),
			point.Tags(),
			point.String())
		return m, nil
	}
}

func writekinesis(k *KinesisOutput, r []*kinesis.PutRecordsRequestEntry) time.Duration {
	start := time.Now()
	payload := &kinesis.PutRecordsInput{
		Records:    r,
		StreamName: aws.String(k.StreamName),
	}

	if k.Debug {
		resp, err := k.svc.PutRecords(payload)
		if err != nil {
			log.Printf("kinesis: Unable to write to Kinesis : %+v \n", err.Error())
		}
		log.Printf("%+v \n", resp)

	} else {
		_, err := k.svc.PutRecords(payload)
		if err != nil {
			log.Printf("kinesis: Unable to write to Kinesis : %+v \n", err.Error())
		}
	}
	return time.Since(start)
}

func (k *KinesisOutput) Write(points []*client.Point) error {
	var sz uint32 = 0

	if len(points) == 0 {
		return nil
	}

	r := []*kinesis.PutRecordsRequestEntry{}

	for _, p := range points {
		atomic.AddUint32(&sz, 1)

		metric, _ := FormatMetric(k, p)
		d := kinesis.PutRecordsRequestEntry{
			Data:         []byte(metric),
			PartitionKey: aws.String(k.PartitionKey),
		}
		r = append(r, &d)

		if sz == 500 {
			// Max Messages Per PutRecordRequest is 500
			elapsed := writekinesis(k, r)
			log.Printf("Wrote a %+v point batch to Kinesis in %+v.\n", sz, elapsed)
			atomic.StoreUint32(&sz, 0)
			r = nil
		}
	}

	writekinesis(k, r)

	return nil
}

func init() {
	outputs.Add("kinesis", func() outputs.Output {
		return &KinesisOutput{}
	})
}

plugins/outputs/kinesis/kinesis_test.go (new file, 39 lines)
@@ -0,0 +1,39 @@
package kinesis

import (
	"github.com/influxdb/telegraf/testutil"
	"github.com/stretchr/testify/require"
	"testing"
)

func TestFormatMetric(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	k := &KinesisOutput{
		Format: "string",
	}

	p := testutil.MockBatchPoints().Points()[0]

	valid_string := "test1,tag1=value1 value=1 1257894000000000000"
	func_string, err := FormatMetric(k, p)

	if func_string != valid_string {
		t.Error("Expected ", valid_string)
	}
	require.NoError(t, err)

	k = &KinesisOutput{
		Format: "custom",
	}

	valid_custom := "test1,map[tag1:value1],test1,tag1=value1 value=1 1257894000000000000"
	func_custom, err := FormatMetric(k, p)

	if func_custom != valid_custom {
		t.Error("Expected ", valid_custom)
	}
	require.NoError(t, err)
}

plugins/outputs/librato/README.md (new file, 12 lines)
@@ -0,0 +1,12 @@
# Librato Output Plugin

This plugin writes to the [Librato Metrics API](http://dev.librato.com/v1/metrics#metrics)
and requires an `api_user` and `api_token` which can be obtained [here](https://metrics.librato.com/account/api_tokens)
for the account.

The `source_tag` option in the configuration file is used to send contextual information from
Point Tags to the API.

If the point value being sent cannot be converted to a float64, the metric is skipped.

Currently, the plugin does not send any associated Point Tags.
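
A configuration sketch matching the sample config in librato.go below; the `[outputs.librato]` table name is an assumption based on the plugin's registration name:

	[outputs.librato]
	  api_user = "telegraf@influxdb.com"  # required
	  api_token = "my-secret-token"       # required
	  source_tag = "hostname"             # optional
	  # timeout = "5s"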

plugins/outputs/librato/librato.go (new file, 175 lines)
@@ -0,0 +1,175 @@
package librato

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"

	"github.com/influxdb/influxdb/client/v2"
	"github.com/influxdb/telegraf/internal"
	"github.com/influxdb/telegraf/plugins/outputs"
)

type Librato struct {
	ApiUser   string
	ApiToken  string
	SourceTag string
	Timeout   internal.Duration

	apiUrl string
	client *http.Client
}

var sampleConfig = `
  # Librato API Docs
  # http://dev.librato.com/v1/metrics-authentication

  # Librato API user
  api_user = "telegraf@influxdb.com" # required.

  # Librato API token
  api_token = "my-secret-token" # required.

  # Tag Field to populate source attribute (optional)
  # This is typically the _hostname_ from which the metric was obtained.
  source_tag = "hostname"

  # Connection timeout.
  # timeout = "5s"
`

type Metrics struct {
	Gauges []*Gauge `json:"gauges"`
}

type Gauge struct {
	Name        string  `json:"name"`
	Value       float64 `json:"value"`
	Source      string  `json:"source"`
	MeasureTime int64   `json:"measure_time"`
}

const librato_api = "https://metrics-api.librato.com/v1/metrics"

func NewLibrato(apiUrl string) *Librato {
	return &Librato{
		apiUrl: apiUrl,
	}
}

func (l *Librato) Connect() error {
	if l.ApiUser == "" || l.ApiToken == "" {
		return fmt.Errorf("api_user and api_token are required fields for librato output")
	}
	l.client = &http.Client{
		Timeout: l.Timeout.Duration,
	}
	return nil
}

func (l *Librato) Write(points []*client.Point) error {
	if len(points) == 0 {
		return nil
	}
	metrics := Metrics{}
	tempGauges := []*Gauge{}
	metricCounter := 0

	for _, pt := range points {
		if gauges, err := l.buildGauges(pt); err == nil {
			for _, gauge := range gauges {
				tempGauges = append(tempGauges, gauge)
				metricCounter++
			}
		} else {
			log.Printf("unable to build Gauge for %s, skipping\n", pt.Name())
		}
	}

	metrics.Gauges = make([]*Gauge, metricCounter)
	copy(metrics.Gauges, tempGauges[0:])
	metricsBytes, err := json.Marshal(metrics)
	if err != nil {
		return fmt.Errorf("unable to marshal Metrics, %s\n", err.Error())
	}
	req, err := http.NewRequest("POST", l.apiUrl, bytes.NewBuffer(metricsBytes))
	if err != nil {
		return fmt.Errorf("unable to create http.Request, %s\n", err.Error())
	}
	req.Header.Add("Content-Type", "application/json")
	req.SetBasicAuth(l.ApiUser, l.ApiToken)

	resp, err := l.client.Do(req)
	if err != nil {
		return fmt.Errorf("error POSTing metrics, %s\n", err.Error())
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		return fmt.Errorf("received bad status code, %d\n", resp.StatusCode)
	}

	return nil
}

func (l *Librato) SampleConfig() string {
	return sampleConfig
}

func (l *Librato) Description() string {
	return "Configuration for Librato API to send metrics to."
}

func (l *Librato) buildGauges(pt *client.Point) ([]*Gauge, error) {
	gauges := []*Gauge{}
	for fieldName, value := range pt.Fields() {
		gauge := &Gauge{
			Name:        pt.Name() + "_" + fieldName,
			MeasureTime: pt.Time().Unix(),
		}
		if err := gauge.setValue(value); err != nil {
			return gauges, fmt.Errorf("unable to extract value from Fields, %s\n",
				err.Error())
		}
		if l.SourceTag != "" {
			if source, ok := pt.Tags()[l.SourceTag]; ok {
				gauge.Source = source
			} else {
				return gauges,
					fmt.Errorf("undeterminable Source type from Field, %s\n",
						l.SourceTag)
			}
		}
	}
	return gauges, nil
}

func (g *Gauge) setValue(v interface{}) error {
	switch d := v.(type) {
	case int:
		g.Value = float64(int(d))
	case int32:
		g.Value = float64(int32(d))
	case int64:
		g.Value = float64(int64(d))
	case float32:
		g.Value = float64(d)
	case float64:
		g.Value = float64(d)
	default:
		return fmt.Errorf("undeterminable type %+v", d)
	}
	return nil
}

func (l *Librato) Close() error {
	return nil
}

func init() {
	outputs.Add("librato", func() outputs.Output {
		return NewLibrato(librato_api)
	})
}

plugins/outputs/librato/librato_test.go (new file, 220 lines)
@@ -0,0 +1,220 @@
package librato

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"reflect"
	"testing"
	"time"

	"github.com/influxdb/telegraf/testutil"

	"github.com/influxdb/influxdb/client/v2"
	"github.com/stretchr/testify/require"
)

var (
	fakeUrl   = "http://test.librato.com"
	fakeUser  = "telegraf@influxdb.com"
	fakeToken = "123456"
)

func fakeLibrato() *Librato {
	l := NewLibrato(fakeUrl)
	l.ApiUser = fakeUser
	l.ApiToken = fakeToken
	return l
}

func TestUriOverride(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	defer ts.Close()

	l := NewLibrato(ts.URL)
	l.ApiUser = "telegraf@influxdb.com"
	l.ApiToken = "123456"
	err := l.Connect()
	require.NoError(t, err)
	err = l.Write(testutil.MockBatchPoints().Points())
	require.NoError(t, err)
}

func TestBadStatusCode(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusServiceUnavailable)
		json.NewEncoder(w).Encode(`{
			"errors": {
				"system": [
					"The API is currently down for maintenance. It'll be back shortly."
				]
			}
		}`)
	}))
	defer ts.Close()

	l := NewLibrato(ts.URL)
	l.ApiUser = "telegraf@influxdb.com"
	l.ApiToken = "123456"
	err := l.Connect()
	require.NoError(t, err)
	err = l.Write(testutil.MockBatchPoints().Points())
	if err == nil {
		t.Errorf("error expected but none returned")
	} else {
		require.EqualError(t, fmt.Errorf("received bad status code, 503\n"), err.Error())
	}
}

func TestBuildGauge(t *testing.T) {
	var gaugeTests = []struct {
		ptIn     *client.Point
		outGauge *Gauge
		err      error
	}{
		{
			testutil.TestPoint(0.0, "test1"),
			&Gauge{
				Name:        "test1",
				MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
				Value:       0.0,
			},
			nil,
		},
		{
			testutil.TestPoint(1.0, "test2"),
			&Gauge{
				Name:        "test2",
				MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
				Value:       1.0,
			},
			nil,
		},
		{
			testutil.TestPoint(10, "test3"),
			&Gauge{
				Name:        "test3",
				MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
				Value:       10.0,
			},
			nil,
		},
		{
			testutil.TestPoint(int32(112345), "test4"),
			&Gauge{
				Name:        "test4",
				MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
				Value:       112345.0,
			},
			nil,
		},
		{
			testutil.TestPoint(int64(112345), "test5"),
			&Gauge{
				Name:        "test5",
				MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
				Value:       112345.0,
			},
			nil,
		},
		{
			testutil.TestPoint(float32(11234.5), "test6"),
			&Gauge{
				Name:        "test6",
				MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
				Value:       11234.5,
			},
			nil,
		},
		{
			testutil.TestPoint("11234.5", "test7"),
			&Gauge{
				Name:        "test7",
				MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
				Value:       11234.5,
			},
			fmt.Errorf("unable to extract value from Fields, undeterminable type"),
		},
	}

	l := NewLibrato(fakeUrl)
	for _, gt := range gaugeTests {
		gauges, err := l.buildGauges(gt.ptIn)
		if err != nil && gt.err == nil {
			t.Errorf("%s: unexpected error, %+v\n", gt.ptIn.Name(), err)
		}
		if gt.err != nil && err == nil {
			t.Errorf("%s: expected an error (%s) but none returned",
				gt.ptIn.Name(), gt.err.Error())
		}
		if len(gauges) == 0 {
			continue
		}
		if gt.err == nil && !reflect.DeepEqual(gauges[0], gt.outGauge) {
			t.Errorf("%s: \nexpected %+v\ngot %+v\n",
				gt.ptIn.Name(), gt.outGauge, gauges[0])
		}
	}
}

func TestBuildGaugeWithSource(t *testing.T) {
	pt1, _ := client.NewPoint(
		"test1",
		map[string]string{"hostname": "192.168.0.1"},
		map[string]interface{}{"value": 0.0},
		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
	)
	pt2, _ := client.NewPoint(
		"test2",
		map[string]string{"hostnam": "192.168.0.1"},
		map[string]interface{}{"value": 1.0},
		time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC),
	)
	var gaugeTests = []struct {
		ptIn     *client.Point
		outGauge *Gauge
		err      error
	}{
		{
			pt1,
			&Gauge{
				Name:        "test1",
				MeasureTime: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
				Value:       0.0,
				Source:      "192.168.0.1",
			},
			nil,
		},
		{
			pt2,
			&Gauge{
				Name:        "test2",
				MeasureTime: time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC).Unix(),
				Value:       1.0,
			},
			fmt.Errorf("undeterminable Source type from Field, hostname"),
		},
	}

	l := NewLibrato(fakeUrl)
	l.SourceTag = "hostname"
	for _, gt := range gaugeTests {
		gauges, err := l.buildGauges(gt.ptIn)
		if err != nil && gt.err == nil {
			t.Errorf("%s: unexpected error, %+v\n", gt.ptIn.Name(), err)
		}
		if gt.err != nil && err == nil {
			t.Errorf("%s: expected an error (%s) but none returned", gt.ptIn.Name(), gt.err.Error())
		}
		if len(gauges) == 0 {
			continue
		}
		if gt.err == nil && !reflect.DeepEqual(gauges[0], gt.outGauge) {
			t.Errorf("%s: \nexpected %+v\ngot %+v\n", gt.ptIn.Name(), gt.outGauge, gauges[0])
		}
	}
}
190
plugins/outputs/mqtt/mqtt.go
Normal file
190
plugins/outputs/mqtt/mqtt.go
Normal file
@@ -0,0 +1,190 @@
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
paho "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/influxdb/telegraf/internal"
|
||||
"github.com/influxdb/telegraf/plugins/outputs"
|
||||
)
|
||||
|
||||
const MaxClientIdLen = 8
|
||||
const MaxRetryCount = 3
|
||||
const ClientIdPrefix = "telegraf"
|
||||
|
||||
type MQTT struct {
|
||||
Servers []string `toml:"servers"`
|
||||
Username string
|
||||
Password string
|
||||
Database string
|
||||
Timeout internal.Duration
|
||||
TopicPrefix string
|
||||
|
||||
Client *paho.Client
|
||||
Opts *paho.ClientOptions
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
servers = ["localhost:1883"] # required.
|
||||
|
||||
# MQTT outputs send metrics to this topic format
|
||||
# "<topic_prefix>/host/<hostname>/<pluginname>/"
|
||||
# ex: prefix/host/web01.example.com/mem/available
|
||||
# topic_prefix = "prefix"
|
||||
|
||||
# username and password used to connect to the MQTT server.
|
||||
# username = "telegraf"
|
||||
# password = "metricsmetricsmetricsmetrics"
|
||||
`
|
||||
|
||||
func (m *MQTT) Connect() error {
|
||||
var err error
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
m.Opts, err = m.CreateOpts()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.Client = paho.NewClient(m.Opts)
|
||||
if token := m.Client.Connect(); token.Wait() && token.Error() != nil {
|
||||
return token.Error()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MQTT) Close() error {
|
||||
if m.Client.IsConnected() {
|
||||
m.Client.Disconnect(20)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MQTT) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (m *MQTT) Description() string {
|
||||
return "Configuration for MQTT server to send metrics to"
|
||||
}
|
||||
|
||||
func (m *MQTT) Write(points []*client.Point) error {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
if len(points) == 0 {
|
||||
return nil
|
||||
}
|
||||
hostname, ok := points[0].Tags()["host"]
|
||||
if !ok {
|
||||
hostname = ""
|
||||
}
|
||||
|
||||
for _, p := range points {
|
||||
var t []string
|
||||
if m.TopicPrefix != "" {
|
||||
t = append(t, m.TopicPrefix)
|
||||
}
|
||||
tm := strings.Split(p.Name(), "_")
|
||||
if len(tm) < 2 {
|
||||
tm = []string{p.Name(), "stat"}
|
||||
}
|
||||
|
||||
t = append(t, "host", hostname, tm[0], tm[1])
|
||||
topic := strings.Join(t, "/")
|
||||
|
||||
value := p.String()
|
||||
err := m.publish(topic, value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not write to MQTT server, %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MQTT) publish(topic, body string) error {
|
||||
token := m.Client.Publish(topic, 0, false, body)
|
||||
token.Wait()
|
||||
if token.Error() != nil {
|
||||
return token.Error()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MQTT) CreateOpts() (*paho.ClientOptions, error) {
|
||||
opts := paho.NewClientOptions()
|
||||
|
||||
clientId := getRandomClientId()
|
||||
opts.SetClientID(clientId)
|
||||
|
||||
TLSConfig := &tls.Config{InsecureSkipVerify: false}
|
||||
ca := "" // TODO
|
||||
scheme := "tcp"
|
||||
if ca != "" {
|
||||
scheme = "ssl"
|
||||
certPool, err := getCertPool(ca)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
TLSConfig.RootCAs = certPool
|
||||
}
|
||||
TLSConfig.InsecureSkipVerify = true // TODO
|
||||
opts.SetTLSConfig(TLSConfig)
|
||||
|
||||
user := m.Username
|
||||
if user == "" {
|
||||
opts.SetUsername(user)
|
||||
}
|
||||
password := m.Password
|
||||
if password != "" {
|
||||
opts.SetPassword(password)
|
||||
}
|
||||
|
||||
if len(m.Servers) == 0 {
|
||||
return opts, fmt.Errorf("could not get host infomations")
|
||||
}
|
||||
for _, host := range m.Servers {
|
||||
server := fmt.Sprintf("%s://%s", scheme, host)
|
||||
|
||||
opts.AddBroker(server)
|
||||
}
|
||||
opts.SetAutoReconnect(true)
|
||||
return opts, nil
|
||||
}
|
||||
|
||||
func getRandomClientId() string {
|
||||
const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
|
||||
var bytes = make([]byte, MaxClientIdLen)
|
||||
rand.Read(bytes)
|
||||
for i, b := range bytes {
|
||||
bytes[i] = alphanum[b%byte(len(alphanum))]
|
||||
}
|
||||
return ClientIdPrefix + "-" + string(bytes)
|
||||
}
|
||||
|
||||
func getCertPool(pemPath string) (*x509.CertPool, error) {
|
||||
certs := x509.NewCertPool()
|
||||
|
||||
pemData, err := ioutil.ReadFile(pemPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
certs.AppendCertsFromPEM(pemData)
|
||||
return certs, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
outputs.Add("mqtt", func() outputs.Output {
|
||||
return &MQTT{}
|
||||
})
|
||||
}
|
||||
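As a quick illustration of the topic layout described in the sample config, this stand-alone sketch reproduces the splitting logic from `Write` above; the prefix, hostname, and point name are made-up example values.

```
package main

import (
	"fmt"
	"strings"
)

// buildTopic mirrors the topic construction performed in (*MQTT).Write.
func buildTopic(prefix, hostname, pointName string) string {
	var t []string
	if prefix != "" {
		t = append(t, prefix)
	}
	tm := strings.Split(pointName, "_")
	if len(tm) < 2 {
		tm = []string{pointName, "stat"}
	}
	t = append(t, "host", hostname, tm[0], tm[1])
	return strings.Join(t, "/")
}

func main() {
	// Prints "prefix/host/web01.example.com/cpu/usage"
	fmt.Println(buildTopic("prefix", "web01.example.com", "cpu_usage_idle"))
}
```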
27
plugins/outputs/mqtt/mqtt_test.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestConnectAndWrite(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
var url = testutil.GetLocalHost() + ":1883"
|
||||
m := &MQTT{
|
||||
Servers: []string{url},
|
||||
}
|
||||
|
||||
// Verify that we can connect to the MQTT broker
|
||||
err := m.Connect()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify that we can successfully write data to the mqtt broker
|
||||
err = m.Write(testutil.MockBatchPoints().Points())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
4
plugins/outputs/nsq/README.md
Normal file
@@ -0,0 +1,4 @@
|
||||
# NSQ Output Plugin
|
||||
|
||||
This plugin writes to a specified NSQD instance, usually local to the producer. It requires
|
||||
a `server` name and a `topic` name.
|
||||
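A minimal configuration for this output, mirroring the sample config embedded in `nsq.go` below (the host and topic are example values):

```
# Location of nsqd instance listening on TCP
server = "localhost:4150"
# NSQ topic for producer messages
topic = "telegraf"
```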
71
plugins/outputs/nsq/nsq.go
Normal file
@@ -0,0 +1,71 @@
|
||||
package nsq
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/influxdb/telegraf/plugins/outputs"
|
||||
"github.com/nsqio/go-nsq"
|
||||
)
|
||||
|
||||
type NSQ struct {
|
||||
Server string
|
||||
Topic string
|
||||
producer *nsq.Producer
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
# Location of nsqd instance listening on TCP
|
||||
server = "localhost:4150"
|
||||
# NSQ topic for producer messages
|
||||
topic = "telegraf"
|
||||
`
|
||||
|
||||
func (n *NSQ) Connect() error {
|
||||
config := nsq.NewConfig()
|
||||
producer, err := nsq.NewProducer(n.Server, config)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n.producer = producer
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *NSQ) Close() error {
|
||||
n.producer.Stop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *NSQ) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (n *NSQ) Description() string {
|
||||
return "Send telegraf measurements to NSQD"
|
||||
}
|
||||
|
||||
func (n *NSQ) Write(points []*client.Point) error {
|
||||
if len(points) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, p := range points {
|
||||
// Combine tags from Point and BatchPoints and grab the resulting
|
||||
// line-protocol output string to write to NSQ
|
||||
value := p.String()
|
||||
|
||||
err := n.producer.Publish(n.Topic, []byte(value))
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("FAILED to send NSQD message: %s", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
outputs.Add("nsq", func() outputs.Output {
|
||||
return &NSQ{}
|
||||
})
|
||||
}
|
||||
28
plugins/outputs/nsq/nsq_test.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package nsq
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestConnectAndWrite(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
server := []string{testutil.GetLocalHost() + ":4150"}
|
||||
n := &NSQ{
|
||||
Server: server[0],
|
||||
Topic: "telegraf",
|
||||
}
|
||||
|
||||
// Verify that we can connect to the NSQ daemon
|
||||
err := n.Connect()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify that we can successfully write data to the NSQ daemon
|
||||
err = n.Write(testutil.MockBatchPoints().Points())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
78
plugins/outputs/opentsdb/README.md
Normal file
@@ -0,0 +1,78 @@
|
||||
# OpenTSDB Output Plugin
|
||||
|
||||
This plugin writes to an OpenTSDB instance using the "telnet" mode.
|
||||
|
||||
## Transfer "Protocol" in the telnet mode
|
||||
|
||||
The input expected by OpenTSDB is specified in the following way:
|
||||
|
||||
```
|
||||
put <metric> <timestamp> <value> <tagk1=tagv1[ tagk2=tagv2 ...tagkN=tagvN]>
|
||||
```
|
||||
|
||||
The telegraf output plugin adds an optional prefix to the metric keys so
|
||||
that a subset of the metrics can be selected.
|
||||
|
||||
```
|
||||
put <[prefix.]metric> <timestamp> <value> <tagk1=tagv1[ tagk2=tagv2 ...tagkN=tagvN]>
|
||||
```
|
||||
|
||||
### Example
|
||||
|
||||
```
|
||||
put nine.telegraf.system_load1 1441910356 0.430000 dc=homeoffice host=irimame scope=green
|
||||
put nine.telegraf.system_load5 1441910356 0.580000 dc=homeoffice host=irimame scope=green
|
||||
put nine.telegraf.system_load15 1441910356 0.730000 dc=homeoffice host=irimame scope=green
|
||||
put nine.telegraf.system_uptime 1441910356 3655970.000000 dc=homeoffice host=irimame scope=green
|
||||
put nine.telegraf.system_uptime_format 1441910356 dc=homeoffice host=irimame scope=green
|
||||
put nine.telegraf.mem_total 1441910356 4145426432 dc=homeoffice host=irimame scope=green
|
||||
...
|
||||
put nine.telegraf.io_write_bytes 1441910366 0 dc=homeoffice host=irimame name=vda2 scope=green
|
||||
put nine.telegraf.io_read_time 1441910366 0 dc=homeoffice host=irimame name=vda2 scope=green
|
||||
put nine.telegraf.io_write_time 1441910366 0 dc=homeoffice host=irimame name=vda2 scope=green
|
||||
put nine.telegraf.io_io_time 1441910366 0 dc=homeoffice host=irimame name=vda2 scope=green
|
||||
put nine.telegraf.ping_packets_transmitted 1441910366 dc=homeoffice host=irimame scope=green url=www.google.com
|
||||
put nine.telegraf.ping_packets_received 1441910366 dc=homeoffice host=irimame scope=green url=www.google.com
|
||||
put nine.telegraf.ping_percent_packet_loss 1441910366 0.000000 dc=homeoffice host=irimame scope=green url=www.google.com
|
||||
put nine.telegraf.ping_average_response_ms 1441910366 24.006000 dc=homeoffice host=irimame scope=green url=www.google.com
|
||||
...
|
||||
```
|
||||
|
||||
## Simulating the telnet interface
|
||||
|
||||
The OpenTSDB interface can be simulated with this reader:
|
||||
|
||||
```
|
||||
// opentsdb_telnet_mode_mock.go
|
||||
package main
|
||||
|
||||
import (
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
)
|
||||
|
||||
func main() {
|
||||
l, err := net.Listen("tcp", "localhost:4242")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer l.Close()
|
||||
for {
|
||||
conn, err := l.Accept()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
go func(c net.Conn) {
|
||||
defer c.Close()
|
||||
io.Copy(os.Stdout, c)
|
||||
}(conn)
|
||||
}
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
## Allowed values for metrics
|
||||
|
||||
OpenTSDB allows `integers` and `floats` as input values. Fields of any other type are skipped by this plugin (see `buildValue` in `opentsdb.go`).
|
||||
168
plugins/outputs/opentsdb/opentsdb.go
Normal file
@@ -0,0 +1,168 @@
|
||||
package opentsdb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/influxdb/telegraf/plugins/outputs"
|
||||
)
|
||||
|
||||
type OpenTSDB struct {
|
||||
Prefix string
|
||||
|
||||
Host string
|
||||
Port int
|
||||
|
||||
Debug bool
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
# prefix for metrics keys
|
||||
prefix = "my.specific.prefix."
|
||||
|
||||
## Telnet Mode ##
|
||||
# DNS name of the OpenTSDB server in telnet mode
|
||||
host = "opentsdb.example.com"
|
||||
|
||||
# Port of the OpenTSDB server in telnet mode
|
||||
port = 4242
|
||||
|
||||
# Debug true - Prints OpenTSDB communication
|
||||
debug = false
|
||||
`
|
||||
|
||||
type MetricLine struct {
|
||||
Metric string
|
||||
Timestamp int64
|
||||
Value string
|
||||
Tags string
|
||||
}
|
||||
|
||||
func (o *OpenTSDB) Connect() error {
|
||||
// Test Connection to OpenTSDB Server
|
||||
uri := fmt.Sprintf("%s:%d", o.Host, o.Port)
|
||||
tcpAddr, err := net.ResolveTCPAddr("tcp", uri)
|
||||
if err != nil {
|
||||
return fmt.Errorf("OpenTSDB: TCP address cannot be resolved")
|
||||
}
|
||||
connection, err := net.DialTCP("tcp", nil, tcpAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("OpenTSDB: Telnet connect fail")
|
||||
}
|
||||
defer connection.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *OpenTSDB) Write(points []*client.Point) error {
|
||||
if len(points) == 0 {
|
||||
return nil
|
||||
}
|
||||
now := time.Now()
|
||||
|
||||
// Send Data with telnet / socket communication
|
||||
uri := fmt.Sprintf("%s:%d", o.Host, o.Port)
|
||||
tcpAddr, _ := net.ResolveTCPAddr("tcp", uri)
|
||||
connection, err := net.DialTCP("tcp", nil, tcpAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("OpenTSDB: Telnet connect fail")
|
||||
}
|
||||
defer connection.Close()
|
||||
|
||||
for _, pt := range points {
|
||||
for _, metric := range buildMetrics(pt, now, o.Prefix) {
|
||||
messageLine := fmt.Sprintf("put %s %v %s %s\n",
|
||||
metric.Metric, metric.Timestamp, metric.Value, metric.Tags)
|
||||
if o.Debug {
|
||||
fmt.Print(messageLine)
|
||||
}
|
||||
_, err := connection.Write([]byte(messageLine))
|
||||
if err != nil {
|
||||
return fmt.Errorf("OpenTSDB: Telnet writing error %s", err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildTags(ptTags map[string]string) []string {
|
||||
tags := make([]string, len(ptTags))
|
||||
index := 0
|
||||
for k, v := range ptTags {
|
||||
tags[index] = fmt.Sprintf("%s=%s", k, v)
|
||||
index += 1
|
||||
}
|
||||
sort.Strings(tags)
|
||||
return tags
|
||||
}
|
||||
|
||||
func buildMetrics(pt *client.Point, now time.Time, prefix string) []*MetricLine {
|
||||
ret := []*MetricLine{}
|
||||
for fieldName, value := range pt.Fields() {
|
||||
metric := &MetricLine{
|
||||
Metric: fmt.Sprintf("%s%s_%s", prefix, pt.Name(), fieldName),
|
||||
Timestamp: now.Unix(),
|
||||
}
|
||||
|
||||
metricValue, buildError := buildValue(value)
|
||||
if buildError != nil {
|
||||
fmt.Printf("OpenTSDB: %s\n", buildError.Error())
|
||||
continue
|
||||
}
|
||||
metric.Value = metricValue
|
||||
tagsSlice := buildTags(pt.Tags())
|
||||
metric.Tags = fmt.Sprint(strings.Join(tagsSlice, " "))
|
||||
ret = append(ret, metric)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func buildValue(v interface{}) (string, error) {
|
||||
var retv string
|
||||
switch p := v.(type) {
|
||||
case int64:
|
||||
retv = IntToString(int64(p))
|
||||
case uint64:
|
||||
retv = UIntToString(uint64(p))
|
||||
case float64:
|
||||
retv = FloatToString(float64(p))
|
||||
default:
|
||||
return retv, fmt.Errorf("unexpected type %T with value %v for OpenTSDB", v, v)
|
||||
}
|
||||
return retv, nil
|
||||
}
|
||||
|
||||
func IntToString(input_num int64) string {
|
||||
return strconv.FormatInt(input_num, 10)
|
||||
}
|
||||
|
||||
func UIntToString(input_num uint64) string {
|
||||
return strconv.FormatUint(input_num, 10)
|
||||
}
|
||||
|
||||
func FloatToString(input_num float64) string {
|
||||
return strconv.FormatFloat(input_num, 'f', 6, 64)
|
||||
}
|
||||
|
||||
func (o *OpenTSDB) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (o *OpenTSDB) Description() string {
|
||||
return "Configuration for OpenTSDB server to send metrics to"
|
||||
}
|
||||
|
||||
func (o *OpenTSDB) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
outputs.Add("opentsdb", func() outputs.Output {
|
||||
return &OpenTSDB{}
|
||||
})
|
||||
}
|
||||
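To see how the helpers above fit together, here is an illustrative, test-style sketch (not part of this commit; it would live in a `_test.go` file) that builds a single telnet line with `buildTags` and `buildValue`; the metric name, timestamp, and tags are example values borrowed from the README.

```
package opentsdb

import (
	"fmt"
	"strings"
)

func ExampleTelnetLine() {
	tags := buildTags(map[string]string{"host": "web01", "dc": "homeoffice"})
	value, _ := buildValue(float64(0.43))
	fmt.Printf("put %s %v %s %s\n",
		"nine.telegraf.system_load1", int64(1441910356), value, strings.Join(tags, " "))
	// Output: put nine.telegraf.system_load1 1441910356 0.430000 dc=homeoffice host=web01
}
```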
71
plugins/outputs/opentsdb/opentsdb_test.go
Normal file
@@ -0,0 +1,71 @@
|
||||
package opentsdb
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestBuildTagsTelnet(t *testing.T) {
|
||||
var tagtests = []struct {
|
||||
ptIn map[string]string
|
||||
outTags []string
|
||||
}{
|
||||
{
|
||||
map[string]string{"one": "two", "three": "four"},
|
||||
[]string{"one=two", "three=four"},
|
||||
},
|
||||
{
|
||||
map[string]string{"aaa": "bbb"},
|
||||
[]string{"aaa=bbb"},
|
||||
},
|
||||
{
|
||||
map[string]string{"one": "two", "aaa": "bbb"},
|
||||
[]string{"aaa=bbb", "one=two"},
|
||||
},
|
||||
{
|
||||
map[string]string{},
|
||||
[]string{},
|
||||
},
|
||||
}
|
||||
for _, tt := range tagtests {
|
||||
tags := buildTags(tt.ptIn)
|
||||
if !reflect.DeepEqual(tags, tt.outTags) {
|
||||
t.Errorf("\nexpected %+v\ngot %+v\n", tt.outTags, tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWrite(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
o := &OpenTSDB{
|
||||
Host: testutil.GetLocalHost(),
|
||||
Port: 4242,
|
||||
Prefix: "prefix.test.",
|
||||
}
|
||||
|
||||
// Verify that we can connect to the OpenTSDB instance
|
||||
err := o.Connect()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify that we can successfully write data to OpenTSDB
|
||||
err = o.Write(testutil.MockBatchPoints().Points())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify positive and negative test cases of writing data
|
||||
bp := testutil.MockBatchPoints()
|
||||
bp.AddPoint(testutil.TestPoint(float64(1.0), "justametric.float"))
|
||||
bp.AddPoint(testutil.TestPoint(int64(123456789), "justametric.int"))
|
||||
bp.AddPoint(testutil.TestPoint(uint64(123456789012345), "justametric.uint"))
|
||||
bp.AddPoint(testutil.TestPoint("Lorem Ipsum", "justametric.string"))
|
||||
bp.AddPoint(testutil.TestPoint(float64(42.0), "justametric.anotherfloat"))
|
||||
|
||||
err = o.Write(bp.Points())
|
||||
require.NoError(t, err)
|
||||
|
||||
}
|
||||
6
plugins/outputs/prometheus_client/README.md
Normal file
@@ -0,0 +1,6 @@
|
||||
# Prometheus Client Service Output Plugin
|
||||
|
||||
This plugin starts a Prometheus Client, listening on a port defined in the
|
||||
configuration file.
|
||||
|
||||
It exposes all metrics on `/metrics` to be polled by a Prometheus server.
|
||||
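Assuming the default listen address used in `prometheus_client.go` below (`localhost:9126`), the exposed metrics can be inspected with a plain HTTP GET. This is only a sketch of how a consumer might poll the endpoint, not part of the plugin.

```
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:9126/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	// Dump the Prometheus exposition-format payload to stdout.
	fmt.Printf("%s", body)
}
```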
125
plugins/outputs/prometheus_client/prometheus_client.go
Normal file
@@ -0,0 +1,125 @@
|
||||
package prometheus_client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/influxdb/telegraf/plugins/outputs"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
type PrometheusClient struct {
|
||||
Listen string
|
||||
metrics map[string]*prometheus.UntypedVec
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
# Address to listen on
|
||||
# listen = ":9126"
|
||||
`
|
||||
|
||||
func (p *PrometheusClient) Start() error {
|
||||
if p.Listen == "" {
|
||||
p.Listen = "localhost:9126"
|
||||
}
|
||||
|
||||
http.Handle("/metrics", prometheus.Handler())
|
||||
server := &http.Server{
|
||||
Addr: p.Listen,
|
||||
}
|
||||
|
||||
p.metrics = make(map[string]*prometheus.UntypedVec)
|
||||
go server.ListenAndServe()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *PrometheusClient) Stop() {
|
||||
// TODO: Use a listener for http.Server that counts active connections
|
||||
// that can be stopped and closed gracefully
|
||||
}
|
||||
|
||||
func (p *PrometheusClient) Connect() error {
|
||||
// This service output does not need to make any further connections
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *PrometheusClient) Close() error {
|
||||
// This service output does not need to close any of its connections
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *PrometheusClient) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (p *PrometheusClient) Description() string {
|
||||
return "Configuration for the Prometheus client to spawn"
|
||||
}
|
||||
|
||||
func (p *PrometheusClient) Write(points []*client.Point) error {
|
||||
if len(points) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, point := range points {
|
||||
var labels []string
|
||||
key := point.Name()
|
||||
|
||||
for k := range point.Tags() {
|
||||
if len(k) > 0 {
|
||||
labels = append(labels, k)
|
||||
}
|
||||
}
|
||||
|
||||
if _, ok := p.metrics[key]; !ok {
|
||||
p.metrics[key] = prometheus.NewUntypedVec(
|
||||
prometheus.UntypedOpts{
|
||||
Name: key,
|
||||
Help: fmt.Sprintf("Telegraf collected point '%s'", key),
|
||||
},
|
||||
labels,
|
||||
)
|
||||
prometheus.MustRegister(p.metrics[key])
|
||||
}
|
||||
|
||||
l := prometheus.Labels{}
|
||||
for tk, tv := range point.Tags() {
|
||||
l[tk] = tv
|
||||
}
|
||||
|
||||
for _, val := range point.Fields() {
|
||||
switch val := val.(type) {
|
||||
default:
|
||||
log.Printf("Prometheus output, unsupported type. key: %s, type: %T\n",
|
||||
key, val)
|
||||
case int64:
|
||||
m, err := p.metrics[key].GetMetricWith(l)
|
||||
if err != nil {
|
||||
log.Printf("ERROR Getting metric in Prometheus output, "+
|
||||
"key: %s, labels: %v,\nerr: %s\n",
|
||||
key, l, err.Error())
|
||||
continue
|
||||
}
|
||||
m.Set(float64(val))
|
||||
case float64:
|
||||
m, err := p.metrics[key].GetMetricWith(l)
|
||||
if err != nil {
|
||||
log.Printf("ERROR Getting metric in Prometheus output, "+
|
||||
"key: %s, labels: %v,\nerr: %s\n",
|
||||
key, l, err.Error())
|
||||
continue
|
||||
}
|
||||
m.Set(val)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
outputs.Add("prometheus_client", func() outputs.Output {
|
||||
return &PrometheusClient{}
|
||||
})
|
||||
}
|
||||
100
plugins/outputs/prometheus_client/prometheus_client_test.go
Normal file
@@ -0,0 +1,100 @@
|
||||
package prometheus_client
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/influxdb/telegraf/plugins/inputs/prometheus"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
)
|
||||
|
||||
var pTesting *PrometheusClient
|
||||
|
||||
func TestPrometheusWritePointEmptyTag(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
p := &prometheus.Prometheus{
|
||||
Urls: []string{"http://localhost:9126/metrics"},
|
||||
}
|
||||
tags := make(map[string]string)
|
||||
pt1, _ := client.NewPoint(
|
||||
"test_point_1",
|
||||
tags,
|
||||
map[string]interface{}{"value": 0.0})
|
||||
pt2, _ := client.NewPoint(
|
||||
"test_point_2",
|
||||
tags,
|
||||
map[string]interface{}{"value": 1.0})
|
||||
var points = []*client.Point{
|
||||
pt1,
|
||||
pt2,
|
||||
}
|
||||
require.NoError(t, pTesting.Write(points))
|
||||
|
||||
expected := []struct {
|
||||
name string
|
||||
value float64
|
||||
tags map[string]string
|
||||
}{
|
||||
{"test_point_1", 0.0, tags},
|
||||
{"test_point_2", 1.0, tags},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
require.NoError(t, p.Gather(&acc))
|
||||
for _, e := range expected {
|
||||
acc.AssertContainsFields(t, "prometheus_"+e.name,
|
||||
map[string]interface{}{"value": e.value})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrometheusWritePointTag(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
p := &prometheus.Prometheus{
|
||||
Urls: []string{"http://localhost:9126/metrics"},
|
||||
}
|
||||
tags := make(map[string]string)
|
||||
tags["testtag"] = "testvalue"
|
||||
pt1, _ := client.NewPoint(
|
||||
"test_point_3",
|
||||
tags,
|
||||
map[string]interface{}{"value": 0.0})
|
||||
pt2, _ := client.NewPoint(
|
||||
"test_point_4",
|
||||
tags,
|
||||
map[string]interface{}{"value": 1.0})
|
||||
var points = []*client.Point{
|
||||
pt1,
|
||||
pt2,
|
||||
}
|
||||
require.NoError(t, pTesting.Write(points))
|
||||
|
||||
expected := []struct {
|
||||
name string
|
||||
value float64
|
||||
}{
|
||||
{"test_point_3", 0.0},
|
||||
{"test_point_4", 1.0},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
require.NoError(t, p.Gather(&acc))
|
||||
for _, e := range expected {
|
||||
acc.AssertContainsFields(t, "prometheus_"+e.name,
|
||||
map[string]interface{}{"value": e.value})
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
pTesting = &PrometheusClient{Listen: "localhost:9126"}
|
||||
pTesting.Start()
|
||||
}
|
||||
43
plugins/outputs/registry.go
Normal file
@@ -0,0 +1,43 @@
|
||||
package outputs
|
||||
|
||||
import (
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
)
|
||||
|
||||
type Output interface {
|
||||
// Connect to the Output
|
||||
Connect() error
|
||||
// Close any connections to the Output
|
||||
Close() error
|
||||
// Description returns a one-sentence description on the Output
|
||||
Description() string
|
||||
// SampleConfig returns the default configuration of the Output
|
||||
SampleConfig() string
|
||||
// Write takes in group of points to be written to the Output
|
||||
Write(points []*client.Point) error
|
||||
}
|
||||
|
||||
type ServiceOutput interface {
|
||||
// Connect to the Output
|
||||
Connect() error
|
||||
// Close any connections to the Output
|
||||
Close() error
|
||||
// Description returns a one-sentence description on the Output
|
||||
Description() string
|
||||
// SampleConfig returns the default configuration of the Output
|
||||
SampleConfig() string
|
||||
// Write takes in group of points to be written to the Output
|
||||
Write(points []*client.Point) error
|
||||
// Start the "service" that will provide an Output
|
||||
Start() error
|
||||
// Stop the "service" that will provide an Output
|
||||
Stop()
|
||||
}
|
||||
|
||||
type Creator func() Output
|
||||
|
||||
var Outputs = map[string]Creator{}
|
||||
|
||||
func Add(name string, creator Creator) {
|
||||
Outputs[name] = creator
|
||||
}
|
||||
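For reference, a minimal output that satisfies this `Output` interface and registers itself the same way the plugins in this commit do might look like the sketch below; the `discard` package and name are made up for illustration and are not part of this commit.

```
package discard

import (
	"github.com/influxdb/influxdb/client/v2"
	"github.com/influxdb/telegraf/plugins/outputs"
)

// Discard is a do-nothing output used only to illustrate the interface.
type Discard struct{}

func (d *Discard) Connect() error       { return nil }
func (d *Discard) Close() error         { return nil }
func (d *Discard) Description() string  { return "Throw away all points written to it" }
func (d *Discard) SampleConfig() string { return "" }

// Write would normally serialize the points and send them somewhere.
func (d *Discard) Write(points []*client.Point) error {
	return nil
}

func init() {
	outputs.Add("discard", func() outputs.Output {
		return &Discard{}
	})
}
```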
101
plugins/outputs/riemann/riemann.go
Normal file
@@ -0,0 +1,101 @@
|
||||
package riemann
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/amir/raidman"
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/influxdb/telegraf/plugins/outputs"
|
||||
)
|
||||
|
||||
type Riemann struct {
|
||||
URL string
|
||||
Transport string
|
||||
|
||||
client *raidman.Client
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
# URL of server
|
||||
url = "localhost:5555"
|
||||
# transport protocol to use either tcp or udp
|
||||
transport = "tcp"
|
||||
`
|
||||
|
||||
func (r *Riemann) Connect() error {
|
||||
c, err := raidman.Dial(r.Transport, r.URL)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.client = c
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Riemann) Close() error {
|
||||
r.client.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Riemann) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (r *Riemann) Description() string {
|
||||
return "Configuration for the Riemann server to send metrics to"
|
||||
}
|
||||
|
||||
func (r *Riemann) Write(points []*client.Point) error {
|
||||
if len(points) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var events []*raidman.Event
|
||||
for _, p := range points {
|
||||
evs := buildEvents(p)
|
||||
for _, ev := range evs {
|
||||
events = append(events, ev)
|
||||
}
|
||||
}
|
||||
|
||||
var senderr = r.client.SendMulti(events)
|
||||
if senderr != nil {
|
||||
return errors.New(fmt.Sprintf("FAILED to send riemann message: %s\n",
|
||||
senderr))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildEvents(p *client.Point) []*raidman.Event {
|
||||
events := []*raidman.Event{}
|
||||
for fieldName, value := range p.Fields() {
|
||||
host, ok := p.Tags()["host"]
|
||||
if !ok {
|
||||
hostname, err := os.Hostname()
|
||||
if err != nil {
|
||||
host = "unknown"
|
||||
} else {
|
||||
host = hostname
|
||||
}
|
||||
}
|
||||
|
||||
event := &raidman.Event{
|
||||
Host: host,
|
||||
Service: p.Name() + "_" + fieldName,
|
||||
Metric: value,
|
||||
}
|
||||
events = append(events, event)
|
||||
}
|
||||
|
||||
return events
|
||||
}
|
||||
|
||||
func init() {
|
||||
outputs.Add("riemann", func() outputs.Output {
|
||||
return &Riemann{}
|
||||
})
|
||||
}
|
||||
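As an illustration of the fan-out in `buildEvents`, a point with two fields becomes two Riemann events, one per field, each named `<measurement>_<field>`. The sketch below is test-style example code (not part of this commit); the measurement, fields, and host tag are made-up values.

```
package riemann

import (
	"fmt"
	"time"

	"github.com/influxdb/influxdb/client/v2"
)

func ExampleBuildEvents() {
	pt, _ := client.NewPoint(
		"cpu",
		map[string]string{"host": "web01"},
		map[string]interface{}{"user": 10.0, "system": 2.0},
		time.Now(),
	)
	for _, ev := range buildEvents(pt) {
		fmt.Println(ev.Service, ev.Host, ev.Metric)
	}
	// Prints, in map-iteration order:
	//   cpu_user web01 10
	//   cpu_system web01 2
}
```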
27
plugins/outputs/riemann/riemann_test.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package riemann
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestConnectAndWrite(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
url := testutil.GetLocalHost() + ":5555"
|
||||
|
||||
r := &Riemann{
|
||||
URL: url,
|
||||
Transport: "tcp",
|
||||
}
|
||||
|
||||
err := r.Connect()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = r.Write(testutil.MockBatchPoints().Points())
|
||||
require.NoError(t, err)
|
||||
}
|
||||