package models

import (
	"fmt"
	"sync"
	"testing"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/testutil"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
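
// These tests cover RunningOutput's filtering, buffering, and write-retry
// behaviour, using the mockOutput and perfOutput stubs defined at the end of
// this file.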

var first5 = []telegraf.Metric{
	testutil.TestMetric(101, "metric1"),
	testutil.TestMetric(101, "metric2"),
	testutil.TestMetric(101, "metric3"),
	testutil.TestMetric(101, "metric4"),
	testutil.TestMetric(101, "metric5"),
}

var next5 = []telegraf.Metric{
	testutil.TestMetric(101, "metric6"),
	testutil.TestMetric(101, "metric7"),
	testutil.TestMetric(101, "metric8"),
	testutil.TestMetric(101, "metric9"),
	testutil.TestMetric(101, "metric10"),
}
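
// reverse returns a copy of metrics in reverse order. The write-failure tests
// below use it to build their expected results, which encode a newest-first
// flush order after a failed write.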
func reverse(metrics []telegraf.Metric) []telegraf.Metric {
	result := make([]telegraf.Metric, 0, len(metrics))
	for i := len(metrics) - 1; i >= 0; i-- {
		result = append(result, metrics[i])
	}
	return result
}
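
// The two numeric arguments passed to NewRunningOutput below are assumed to be
// the metric batch size and the metric buffer limit, based on how the
// write-failure tests exercise batching.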

// Benchmark adding a metric and writing on every iteration.
func BenchmarkRunningOutputAddWrite(b *testing.B) {
	conf := &OutputConfig{
		Filter: Filter{},
	}

	m := &perfOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	for n := 0; n < b.N; n++ {
		ro.AddMetric(testutil.TestMetric(101, "metric1"))
		ro.Write()
	}
}

// Benchmark adding metrics and writing on every 100th iteration.
func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) {
	conf := &OutputConfig{
		Filter: Filter{},
	}

	m := &perfOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	for n := 0; n < b.N; n++ {
		ro.AddMetric(testutil.TestMetric(101, "metric1"))
		if n%100 == 0 {
			ro.Write()
		}
	}
}

// Benchmark adding metrics to an output whose writes always fail.
func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
	conf := &OutputConfig{
		Filter: Filter{},
	}

	m := &perfOutput{}
	m.failWrite = true
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	for n := 0; n < b.N; n++ {
		ro.AddMetric(testutil.TestMetric(101, "metric1"))
	}
}

// Test that NameDrop filters get properly applied.
func TestRunningOutput_DropFilter(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			NameDrop: []string{"metric1", "metric2"},
		},
	}
	assert.NoError(t, conf.Filter.Compile())

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 8)
}

// Test that NameDrop filters without a match do nothing.
func TestRunningOutput_PassFilter(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			NameDrop: []string{"metric1000", "foo*"},
		},
	}
	assert.NoError(t, conf.Filter.Compile())

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 10)
}

// Test that a TagInclude filter with no match drops all tags.
func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			TagInclude: []string{"nothing*"},
		},
	}
	assert.NoError(t, conf.Filter.Compile())

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	ro.AddMetric(testutil.TestMetric(101, "metric1"))
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 1)
	assert.Empty(t, m.Metrics()[0].Tags())
}

// Test that a matching TagExclude filter removes the tags.
func TestRunningOutput_TagExcludeMatch(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			TagExclude: []string{"tag*"},
		},
	}
	assert.NoError(t, conf.Filter.Compile())

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	ro.AddMetric(testutil.TestMetric(101, "metric1"))
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 1)
	assert.Len(t, m.Metrics()[0].Tags(), 0)
}

// Test that a TagExclude filter with no match leaves tags in place.
func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			TagExclude: []string{"nothing*"},
		},
	}
	assert.NoError(t, conf.Filter.Compile())

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	ro.AddMetric(testutil.TestMetric(101, "metric1"))
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 1)
	assert.Len(t, m.Metrics()[0].Tags(), 1)
}

// Test that a matching TagInclude filter keeps the tags.
func TestRunningOutput_TagIncludeMatch(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			TagInclude: []string{"tag*"},
		},
	}
	assert.NoError(t, conf.Filter.Compile())

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	ro.AddMetric(testutil.TestMetric(101, "metric1"))
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 1)
	assert.Len(t, m.Metrics()[0].Tags(), 1)
}

// Test that we can write metrics with a simple default setup.
func TestRunningOutputDefault(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{},
	}

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 10)
}
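
// Test that metrics stay buffered while writes fail and are flushed once a
// write succeeds.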
func TestRunningOutputWriteFail(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{},
	}

	m := &mockOutput{}
	m.failWrite = true
	ro := NewRunningOutput("test", m, conf, 4, 12)

	// Fill the buffer well past the batch size
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	// manual write fails
	err := ro.Write()
	require.Error(t, err)
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	m.failWrite = false
	err = ro.Write()
	require.NoError(t, err)

	assert.Len(t, m.Metrics(), 10)
}

// Verify that the order of points is preserved during a write failure.
func TestRunningOutputWriteFailOrder(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{},
	}

	m := &mockOutput{}
	m.failWrite = true
	ro := NewRunningOutput("test", m, conf, 100, 1000)

	// add 5 metrics
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	// Write fails
	err := ro.Write()
	require.Error(t, err)
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	m.failWrite = false
	// add 5 more metrics
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	err = ro.Write()
	require.NoError(t, err)

	// Verify that 10 metrics were written
	assert.Len(t, m.Metrics(), 10)
	// Verify that they are in order
	expected := append(reverse(next5), reverse(first5)...)
	assert.Equal(t, expected, m.Metrics())
}

// Verify that the order of points is preserved during many write failures.
func TestRunningOutputWriteFailOrder2(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{},
	}

	m := &mockOutput{}
	m.failWrite = true
	ro := NewRunningOutput("test", m, conf, 5, 100)

	// add 5 metrics
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	// Write fails
	err := ro.Write()
	require.Error(t, err)
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	// add 5 metrics
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	// Write fails
	err = ro.Write()
	require.Error(t, err)
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	// add 5 metrics
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	// Write fails
	err = ro.Write()
	require.Error(t, err)
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	// add 5 metrics
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	// Write fails
	err = ro.Write()
	require.Error(t, err)
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	m.failWrite = false
	err = ro.Write()
	require.NoError(t, err)

	// Verify that 20 metrics were written
	assert.Len(t, m.Metrics(), 20)
	// Verify that they are in order
	expected := append(reverse(next5), reverse(first5)...)
	expected = append(expected, reverse(next5)...)
	expected = append(expected, reverse(first5)...)
	assert.Equal(t, expected, m.Metrics())
}

// Verify that the order of points is preserved when there is a remainder
// of points for the batch.
func TestRunningOutputWriteFailOrder3(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{},
	}

	m := &mockOutput{}
	m.failWrite = true
	ro := NewRunningOutput("test", m, conf, 5, 1000)

	// add 5 metrics
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	// Write fails
	err := ro.Write()
	require.Error(t, err)
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	// add and attempt to write a single metric:
	ro.AddMetric(next5[0])
	err = ro.Write()
	require.Error(t, err)

	// unset fail and write metrics
	m.failWrite = false
	err = ro.Write()
	require.NoError(t, err)

	// Verify that 6 metrics were written
	assert.Len(t, m.Metrics(), 6)
	// Verify that they are in order
	expected := []telegraf.Metric{next5[0], first5[4], first5[3], first5[2], first5[1], first5[0]}
	assert.Equal(t, expected, m.Metrics())
}
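
// mockOutput is a minimal telegraf.Output for these tests: it records every
// metric passed to Write and can be told to fail writes.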
type mockOutput struct {
	sync.Mutex

	metrics []telegraf.Metric

	// if true, mock a write failure
	failWrite bool
}

func (m *mockOutput) Connect() error {
	return nil
}

func (m *mockOutput) Close() error {
	return nil
}

func (m *mockOutput) Description() string {
	return ""
}

func (m *mockOutput) SampleConfig() string {
	return ""
}

func (m *mockOutput) Write(metrics []telegraf.Metric) error {
	m.Lock()
	defer m.Unlock()
	if m.failWrite {
		return fmt.Errorf("failed write")
	}

	if m.metrics == nil {
		m.metrics = []telegraf.Metric{}
	}

	m.metrics = append(m.metrics, metrics...)
	return nil
}

func (m *mockOutput) Metrics() []telegraf.Metric {
	m.Lock()
	defer m.Unlock()
	return m.metrics
}
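
// perfOutput is a no-op telegraf.Output for the benchmarks: it discards
// metrics and can be told to fail writes.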
type perfOutput struct {
	// if true, mock a write failure
	failWrite bool
}

func (m *perfOutput) Connect() error {
	return nil
}

func (m *perfOutput) Close() error {
	return nil
}

func (m *perfOutput) Description() string {
	return ""
}

func (m *perfOutput) SampleConfig() string {
	return ""
}

func (m *perfOutput) Write(metrics []telegraf.Metric) error {
	if m.failWrite {
		return fmt.Errorf("failed write")
	}
	return nil
}