Refactor handling of MinMax functionality into RunningAggregator

This allows for easier addition of a sliding window at a later time.

Also makes `period` a generic argument for all aggregator plugins.
Cameron Sparr
2016-09-22 18:10:51 +01:00
parent ef885eda62
commit fead80844e
7 changed files with 252 additions and 297 deletions
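
Before the per-file diff, a rough sketch of the plugin shape this commit moves to: the plugin only accumulates, emits, and clears, while period scheduling lives in the shared RunningAggregator. The interface below is an assumption inferred from the methods in the diff (Add, Push, Reset); it is not copied from the telegraf source, where the aggregator interface also carries the usual SampleConfig and Description plugin methods.

// Assumed shape of an aggregator plugin after this refactor (inferred from
// the diff below, not taken verbatim from telegraf).
package sketch

import "github.com/influxdata/telegraf"

type Aggregator interface {
	Add(in telegraf.Metric)        // feed one metric into the plugin's caches
	Push(acc telegraf.Accumulator) // write the current aggregates to the accumulator
	Reset()                        // clear the caches so the next period starts empty
}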


@@ -1,28 +1,21 @@
 package minmax
 import (
-	"sync"
-	"time"
 	"github.com/influxdata/telegraf"
-	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/plugins/aggregators"
 )
 type MinMax struct {
-	Period internal.Duration
-	// metrics waiting to be processed
-	metrics chan telegraf.Metric
-	shutdown chan struct{}
-	wg sync.WaitGroup
 	// caches for metric fields, names, and tags
 	fieldCache map[uint64]map[string]minmax
 	nameCache map[uint64]string
 	tagCache map[uint64]map[string]string
+}
-	acc telegraf.Accumulator
+func NewMinMax() telegraf.Aggregator {
+	mm := &MinMax{}
+	mm.Reset()
+	return mm
 }
 type minmax struct {
@@ -43,11 +36,7 @@ func (m *MinMax) Description() string {
 	return "Keep the aggregate min/max of each metric passing through."
 }
-func (m *MinMax) Apply(in telegraf.Metric) {
-	m.metrics <- in
-}
-func (m *MinMax) apply(in telegraf.Metric) {
+func (m *MinMax) Add(in telegraf.Metric) {
 	id := in.HashID()
 	if _, ok := m.nameCache[id]; !ok {
 		// hit an uncached metric, create caches for first time:
@@ -90,84 +79,23 @@ func (m *MinMax) apply(in telegraf.Metric) {
 	}
 }
-func (m *MinMax) Start(acc telegraf.Accumulator) error {
-	m.metrics = make(chan telegraf.Metric, 10)
-	m.shutdown = make(chan struct{})
-	m.clearCache()
-	m.acc = acc
-	m.wg.Add(1)
-	if m.Period.Duration > 0 {
-		go m.periodHandler()
-	} else {
-		go m.continuousHandler()
+func (m *MinMax) Push(acc telegraf.Accumulator) {
+	for id, _ := range m.nameCache {
+		fields := map[string]interface{}{}
+		for k, v := range m.fieldCache[id] {
+			fields[k+"_min"] = v.min
+			fields[k+"_max"] = v.max
+		}
+		acc.AddFields(m.nameCache[id], fields, m.tagCache[id])
 	}
-	return nil
 }
-func (m *MinMax) Stop() {
-	close(m.shutdown)
-	m.wg.Wait()
-}
-func (m *MinMax) addfields(id uint64) {
-	fields := map[string]interface{}{}
-	for k, v := range m.fieldCache[id] {
-		fields[k+"_min"] = v.min
-		fields[k+"_max"] = v.max
-	}
-	m.acc.AddFields(m.nameCache[id], fields, m.tagCache[id])
-}
-func (m *MinMax) clearCache() {
+func (m *MinMax) Reset() {
 	m.fieldCache = make(map[uint64]map[string]minmax)
 	m.nameCache = make(map[uint64]string)
 	m.tagCache = make(map[uint64]map[string]string)
 }
-// periodHandler only adds the aggregate metrics on the configured Period.
-// thus if telegraf's collection interval is 10s, and period is 30s, there
-// will only be one aggregate sent every 3 metrics.
-func (m *MinMax) periodHandler() {
-	// TODO make this sleep less of a hack!
-	time.Sleep(time.Millisecond * 200)
-	defer m.wg.Done()
-	ticker := time.NewTicker(m.Period.Duration)
-	defer ticker.Stop()
-	for {
-		select {
-		case in := <-m.metrics:
-			m.apply(in)
-		case <-m.shutdown:
-			if len(m.metrics) > 0 {
-				continue
-			}
-			return
-		case <-ticker.C:
-			for id, _ := range m.nameCache {
-				m.addfields(id)
-			}
-			m.clearCache()
-		}
-	}
-}
-// continuousHandler sends one metric for every metric that passes through it.
-func (m *MinMax) continuousHandler() {
-	defer m.wg.Done()
-	for {
-		select {
-		case in := <-m.metrics:
-			m.apply(in)
-			m.addfields(in.HashID())
-		case <-m.shutdown:
-			if len(m.metrics) > 0 {
-				continue
-			}
-			return
-		}
-	}
-}
 func compare(a, b float64) int {
 	if a < b {
 		return -1
@@ -190,6 +118,6 @@ func convert(in interface{}) (float64, bool) {
 func init() {
 	aggregators.Add("minmax", func() telegraf.Aggregator {
-		return &MinMax{}
+		return NewMinMax()
 	})
 }
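
The periodHandler and continuousHandler goroutines deleted above are not re-implemented inside the plugin; per the commit message, period handling becomes a generic concern of the RunningAggregator. The loop below is a hypothetical illustration of such a driver, written against the Add/Push/Reset methods from this diff; it is not the actual RunningAggregator code, and the channel and shutdown plumbing are assumptions.

package sketch

import (
	"time"

	"github.com/influxdata/telegraf"
)

// runPeriod is a hypothetical stand-in for the generic period driver: one
// ticker loop can drive any plugin exposing Add/Push/Reset, so plugins no
// longer manage goroutines, channels, or a Period setting themselves.
func runPeriod(agg telegraf.Aggregator, acc telegraf.Accumulator,
	period time.Duration, in <-chan telegraf.Metric, shutdown <-chan struct{}) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		select {
		case m := <-in:
			agg.Add(m) // accumulate into the plugin's caches
		case <-ticker.C:
			agg.Push(acc) // emit aggregates for the period that just ended
			agg.Reset()   // start the next period from an empty cache
		case <-shutdown:
			agg.Push(acc) // flush anything still pending, then stop
			return
		}
	}
}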


@@ -5,10 +5,7 @@ import (
-	"time"
 	"github.com/influxdata/telegraf"
-	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/testutil"
-	"github.com/stretchr/testify/assert"
 )
 var m1, _ = telegraf.NewMetric("m1",
@@ -48,34 +45,22 @@ var m2, _ = telegraf.NewMetric("m1",
 )
 func BenchmarkApply(b *testing.B) {
-	minmax := MinMax{}
-	minmax.clearCache()
+	minmax := NewMinMax()
 	for n := 0; n < b.N; n++ {
-		minmax.apply(m1)
-		minmax.apply(m2)
+		minmax.Add(m1)
+		minmax.Add(m2)
 	}
 }
-// Test two metrics getting added, when running with a period, and the metrics
-// are added in the same period.
+// Test two metrics getting added.
 func TestMinMaxWithPeriod(t *testing.T) {
 	acc := testutil.Accumulator{}
-	minmax := MinMax{
-		Period: internal.Duration{Duration: time.Millisecond * 500},
-	}
-	assert.NoError(t, minmax.Start(&acc))
-	defer minmax.Stop()
+	minmax := NewMinMax()
-	minmax.Apply(m1)
-	minmax.Apply(m2)
-	for {
-		if acc.NMetrics() > 0 {
-			break
-		}
-		time.Sleep(time.Millisecond)
-	}
+	minmax.Add(m1)
+	minmax.Add(m2)
+	minmax.Push(&acc)
 	expectedFields := map[string]interface{}{
 		"a_max": float64(1),
@@ -107,23 +92,14 @@ func TestMinMaxWithPeriod(t *testing.T) {
 	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
 }
-// Test two metrics getting added, when running with a period, and the metrics
-// are added in two different periods.
+// Test two metrics getting added with a push/reset in between (simulates
+// getting added in different periods.)
 func TestMinMaxDifferentPeriods(t *testing.T) {
 	acc := testutil.Accumulator{}
-	minmax := MinMax{
-		Period: internal.Duration{Duration: time.Millisecond * 100},
-	}
-	assert.NoError(t, minmax.Start(&acc))
-	defer minmax.Stop()
+	minmax := NewMinMax()
-	minmax.Apply(m1)
-	for {
-		if acc.NMetrics() > 0 {
-			break
-		}
-		time.Sleep(time.Millisecond)
-	}
+	minmax.Add(m1)
+	minmax.Push(&acc)
 	expectedFields := map[string]interface{}{
 		"a_max": float64(1),
 		"a_min": float64(1),
@@ -152,13 +128,9 @@ func TestMinMaxDifferentPeriods(t *testing.T) {
 	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
 	acc.ClearMetrics()
-	minmax.Apply(m2)
-	for {
-		if acc.NMetrics() > 0 {
-			break
-		}
-		time.Sleep(time.Millisecond)
-	}
+	minmax.Reset()
+	minmax.Add(m2)
+	minmax.Push(&acc)
 	expectedFields = map[string]interface{}{
 		"a_max": float64(1),
 		"a_min": float64(1),
@@ -188,82 +160,3 @@ func TestMinMaxDifferentPeriods(t *testing.T) {
 	}
 	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
 }
-// Test two metrics getting added, when running without a period.
-func TestMinMaxWithoutPeriod(t *testing.T) {
-	acc := testutil.Accumulator{}
-	minmax := MinMax{}
-	assert.NoError(t, minmax.Start(&acc))
-	defer minmax.Stop()
-	minmax.Apply(m1)
-	for {
-		if acc.NMetrics() > 0 {
-			break
-		}
-		time.Sleep(time.Millisecond)
-	}
-	expectedFields := map[string]interface{}{
-		"a_max": float64(1),
-		"a_min": float64(1),
-		"b_max": float64(1),
-		"b_min": float64(1),
-		"c_max": float64(1),
-		"c_min": float64(1),
-		"d_max": float64(1),
-		"d_min": float64(1),
-		"e_max": float64(1),
-		"e_min": float64(1),
-		"f_max": float64(2),
-		"f_min": float64(2),
-		"g_max": float64(2),
-		"g_min": float64(2),
-		"h_max": float64(2),
-		"h_min": float64(2),
-		"i_max": float64(2),
-		"i_min": float64(2),
-		"j_max": float64(3),
-		"j_min": float64(3),
-	}
-	expectedTags := map[string]string{
-		"foo": "bar",
-	}
-	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
-	acc.ClearMetrics()
-	minmax.Apply(m2)
-	for {
-		if acc.NMetrics() > 0 {
-			break
-		}
-		time.Sleep(time.Millisecond)
-	}
-	expectedFields = map[string]interface{}{
-		"a_max": float64(1),
-		"a_min": float64(1),
-		"b_max": float64(3),
-		"b_min": float64(1),
-		"c_max": float64(3),
-		"c_min": float64(1),
-		"d_max": float64(3),
-		"d_min": float64(1),
-		"e_max": float64(3),
-		"e_min": float64(1),
-		"f_max": float64(2),
-		"f_min": float64(1),
-		"g_max": float64(2),
-		"g_min": float64(1),
-		"h_max": float64(2),
-		"h_min": float64(1),
-		"i_max": float64(2),
-		"i_min": float64(1),
-		"j_max": float64(3),
-		"j_min": float64(1),
-		"k_max": float64(200),
-		"k_min": float64(200),
-	}
-	expectedTags = map[string]string{
-		"foo": "bar",
-	}
-	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
-}