Utilizing new client and overhauling Accumulator interface

Fixes #280
Fixes #281
Fixes #289
commit c26ce9c4fe
parent 6263bc2d1b
Author: Cameron Sparr
Date: 2015-10-16 16:13:32 -06:00
27 changed files with 498 additions and 550 deletions


@@ -3,11 +3,13 @@ package exec
 import (
 	"bytes"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"github.com/gonuts/go-shellquote"
 	"github.com/influxdb/telegraf/plugins"
 	"math"
 	"os/exec"
+	"strings"
 	"sync"
 	"time"
 )
@@ -88,19 +90,32 @@ func (e *Exec) Description() string {
 func (e *Exec) Gather(acc plugins.Accumulator) error {
 	var wg sync.WaitGroup
-	var outerr error
+	errorChannel := make(chan error, len(e.Commands))
 	for _, c := range e.Commands {
 		wg.Add(1)
 		go func(c *Command, acc plugins.Accumulator) {
 			defer wg.Done()
-			outerr = e.gatherCommand(c, acc)
+			err := e.gatherCommand(c, acc)
+			if err != nil {
+				errorChannel <- err
+			}
 		}(c, acc)
 	}
 	wg.Wait()
-	return outerr
+	close(errorChannel)
+	// Get all errors and return them as one giant error
+	errorStrings := []string{}
+	for err := range errorChannel {
+		errorStrings = append(errorStrings, err.Error())
+	}
+	if len(errorStrings) == 0 {
+		return nil
+	}
+	return errors.New(strings.Join(errorStrings, "\n"))
 }
 func (e *Exec) gatherCommand(c *Command, acc plugins.Accumulator) error {
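
The Gather change above swaps a single shared error variable, which every goroutine wrote to, for a buffered channel that collects each failure and joins them into one error. A minimal standalone sketch of that fan-in pattern, with illustrative names (runAll and the sample tasks are not part of Telegraf):

    package main

    import (
    	"errors"
    	"fmt"
    	"strings"
    	"sync"
    )

    // runAll runs every task concurrently and joins any failures into a single
    // error, mirroring the buffered-channel pattern used in Gather above.
    func runAll(tasks []func() error) error {
    	var wg sync.WaitGroup
    	errCh := make(chan error, len(tasks)) // buffered so no goroutine blocks on send

    	for _, task := range tasks {
    		wg.Add(1)
    		go func(t func() error) {
    			defer wg.Done()
    			if err := t(); err != nil {
    				errCh <- err
    			}
    		}(task)
    	}
    	wg.Wait()
    	close(errCh)

    	msgs := []string{}
    	for err := range errCh {
    		msgs = append(msgs, err.Error())
    	}
    	if len(msgs) == 0 {
    		return nil
    	}
    	return errors.New(strings.Join(msgs, "\n"))
    }

    func main() {
    	err := runAll([]func() error{
    		func() error { return nil },
    		func() error { return fmt.Errorf("command failed") },
    	})
    	fmt.Println(err) // prints: command failed
    }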


@@ -93,7 +93,7 @@ func emitMetrics(k *Kafka, acc plugins.Accumulator, metricConsumer <-chan []byte
 		}
 		for _, point := range points {
-			acc.AddFieldsWithTime(point.Name(), point.Fields(), point.Tags(), point.Time())
+			acc.AddFields(point.Name(), point.Fields(), point.Tags(), point.Time())
 		}
 	case <-timeout:
 		return nil


@@ -50,5 +50,5 @@ func TestReadsMetricsFromKafka(t *testing.T) {
 		"direction": "in",
 		"region": "us-west",
 	}, point.Tags)
-	assert.Equal(t, time.Unix(0, 1422568543702900257), point.Time)
+	assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix())
 }


@@ -89,7 +89,7 @@ func (d *MongodbData) addStat(acc plugins.Accumulator, statLine reflect.Value, s
 }
 func (d *MongodbData) add(acc plugins.Accumulator, key string, val interface{}) {
-	acc.AddFieldsWithTime(
+	acc.AddFields(
 		key,
 		map[string]interface{}{
 			"value": val,


@@ -6,17 +6,15 @@ type Accumulator interface {
 	// Create a point with a value, decorating it with tags
 	// NOTE: tags is expected to be owned by the caller, don't mutate
 	// it after passing to Add.
-	Add(measurement string, value interface{}, tags map[string]string)
-	// Create a point with a set of values, decorating it with tags
-	// NOTE: tags and values are expected to be owned by the caller, don't mutate
-	// them after passing to AddFieldsWithTime.
-	AddFieldsWithTime(
-		measurement string,
-		values map[string]interface{},
+	Add(measurement string,
+		value interface{},
 		tags map[string]string,
-		timestamp time.Time,
-	)
+		t ...time.Time)
+	AddFields(measurement string,
+		fields map[string]interface{},
+		tags map[string]string,
+		t ...time.Time)
 }
 type Plugin interface {
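
Net effect of the interface change: the separate AddFieldsWithTime method is gone, and both Add and AddFields take the timestamp as a trailing variadic argument, so callers may omit it. A hypothetical caller against the new interface could look like the sketch below; the package name, field values, and the assumption that an omitted timestamp means the accumulator stamps the point itself are illustrative, not taken from this commit.

    package example

    import (
    	"time"

    	"github.com/influxdb/telegraf/plugins"
    )

    // Gather is a hypothetical plugin method written against the reworked
    // Accumulator. When the trailing time.Time is omitted, the accumulator
    // is free to assign the collection time itself (assumed behavior).
    func Gather(acc plugins.Accumulator) error {
    	tags := map[string]string{"host": "localhost"}

    	// single-value point, no explicit timestamp
    	acc.Add("load1", 0.42, tags)

    	// multi-field point with an explicit timestamp
    	acc.AddFields("memory",
    		map[string]interface{}{
    			"used":  int64(1024),
    			"total": int64(4096),
    		},
    		tags,
    		time.Now())

    	return nil
    }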


@@ -22,19 +22,20 @@ func TestZookeeperGeneratesMetrics(t *testing.T) {
 	err := z.Gather(&acc)
 	require.NoError(t, err)
-	intMetrics := []string{"zookeeper_avg_latency",
-		"zookeeper_max_latency",
-		"zookeeper_min_latency",
-		"zookeeper_packets_received",
-		"zookeeper_packets_sent",
-		"zookeeper_outstanding_requests",
-		"zookeeper_znode_count",
-		"zookeeper_watch_count",
-		"zookeeper_ephemerals_count",
-		"zookeeper_approximate_data_size",
-		"zookeeper_pending_syncs",
-		"zookeeper_open_file_descriptor_count",
-		"zookeeper_max_file_descriptor_count"}
+	intMetrics := []string{
+		"avg_latency",
+		"max_latency",
+		"min_latency",
+		"packets_received",
+		"packets_sent",
+		"outstanding_requests",
+		"znode_count",
+		"watch_count",
+		"ephemerals_count",
+		"approximate_data_size",
+		"open_file_descriptor_count",
+		"max_file_descriptor_count",
+	}
 	for _, metric := range intMetrics {
 		assert.True(t, acc.HasIntValue(metric), metric)
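
The Zookeeper test above now asserts on bare field names via acc.HasIntValue. Roughly, such a test helper just scans the points the accumulator has collected for an integer value recorded under the given name; the sketch below is an illustrative stand-in, not the actual testutil implementation, and assumes a simple in-memory point store.

    package testacc

    import "time"

    // point is one collected measurement with its value(s) and tags.
    type point struct {
    	Measurement string
    	Values      map[string]interface{}
    	Tags        map[string]string
    }

    // Accumulator is a toy in-memory accumulator for tests.
    type Accumulator struct {
    	Points []point
    }

    // Add records a single-value point; the optional timestamp is ignored here.
    func (a *Accumulator) Add(measurement string, value interface{},
    	tags map[string]string, t ...time.Time) {
    	a.Points = append(a.Points, point{
    		Measurement: measurement,
    		Values:      map[string]interface{}{"value": value},
    		Tags:        tags,
    	})
    }

    // HasIntValue reports whether some collected point with this name carries
    // an integer value.
    func (a *Accumulator) HasIntValue(measurement string) bool {
    	for _, p := range a.Points {
    		if p.Measurement != measurement {
    			continue
    		}
    		switch p.Values["value"].(type) {
    		case int, int32, int64:
    			return true
    		}
    	}
    	return false
    }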