use AddError everywhere (#2372)

Patrick Hemmer 2017-04-24 14:13:26 -04:00 committed by Daniel Nelson
parent 801f6cb8a0
commit 06baf7cf78
95 changed files with 341 additions and 531 deletions

View File

@ -1,37 +0,0 @@
package errchan

import (
    "fmt"
    "strings"
)

type ErrChan struct {
    C chan error
}

// New returns an error channel of max length 'n'
// errors can be sent to the ErrChan.C channel, and will be returned when
// ErrChan.Error() is called.
func New(n int) *ErrChan {
    return &ErrChan{
        C: make(chan error, n),
    }
}

// Error closes the ErrChan.C channel and returns an error if there are any
// non-nil errors, otherwise returns nil.
func (e *ErrChan) Error() error {
    close(e.C)

    var out string
    for err := range e.C {
        if err != nil {
            out += "[" + err.Error() + "], "
        }
    }

    if out != "" {
        return fmt.Errorf("Errors encountered: " + strings.TrimRight(out, ", "))
    }
    return nil
}
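
For reference, this is roughly how input plugins used the deleted errchan helper, next to the acc.AddError call that replaces it in the diffs below. It is a minimal self-contained sketch, not code from this commit: Accumulator stands in for telegraf.Accumulator (only AddError is shown), and gatherServer and printAcc are placeholder names.

package main

import (
    "errors"
    "fmt"
)

// Accumulator is a minimal stand-in for telegraf.Accumulator; AddError is the
// only method this commit is concerned with.
type Accumulator interface {
    AddError(err error)
}

// gatherServer is a placeholder for a plugin's per-server collection logic.
func gatherServer(addr string) error {
    return errors.New("connect " + addr + ": connection refused")
}

// Old pattern (removed in this commit): buffer every error in an ErrChan,
// then join them into one error returned from Gather.
//
//    errChan := errchan.New(len(servers))
//    for _, s := range servers {
//        errChan.C <- gatherServer(s)
//    }
//    return errChan.Error()

// New pattern: hand each error to the accumulator as it happens and return
// nil; the agent reports accumulated errors itself.
func Gather(acc Accumulator, servers []string) error {
    for _, s := range servers {
        acc.AddError(gatherServer(s))
    }
    return nil
}

// printAcc is a tiny accumulator so the sketch runs on its own.
type printAcc struct{}

func (printAcc) AddError(err error) {
    if err != nil {
        fmt.Println("E!", err)
    }
}

func main() {
    Gather(printAcc{}, []string{"10.0.0.1:3000", "10.0.0.2:3000"})
}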

View File

@ -10,7 +10,6 @@ import (
"time" "time"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
as "github.com/aerospike/aerospike-client-go" as "github.com/aerospike/aerospike-client-go"
@ -41,17 +40,16 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
} }
var wg sync.WaitGroup var wg sync.WaitGroup
errChan := errchan.New(len(a.Servers))
wg.Add(len(a.Servers)) wg.Add(len(a.Servers))
for _, server := range a.Servers { for _, server := range a.Servers {
go func(serv string) { go func(serv string) {
defer wg.Done() defer wg.Done()
errChan.C <- a.gatherServer(serv, acc) acc.AddError(a.gatherServer(serv, acc))
}(server) }(server)
} }
wg.Wait() wg.Wait()
return errChan.Error() return nil
} }
func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) error { func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) error {

View File

@ -19,7 +19,7 @@ func TestAerospikeStatistics(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err := a.Gather(&acc) err := acc.GatherError(a.Gather)
require.NoError(t, err) require.NoError(t, err)
assert.True(t, acc.HasMeasurement("aerospike_node")) assert.True(t, acc.HasMeasurement("aerospike_node"))
@ -41,8 +41,7 @@ func TestAerospikeStatisticsPartialErr(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err := a.Gather(&acc) require.Error(t, acc.GatherError(a.Gather))
require.Error(t, err)
assert.True(t, acc.HasMeasurement("aerospike_node")) assert.True(t, acc.HasMeasurement("aerospike_node"))
assert.True(t, acc.HasMeasurement("aerospike_namespace")) assert.True(t, acc.HasMeasurement("aerospike_namespace"))

View File

@ -8,6 +8,7 @@ import (
"net/url" "net/url"
"strconv" "strconv"
"strings" "strings"
"sync"
"time" "time"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
@ -65,28 +66,23 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
n.ResponseTimeout.Duration = time.Second * 5 n.ResponseTimeout.Duration = time.Second * 5
} }
var outerr error var wg sync.WaitGroup
var errch = make(chan error) wg.Add(len(n.Urls))
for _, u := range n.Urls { for _, u := range n.Urls {
addr, err := url.Parse(u) addr, err := url.Parse(u)
if err != nil { if err != nil {
return fmt.Errorf("Unable to parse address '%s': %s", u, err) acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err))
continue
} }
go func(addr *url.URL) { go func(addr *url.URL) {
errch <- n.gatherUrl(addr, acc) defer wg.Done()
acc.AddError(n.gatherUrl(addr, acc))
}(addr) }(addr)
} }
// Drain channel, waiting for all requests to finish and save last error. wg.Wait()
for range n.Urls { return nil
if err := <-errch; err != nil {
outerr = err
}
}
return outerr
} }
func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
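
The apache change above is the template most multi-server plugins follow in this commit: one goroutine per URL, each error handed straight to the accumulator, and a WaitGroup replacing the old drain-the-error-channel loop. Below is a minimal sketch of that shape, under the assumption that AddError is safe to call concurrently (every goroutine uses it); gatherURL and logAcc are placeholders, not Telegraf code.

package main

import (
    "fmt"
    "net/url"
    "sync"
)

// Accumulator stands in for telegraf.Accumulator; AddError is the only
// method this sketch needs.
type Accumulator interface {
    AddError(err error)
}

type logAcc struct{ mu sync.Mutex }

func (l *logAcc) AddError(err error) {
    if err == nil {
        return
    }
    l.mu.Lock()
    fmt.Println("E!", err)
    l.mu.Unlock()
}

// gatherURL is a placeholder for the per-host fetch (gatherUrl in the apache
// plugin above).
func gatherURL(addr *url.URL, acc Accumulator) error {
    return fmt.Errorf("GET %s: connection refused", addr)
}

// Gather fans out one goroutine per configured URL. Both parse failures and
// fetch failures go to AddError, and Gather returns nil, so a single bad host
// no longer aborts collection for the rest.
func Gather(acc Accumulator, urls []string) error {
    var wg sync.WaitGroup
    wg.Add(len(urls))
    for _, u := range urls {
        addr, err := url.Parse(u)
        if err != nil {
            acc.AddError(fmt.Errorf("unable to parse address '%s': %s", u, err))
            wg.Done() // keep the WaitGroup balanced on the early-continue path
            continue
        }
        go func(addr *url.URL) {
            defer wg.Done()
            acc.AddError(gatherURL(addr, acc))
        }(addr)
    }
    wg.Wait()
    return nil
}

func main() {
    Gather(&logAcc{}, []string{"http://localhost/server-status?auto", "://bad"})
}

Note that the sketch calls wg.Done() on the parse-error path so the WaitGroup stays balanced when a URL is skipped.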

View File

@ -41,7 +41,7 @@ func TestHTTPApache(t *testing.T) {
} }
var acc testutil.Accumulator var acc testutil.Accumulator
err := a.Gather(&acc) err := acc.GatherError(a.Gather)
require.NoError(t, err) require.NoError(t, err)
fields := map[string]interface{}{ fields := map[string]interface{}{

View File

@ -7,7 +7,6 @@ import (
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
"io/ioutil" "io/ioutil"
"log"
"net/http" "net/http"
"net/url" "net/url"
"strings" "strings"
@ -123,8 +122,8 @@ func (j javaMetric) addTagsFields(out map[string]interface{}) {
} }
j.acc.AddFields(tokens["class"]+tokens["type"], fields, tags) j.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
} else { } else {
fmt.Printf("Missing key 'value' in '%s' output response\n%v\n", j.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n",
j.metric, out) j.metric, out))
} }
} }
@ -155,8 +154,8 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
addCassandraMetric(k, c, v.(map[string]interface{})) addCassandraMetric(k, c, v.(map[string]interface{}))
} }
} else { } else {
fmt.Printf("Missing key 'value' in '%s' output response\n%v\n", c.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n",
c.metric, out) c.metric, out))
return return
} }
} else { } else {
@ -164,8 +163,8 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
addCassandraMetric(r.(map[string]interface{})["mbean"].(string), addCassandraMetric(r.(map[string]interface{})["mbean"].(string),
c, values.(map[string]interface{})) c, values.(map[string]interface{}))
} else { } else {
fmt.Printf("Missing key 'value' in '%s' output response\n%v\n", c.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n",
c.metric, out) c.metric, out))
return return
} }
} }
@ -274,8 +273,8 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
m = newCassandraMetric(serverTokens["host"], metric, acc) m = newCassandraMetric(serverTokens["host"], metric, acc)
} else { } else {
// unsupported metric type // unsupported metric type
log.Printf("I! Unsupported Cassandra metric [%s], skipping", acc.AddError(fmt.Errorf("E! Unsupported Cassandra metric [%s], skipping",
metric) metric))
continue continue
} }
@ -283,7 +282,8 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
requestUrl, err := url.Parse("http://" + serverTokens["host"] + ":" + requestUrl, err := url.Parse("http://" + serverTokens["host"] + ":" +
serverTokens["port"] + context + metric) serverTokens["port"] + context + metric)
if err != nil { if err != nil {
return err acc.AddError(err)
continue
} }
if serverTokens["user"] != "" && serverTokens["passwd"] != "" { if serverTokens["user"] != "" && serverTokens["passwd"] != "" {
requestUrl.User = url.UserPassword(serverTokens["user"], requestUrl.User = url.UserPassword(serverTokens["user"],
@ -291,8 +291,12 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
} }
out, err := c.getAttr(requestUrl) out, err := c.getAttr(requestUrl)
if err != nil {
acc.AddError(err)
continue
}
if out["status"] != 200.0 { if out["status"] != 200.0 {
fmt.Printf("URL returned with status %v\n", out["status"]) acc.AddError(fmt.Errorf("URL returned with status %v\n", out["status"]))
continue continue
} }
m.addTagsFields(out) m.addTagsFields(out)

View File

@ -151,7 +151,7 @@ func TestHttpJsonJavaMultiValue(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
acc.SetDebug(true) acc.SetDebug(true)
err := cassandra.Gather(&acc) err := acc.GatherError(cassandra.Gather)
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, 2, len(acc.Metrics)) assert.Equal(t, 2, len(acc.Metrics))
@ -180,7 +180,7 @@ func TestHttpJsonJavaMultiType(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
acc.SetDebug(true) acc.SetDebug(true)
err := cassandra.Gather(&acc) err := acc.GatherError(cassandra.Gather)
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, 2, len(acc.Metrics)) assert.Equal(t, 2, len(acc.Metrics))
@ -197,16 +197,17 @@ func TestHttpJsonJavaMultiType(t *testing.T) {
} }
// Test that the proper values are ignored or collected // Test that the proper values are ignored or collected
func TestHttpJsonOn404(t *testing.T) { func TestHttp404(t *testing.T) {
jolokia := genJolokiaClientStub(validJavaMultiValueJSON, 404, Servers, jolokia := genJolokiaClientStub(invalidJSON, 404, Servers,
[]string{HeapMetric}) []string{HeapMetric})
var acc testutil.Accumulator var acc testutil.Accumulator
err := jolokia.Gather(&acc) err := acc.GatherError(jolokia.Gather)
assert.Nil(t, err) assert.Error(t, err)
assert.Equal(t, 0, len(acc.Metrics)) assert.Equal(t, 0, len(acc.Metrics))
assert.Contains(t, err.Error(), "has status code 404")
} }
// Test that the proper values are ignored or collected for class=Cassandra // Test that the proper values are ignored or collected for class=Cassandra
@ -214,7 +215,7 @@ func TestHttpJsonCassandraMultiValue(t *testing.T) {
cassandra := genJolokiaClientStub(validCassandraMultiValueJSON, 200, Servers, []string{ReadLatencyMetric}) cassandra := genJolokiaClientStub(validCassandraMultiValueJSON, 200, Servers, []string{ReadLatencyMetric})
var acc testutil.Accumulator var acc testutil.Accumulator
err := cassandra.Gather(&acc) err := acc.GatherError(cassandra.Gather)
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, 1, len(acc.Metrics)) assert.Equal(t, 1, len(acc.Metrics))
@ -246,7 +247,7 @@ func TestHttpJsonCassandraNestedMultiValue(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
acc.SetDebug(true) acc.SetDebug(true)
err := cassandra.Gather(&acc) err := acc.GatherError(cassandra.Gather)
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, 2, len(acc.Metrics)) assert.Equal(t, 2, len(acc.Metrics))

View File

@ -101,12 +101,12 @@ func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error {
for _, s := range sockets { for _, s := range sockets {
dump, err := perfDump(c.CephBinary, s) dump, err := perfDump(c.CephBinary, s)
if err != nil { if err != nil {
log.Printf("E! error reading from socket '%s': %v", s.socket, err) acc.AddError(fmt.Errorf("E! error reading from socket '%s': %v", s.socket, err))
continue continue
} }
data, err := parseDump(dump) data, err := parseDump(dump)
if err != nil { if err != nil {
log.Printf("E! error parsing dump from socket '%s': %v", s.socket, err) acc.AddError(fmt.Errorf("E! error parsing dump from socket '%s': %v", s.socket, err))
continue continue
} }
for tag, metrics := range data { for tag, metrics := range data {

View File

@ -22,10 +22,11 @@ func (g *CGroup) Gather(acc telegraf.Accumulator) error {
for dir := range list { for dir := range list {
if dir.err != nil { if dir.err != nil {
return dir.err acc.AddError(dir.err)
continue
} }
if err := g.gatherDir(dir.path, acc); err != nil { if err := g.gatherDir(dir.path, acc); err != nil {
return err acc.AddError(err)
} }
} }

View File

@ -24,7 +24,7 @@ var cg1 = &CGroup{
func TestCgroupStatistics_1(t *testing.T) { func TestCgroupStatistics_1(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err := cg1.Gather(&acc) err := acc.GatherError(cg1.Gather)
require.NoError(t, err) require.NoError(t, err)
tags := map[string]string{ tags := map[string]string{
@ -56,7 +56,7 @@ var cg2 = &CGroup{
func TestCgroupStatistics_2(t *testing.T) { func TestCgroupStatistics_2(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err := cg2.Gather(&acc) err := acc.GatherError(cg2.Gather)
require.NoError(t, err) require.NoError(t, err)
tags := map[string]string{ tags := map[string]string{
@ -81,7 +81,7 @@ var cg3 = &CGroup{
func TestCgroupStatistics_3(t *testing.T) { func TestCgroupStatistics_3(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err := cg3.Gather(&acc) err := acc.GatherError(cg3.Gather)
require.NoError(t, err) require.NoError(t, err)
tags := map[string]string{ tags := map[string]string{
@ -108,7 +108,7 @@ var cg4 = &CGroup{
func TestCgroupStatistics_4(t *testing.T) { func TestCgroupStatistics_4(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err := cg4.Gather(&acc) err := acc.GatherError(cg4.Gather)
require.NoError(t, err) require.NoError(t, err)
tags := map[string]string{ tags := map[string]string{
@ -140,7 +140,7 @@ var cg5 = &CGroup{
func TestCgroupStatistics_5(t *testing.T) { func TestCgroupStatistics_5(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err := cg5.Gather(&acc) err := acc.GatherError(cg5.Gather)
require.NoError(t, err) require.NoError(t, err)
tags := map[string]string{ tags := map[string]string{
@ -167,7 +167,7 @@ var cg6 = &CGroup{
func TestCgroupStatistics_6(t *testing.T) { func TestCgroupStatistics_6(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err := cg6.Gather(&acc) err := acc.GatherError(cg6.Gather)
require.NoError(t, err) require.NoError(t, err)
tags := map[string]string{ tags := map[string]string{

View File

@ -13,7 +13,6 @@ import (
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal"
internalaws "github.com/influxdata/telegraf/internal/config/aws" internalaws "github.com/influxdata/telegraf/internal/config/aws"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/internal/limiter" "github.com/influxdata/telegraf/internal/limiter"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
) )
@ -186,8 +185,6 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
if err != nil { if err != nil {
return err return err
} }
metricCount := len(metrics)
errChan := errchan.New(metricCount)
now := time.Now() now := time.Now()
@ -202,12 +199,12 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
<-lmtr.C <-lmtr.C
go func(inm *cloudwatch.Metric) { go func(inm *cloudwatch.Metric) {
defer wg.Done() defer wg.Done()
c.gatherMetric(acc, inm, now, errChan.C) acc.AddError(c.gatherMetric(acc, inm, now))
}(m) }(m)
} }
wg.Wait() wg.Wait()
return errChan.Error() return nil
} }
func init() { func init() {
@ -285,13 +282,11 @@ func (c *CloudWatch) gatherMetric(
acc telegraf.Accumulator, acc telegraf.Accumulator,
metric *cloudwatch.Metric, metric *cloudwatch.Metric,
now time.Time, now time.Time,
errChan chan error, ) error {
) {
params := c.getStatisticsInput(metric, now) params := c.getStatisticsInput(metric, now)
resp, err := c.client.GetMetricStatistics(params) resp, err := c.client.GetMetricStatistics(params)
if err != nil { if err != nil {
errChan <- err return err
return
} }
for _, point := range resp.Datapoints { for _, point := range resp.Datapoints {
@ -326,7 +321,7 @@ func (c *CloudWatch) gatherMetric(
acc.AddFields(formatMeasurement(c.Namespace), fields, tags, *point.Timestamp) acc.AddFields(formatMeasurement(c.Namespace), fields, tags, *point.Timestamp)
} }
errChan <- nil return nil
} }
/* /*

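The cloudwatch hunks also show the second half of the cleanup: helpers that used to take an errChan chan error argument now simply return an error, and the goroutine that invokes them forwards it with acc.AddError. Here is a compressed, self-contained sketch of that signature change; gatherMetric and addError are stand-ins, not the plugin's real code.

package main

import (
    "fmt"
    "sync"
    "time"
)

// gatherMetric sketches the new shape of the worker: it returns its error
// instead of writing to a caller-supplied errChan (the old version ended with
// `errChan <- err; return` on failure and `errChan <- nil` on success).
func gatherMetric(name string, now time.Time) error {
    return fmt.Errorf("%s at %s: request throttled", name, now.Format(time.RFC3339))
}

func main() {
    // addError stands in for acc.AddError.
    addError := func(err error) {
        if err != nil {
            fmt.Println("E!", err)
        }
    }

    now := time.Now()
    var wg sync.WaitGroup
    for _, m := range []string{"CPUUtilization", "NetworkIn"} {
        wg.Add(1)
        go func(m string) {
            defer wg.Done()
            // The caller, not the worker, decides where the error goes.
            addError(gatherMetric(m, now))
        }(m)
    }
    wg.Wait()
}
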
View File

@ -64,7 +64,7 @@ func TestGather(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
c.client = &mockGatherCloudWatchClient{} c.client = &mockGatherCloudWatchClient{}
c.Gather(&acc) acc.GatherError(c.Gather)
fields := map[string]interface{}{} fields := map[string]interface{}{}
fields["latency_minimum"] = 0.1 fields["latency_minimum"] = 0.1

View File

@ -11,7 +11,6 @@ import (
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
"log"
"path/filepath" "path/filepath"
) )
@ -93,15 +92,15 @@ func (c *Conntrack) Gather(acc telegraf.Accumulator) error {
contents, err := ioutil.ReadFile(fName) contents, err := ioutil.ReadFile(fName)
if err != nil { if err != nil {
log.Printf("E! failed to read file '%s': %v", fName, err) acc.AddError(fmt.Errorf("E! failed to read file '%s': %v", fName, err))
continue continue
} }
v := strings.TrimSpace(string(contents)) v := strings.TrimSpace(string(contents))
fields[metricKey], err = strconv.ParseFloat(v, 64) fields[metricKey], err = strconv.ParseFloat(v, 64)
if err != nil { if err != nil {
log.Printf("E! failed to parse metric, expected number but "+ acc.AddError(fmt.Errorf("E! failed to parse metric, expected number but "+
" found '%s': %v", v, err) " found '%s': %v", v, err))
} }
} }
} }

View File

@ -42,19 +42,17 @@ func (r *Couchbase) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup var wg sync.WaitGroup
var outerr error
for _, serv := range r.Servers { for _, serv := range r.Servers {
wg.Add(1) wg.Add(1)
go func(serv string) { go func(serv string) {
defer wg.Done() defer wg.Done()
outerr = r.gatherServer(serv, acc, nil) acc.AddError(r.gatherServer(serv, acc, nil))
}(serv) }(serv)
} }
wg.Wait() wg.Wait()
return outerr return nil
} }
func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *couchbase.Pool) error { func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *couchbase.Pool) error {

View File

@ -2,13 +2,11 @@ package couchdb
import ( import (
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
"net/http" "net/http"
"reflect" "reflect"
"strings"
"sync" "sync"
"time" "time"
) )
@ -83,34 +81,20 @@ func (*CouchDB) SampleConfig() string {
} }
func (c *CouchDB) Gather(accumulator telegraf.Accumulator) error { func (c *CouchDB) Gather(accumulator telegraf.Accumulator) error {
errorChannel := make(chan error, len(c.HOSTs))
var wg sync.WaitGroup var wg sync.WaitGroup
for _, u := range c.HOSTs { for _, u := range c.HOSTs {
wg.Add(1) wg.Add(1)
go func(host string) { go func(host string) {
defer wg.Done() defer wg.Done()
if err := c.fetchAndInsertData(accumulator, host); err != nil { if err := c.fetchAndInsertData(accumulator, host); err != nil {
errorChannel <- fmt.Errorf("[host=%s]: %s", host, err) accumulator.AddError(fmt.Errorf("[host=%s]: %s", host, err))
} }
}(u) }(u)
} }
wg.Wait() wg.Wait()
close(errorChannel)
// If there weren't any errors, we can return nil now.
if len(errorChannel) == 0 {
return nil return nil
}
// There were errors, so join them all together as one big error.
errorStrings := make([]string, 0, len(errorChannel))
for err := range errorChannel {
errorStrings = append(errorStrings, err.Error())
}
return errors.New(strings.Join(errorStrings, "\n"))
} }
var tr = &http.Transport{ var tr = &http.Transport{

View File

@ -316,5 +316,5 @@ func TestBasic(t *testing.T) {
} }
var acc testutil.Accumulator var acc testutil.Accumulator
require.NoError(t, plugin.Gather(&acc)) require.NoError(t, acc.GatherError(plugin.Gather))
} }

View File

@ -75,12 +75,11 @@ func (g *Disque) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup var wg sync.WaitGroup
var outerr error
for _, serv := range g.Servers { for _, serv := range g.Servers {
u, err := url.Parse(serv) u, err := url.Parse(serv)
if err != nil { if err != nil {
return fmt.Errorf("Unable to parse to address '%s': %s", serv, err) acc.AddError(fmt.Errorf("Unable to parse to address '%s': %s", serv, err))
continue
} else if u.Scheme == "" { } else if u.Scheme == "" {
// fallback to simple string based address (i.e. "10.0.0.1:10000") // fallback to simple string based address (i.e. "10.0.0.1:10000")
u.Scheme = "tcp" u.Scheme = "tcp"
@ -90,13 +89,13 @@ func (g *Disque) Gather(acc telegraf.Accumulator) error {
wg.Add(1) wg.Add(1)
go func(serv string) { go func(serv string) {
defer wg.Done() defer wg.Done()
outerr = g.gatherServer(u, acc) acc.AddError(g.gatherServer(u, acc))
}(serv) }(serv)
} }
wg.Wait() wg.Wait()
return outerr return nil
} }
const defaultPort = "7711" const defaultPort = "7711"

View File

@ -51,7 +51,7 @@ func TestDisqueGeneratesMetrics(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err = r.Gather(&acc) err = acc.GatherError(r.Gather)
require.NoError(t, err) require.NoError(t, err)
fields := map[string]interface{}{ fields := map[string]interface{}{
@ -117,7 +117,7 @@ func TestDisqueCanPullStatsFromMultipleServers(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err = r.Gather(&acc) err = acc.GatherError(r.Gather)
require.NoError(t, err) require.NoError(t, err)
fields := map[string]interface{}{ fields := map[string]interface{}{

View File

@ -9,7 +9,6 @@ import (
"time" "time"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
) )
@ -58,11 +57,10 @@ func (d *DnsQuery) Description() string {
func (d *DnsQuery) Gather(acc telegraf.Accumulator) error { func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
d.setDefaultValues() d.setDefaultValues()
errChan := errchan.New(len(d.Domains) * len(d.Servers))
for _, domain := range d.Domains { for _, domain := range d.Domains {
for _, server := range d.Servers { for _, server := range d.Servers {
dnsQueryTime, err := d.getDnsQueryTime(domain, server) dnsQueryTime, err := d.getDnsQueryTime(domain, server)
errChan.C <- err acc.AddError(err)
tags := map[string]string{ tags := map[string]string{
"server": server, "server": server,
"domain": domain, "domain": domain,
@ -74,7 +72,7 @@ func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
} }
} }
return errChan.Error() return nil
} }
func (d *DnsQuery) setDefaultValues() { func (d *DnsQuery) setDefaultValues() {

View File

@ -24,7 +24,7 @@ func TestGathering(t *testing.T) {
} }
var acc testutil.Accumulator var acc testutil.Accumulator
err := dnsConfig.Gather(&acc) err := acc.GatherError(dnsConfig.Gather)
assert.NoError(t, err) assert.NoError(t, err)
metric, ok := acc.Get("dns_query") metric, ok := acc.Get("dns_query")
require.True(t, ok) require.True(t, ok)
@ -44,7 +44,7 @@ func TestGatheringMxRecord(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
dnsConfig.RecordType = "MX" dnsConfig.RecordType = "MX"
err := dnsConfig.Gather(&acc) err := acc.GatherError(dnsConfig.Gather)
assert.NoError(t, err) assert.NoError(t, err)
metric, ok := acc.Get("dns_query") metric, ok := acc.Get("dns_query")
require.True(t, ok) require.True(t, ok)
@ -70,7 +70,7 @@ func TestGatheringRootDomain(t *testing.T) {
} }
fields := map[string]interface{}{} fields := map[string]interface{}{}
err := dnsConfig.Gather(&acc) err := acc.GatherError(dnsConfig.Gather)
assert.NoError(t, err) assert.NoError(t, err)
metric, ok := acc.Get("dns_query") metric, ok := acc.Get("dns_query")
require.True(t, ok) require.True(t, ok)
@ -96,7 +96,7 @@ func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) {
} }
fields := map[string]interface{}{} fields := map[string]interface{}{}
err := dnsConfig.Gather(&acc) err := acc.GatherError(dnsConfig.Gather)
assert.NoError(t, err) assert.NoError(t, err)
metric, ok := acc.Get("dns_query") metric, ok := acc.Get("dns_query")
require.True(t, ok) require.True(t, ok)
@ -121,7 +121,7 @@ func TestGatheringTimeout(t *testing.T) {
channel := make(chan error, 1) channel := make(chan error, 1)
go func() { go func() {
channel <- dnsConfig.Gather(&acc) channel <- acc.GatherError(dnsConfig.Gather)
}() }()
select { select {
case res := <-channel: case res := <-channel:

View File

@ -5,7 +5,6 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"log"
"regexp" "regexp"
"strconv" "strconv"
"strings" "strings"
@ -159,7 +158,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
// Get daemon info // Get daemon info
err := d.gatherInfo(acc) err := d.gatherInfo(acc)
if err != nil { if err != nil {
fmt.Println(err.Error()) acc.AddError(err)
} }
// List containers // List containers
@ -179,8 +178,8 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
defer wg.Done() defer wg.Done()
err := d.gatherContainer(c, acc) err := d.gatherContainer(c, acc)
if err != nil { if err != nil {
log.Printf("E! Error gathering container %s stats: %s\n", acc.AddError(fmt.Errorf("E! Error gathering container %s stats: %s\n",
c.Names, err.Error()) c.Names, err.Error()))
} }
}(container) }(container)
} }

View File

@ -302,7 +302,7 @@ func TestDockerGatherInfo(t *testing.T) {
testing: true, testing: true,
} }
err := d.Gather(&acc) err := acc.GatherError(d.Gather)
require.NoError(t, err) require.NoError(t, err)
acc.AssertContainsTaggedFields(t, acc.AssertContainsTaggedFields(t,

View File

@ -12,7 +12,6 @@ import (
"time" "time"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
) )
@ -66,19 +65,18 @@ func (d *Dovecot) Gather(acc telegraf.Accumulator) error {
} }
var wg sync.WaitGroup var wg sync.WaitGroup
errChan := errchan.New(len(d.Servers) * len(d.Filters))
for _, server := range d.Servers { for _, server := range d.Servers {
for _, filter := range d.Filters { for _, filter := range d.Filters {
wg.Add(1) wg.Add(1)
go func(s string, f string) { go func(s string, f string) {
defer wg.Done() defer wg.Done()
errChan.C <- d.gatherServer(s, acc, d.Type, f) acc.AddError(d.gatherServer(s, acc, d.Type, f))
}(server, filter) }(server, filter)
} }
} }
wg.Wait() wg.Wait()
return errChan.Error() return nil
} }
func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype string, filter string) error { func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype string, filter string) error {

View File

@ -10,7 +10,6 @@ import (
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
"io/ioutil" "io/ioutil"
@ -153,7 +152,6 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
e.client = client e.client = client
} }
errChan := errchan.New(len(e.Servers) * 3)
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(len(e.Servers)) wg.Add(len(e.Servers))
@ -176,24 +174,21 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
// Always gather node states // Always gather node states
if err := e.gatherNodeStats(url, acc); err != nil { if err := e.gatherNodeStats(url, acc); err != nil {
err = fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")) acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
errChan.C <- err
return return
} }
if e.ClusterHealth { if e.ClusterHealth {
url = s + "/_cluster/health?level=indices" url = s + "/_cluster/health?level=indices"
if err := e.gatherClusterHealth(url, acc); err != nil { if err := e.gatherClusterHealth(url, acc); err != nil {
err = fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")) acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
errChan.C <- err
return return
} }
} }
if e.ClusterStats && e.isMaster { if e.ClusterStats && e.isMaster {
if err := e.gatherClusterStats(s+"/_cluster/stats", acc); err != nil { if err := e.gatherClusterStats(s+"/_cluster/stats", acc); err != nil {
err = fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")) acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
errChan.C <- err
return return
} }
} }
@ -201,7 +196,7 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
} }
wg.Wait() wg.Wait()
return errChan.Error() return nil
} }
func (e *Elasticsearch) createHttpClient() (*http.Client, error) { func (e *Elasticsearch) createHttpClient() (*http.Client, error) {

View File

@ -71,7 +71,7 @@ func TestGather(t *testing.T) {
es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse) es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)
var acc testutil.Accumulator var acc testutil.Accumulator
if err := es.Gather(&acc); err != nil { if err := acc.GatherError(es.Gather); err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -15,7 +15,6 @@ import (
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers"
"github.com/influxdata/telegraf/plugins/parsers/nagios" "github.com/influxdata/telegraf/plugins/parsers/nagios"
@ -50,7 +49,6 @@ type Exec struct {
parser parsers.Parser parser parsers.Parser
runner Runner runner Runner
errChan chan error
} }
func NewExec() *Exec { func NewExec() *Exec {
@ -150,13 +148,13 @@ func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync
out, err := e.runner.Run(e, command, acc) out, err := e.runner.Run(e, command, acc)
if err != nil { if err != nil {
e.errChan <- err acc.AddError(err)
return return
} }
metrics, err := e.parser.Parse(out) metrics, err := e.parser.Parse(out)
if err != nil { if err != nil {
e.errChan <- err acc.AddError(err)
} else { } else {
for _, metric := range metrics { for _, metric := range metrics {
acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time()) acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time())
@ -193,7 +191,8 @@ func (e *Exec) Gather(acc telegraf.Accumulator) error {
matches, err := filepath.Glob(cmdAndArgs[0]) matches, err := filepath.Glob(cmdAndArgs[0])
if err != nil { if err != nil {
return err acc.AddError(err)
continue
} }
if len(matches) == 0 { if len(matches) == 0 {
@ -214,15 +213,12 @@ func (e *Exec) Gather(acc telegraf.Accumulator) error {
} }
} }
errChan := errchan.New(len(commands))
e.errChan = errChan.C
wg.Add(len(commands)) wg.Add(len(commands))
for _, command := range commands { for _, command := range commands {
go e.ProcessCommand(command, acc, &wg) go e.ProcessCommand(command, acc, &wg)
} }
wg.Wait() wg.Wait()
return errChan.Error() return nil
} }
func init() { func init() {

View File

@ -101,7 +101,7 @@ func TestExec(t *testing.T) {
} }
var acc testutil.Accumulator var acc testutil.Accumulator
err := e.Gather(&acc) err := acc.GatherError(e.Gather)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, acc.NFields(), 8, "non-numeric measurements should be ignored") assert.Equal(t, acc.NFields(), 8, "non-numeric measurements should be ignored")
@ -127,8 +127,7 @@ func TestExecMalformed(t *testing.T) {
} }
var acc testutil.Accumulator var acc testutil.Accumulator
err := e.Gather(&acc) require.Error(t, acc.GatherError(e.Gather))
require.Error(t, err)
assert.Equal(t, acc.NFields(), 0, "No new points should have been added") assert.Equal(t, acc.NFields(), 0, "No new points should have been added")
} }
@ -141,8 +140,7 @@ func TestCommandError(t *testing.T) {
} }
var acc testutil.Accumulator var acc testutil.Accumulator
err := e.Gather(&acc) require.Error(t, acc.GatherError(e.Gather))
require.Error(t, err)
assert.Equal(t, acc.NFields(), 0, "No new points should have been added") assert.Equal(t, acc.NFields(), 0, "No new points should have been added")
} }
@ -155,8 +153,7 @@ func TestLineProtocolParse(t *testing.T) {
} }
var acc testutil.Accumulator var acc testutil.Accumulator
err := e.Gather(&acc) require.NoError(t, acc.GatherError(e.Gather))
require.NoError(t, err)
fields := map[string]interface{}{ fields := map[string]interface{}{
"usage_idle": float64(99), "usage_idle": float64(99),
@ -191,7 +188,7 @@ func TestLineProtocolShortParse(t *testing.T) {
} }
var acc testutil.Accumulator var acc testutil.Accumulator
err := e.Gather(&acc) err := acc.GatherError(e.Gather)
require.Error(t, err) require.Error(t, err)
assert.Contains(t, err.Error(), "buffer too short", "A buffer too short error was expected") assert.Contains(t, err.Error(), "buffer too short", "A buffer too short error was expected")
} }
@ -205,7 +202,7 @@ func TestLineProtocolParseMultiple(t *testing.T) {
} }
var acc testutil.Accumulator var acc testutil.Accumulator
err := e.Gather(&acc) err := acc.GatherError(e.Gather)
require.NoError(t, err) require.NoError(t, err)
fields := map[string]interface{}{ fields := map[string]interface{}{
@ -231,7 +228,7 @@ func TestExecCommandWithGlob(t *testing.T) {
e.SetParser(parser) e.SetParser(parser)
var acc testutil.Accumulator var acc testutil.Accumulator
err := e.Gather(&acc) err := acc.GatherError(e.Gather)
require.NoError(t, err) require.NoError(t, err)
fields := map[string]interface{}{ fields := map[string]interface{}{
@ -247,7 +244,7 @@ func TestExecCommandWithoutGlob(t *testing.T) {
e.SetParser(parser) e.SetParser(parser)
var acc testutil.Accumulator var acc testutil.Accumulator
err := e.Gather(&acc) err := acc.GatherError(e.Gather)
require.NoError(t, err) require.NoError(t, err)
fields := map[string]interface{}{ fields := map[string]interface{}{
@ -263,7 +260,7 @@ func TestExecCommandWithoutGlobAndPath(t *testing.T) {
e.SetParser(parser) e.SetParser(parser)
var acc testutil.Accumulator var acc testutil.Accumulator
err := e.Gather(&acc) err := acc.GatherError(e.Gather)
require.NoError(t, err) require.NoError(t, err)
fields := map[string]interface{}{ fields := map[string]interface{}{

View File

@ -48,7 +48,6 @@ func (_ *FileStat) Description() string {
func (_ *FileStat) SampleConfig() string { return sampleConfig } func (_ *FileStat) SampleConfig() string { return sampleConfig }
func (f *FileStat) Gather(acc telegraf.Accumulator) error { func (f *FileStat) Gather(acc telegraf.Accumulator) error {
var errS string
var err error var err error
for _, filepath := range f.Files { for _, filepath := range f.Files {
@ -56,7 +55,7 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error {
g, ok := f.globs[filepath] g, ok := f.globs[filepath]
if !ok { if !ok {
if g, err = globpath.Compile(filepath); err != nil { if g, err = globpath.Compile(filepath); err != nil {
errS += err.Error() + " " acc.AddError(err)
continue continue
} }
f.globs[filepath] = g f.globs[filepath] = g
@ -92,7 +91,7 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error {
if f.Md5 { if f.Md5 {
md5, err := getMd5(fileName) md5, err := getMd5(fileName)
if err != nil { if err != nil {
errS += err.Error() + " " acc.AddError(err)
} else { } else {
fields["md5_sum"] = md5 fields["md5_sum"] = md5
} }
@ -102,9 +101,6 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error {
} }
} }
if errS != "" {
return fmt.Errorf(errS)
}
return nil return nil
} }

View File

@ -19,7 +19,7 @@ func TestGatherNoMd5(t *testing.T) {
} }
acc := testutil.Accumulator{} acc := testutil.Accumulator{}
fs.Gather(&acc) acc.GatherError(fs.Gather)
tags1 := map[string]string{ tags1 := map[string]string{
"file": dir + "log1.log", "file": dir + "log1.log",
@ -59,7 +59,7 @@ func TestGatherExplicitFiles(t *testing.T) {
} }
acc := testutil.Accumulator{} acc := testutil.Accumulator{}
fs.Gather(&acc) acc.GatherError(fs.Gather)
tags1 := map[string]string{ tags1 := map[string]string{
"file": dir + "log1.log", "file": dir + "log1.log",
@ -99,7 +99,7 @@ func TestGatherGlob(t *testing.T) {
} }
acc := testutil.Accumulator{} acc := testutil.Accumulator{}
fs.Gather(&acc) acc.GatherError(fs.Gather)
tags1 := map[string]string{ tags1 := map[string]string{
"file": dir + "log1.log", "file": dir + "log1.log",
@ -131,7 +131,7 @@ func TestGatherSuperAsterisk(t *testing.T) {
} }
acc := testutil.Accumulator{} acc := testutil.Accumulator{}
fs.Gather(&acc) acc.GatherError(fs.Gather)
tags1 := map[string]string{ tags1 := map[string]string{
"file": dir + "log1.log", "file": dir + "log1.log",

View File

@ -4,7 +4,6 @@ import (
"bytes" "bytes"
"encoding/base64" "encoding/base64"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"net" "net"
@ -149,31 +148,17 @@ func (h *GrayLog) Gather(acc telegraf.Accumulator) error {
h.client.SetHTTPClient(client) h.client.SetHTTPClient(client)
} }
errorChannel := make(chan error, len(h.Servers))
for _, server := range h.Servers { for _, server := range h.Servers {
wg.Add(1) wg.Add(1)
go func(server string) { go func(server string) {
defer wg.Done() defer wg.Done()
if err := h.gatherServer(acc, server); err != nil { acc.AddError(h.gatherServer(acc, server))
errorChannel <- err
}
}(server) }(server)
} }
wg.Wait() wg.Wait()
close(errorChannel)
// Get all errors and return them as one giant error
errorStrings := []string{}
for err := range errorChannel {
errorStrings = append(errorStrings, err.Error())
}
if len(errorStrings) == 0 {
return nil return nil
}
return errors.New(strings.Join(errorStrings, "\n"))
} }
// Gathers data from a particular server // Gathers data from a particular server

View File

@ -157,7 +157,7 @@ func TestNormalResponse(t *testing.T) {
for _, service := range graylog { for _, service := range graylog {
var acc testutil.Accumulator var acc testutil.Accumulator
err := service.Gather(&acc) err := acc.GatherError(service.Gather)
require.NoError(t, err) require.NoError(t, err)
for k, v := range expectedFields { for k, v := range expectedFields {
acc.AssertContainsTaggedFields(t, k, v, validTags[k]) acc.AssertContainsTaggedFields(t, k, v, validTags[k])
@ -170,9 +170,9 @@ func TestHttpJson500(t *testing.T) {
graylog := genMockGrayLog(validJSON, 500) graylog := genMockGrayLog(validJSON, 500)
var acc testutil.Accumulator var acc testutil.Accumulator
err := graylog[0].Gather(&acc) err := acc.GatherError(graylog[0].Gather)
assert.NotNil(t, err) assert.Error(t, err)
assert.Equal(t, 0, acc.NFields()) assert.Equal(t, 0, acc.NFields())
} }
@ -181,9 +181,9 @@ func TestHttpJsonBadJson(t *testing.T) {
graylog := genMockGrayLog(invalidJSON, 200) graylog := genMockGrayLog(invalidJSON, 200)
var acc testutil.Accumulator var acc testutil.Accumulator
err := graylog[0].Gather(&acc) err := acc.GatherError(graylog[0].Gather)
assert.NotNil(t, err) assert.Error(t, err)
assert.Equal(t, 0, acc.NFields()) assert.Equal(t, 0, acc.NFields())
} }
@ -192,8 +192,8 @@ func TestHttpJsonEmptyResponse(t *testing.T) {
graylog := genMockGrayLog(empty, 200) graylog := genMockGrayLog(empty, 200)
var acc testutil.Accumulator var acc testutil.Accumulator
err := graylog[0].Gather(&acc) err := acc.GatherError(graylog[0].Gather)
assert.NotNil(t, err) assert.Error(t, err)
assert.Equal(t, 0, acc.NFields()) assert.Equal(t, 0, acc.NFields())
} }

View File

@ -1,7 +1,6 @@
package httpjson package httpjson
import ( import (
"errors"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
@ -145,31 +144,17 @@ func (h *HttpJson) Gather(acc telegraf.Accumulator) error {
h.client.SetHTTPClient(client) h.client.SetHTTPClient(client)
} }
errorChannel := make(chan error, len(h.Servers))
for _, server := range h.Servers { for _, server := range h.Servers {
wg.Add(1) wg.Add(1)
go func(server string) { go func(server string) {
defer wg.Done() defer wg.Done()
if err := h.gatherServer(acc, server); err != nil { acc.AddError(h.gatherServer(acc, server))
errorChannel <- err
}
}(server) }(server)
} }
wg.Wait() wg.Wait()
close(errorChannel)
// Get all errors and return them as one giant error
errorStrings := []string{}
for err := range errorChannel {
errorStrings = append(errorStrings, err.Error())
}
if len(errorStrings) == 0 {
return nil return nil
}
return errors.New(strings.Join(errorStrings, "\n"))
} }
// Gathers data from a particular server // Gathers data from a particular server

View File

@ -210,7 +210,7 @@ func TestHttpJson200(t *testing.T) {
for _, service := range httpjson { for _, service := range httpjson {
var acc testutil.Accumulator var acc testutil.Accumulator
err := service.Gather(&acc) err := acc.GatherError(service.Gather)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, 12, acc.NFields()) assert.Equal(t, 12, acc.NFields())
// Set responsetime // Set responsetime
@ -245,7 +245,7 @@ func TestHttpJsonGET_URL(t *testing.T) {
} }
var acc testutil.Accumulator var acc testutil.Accumulator
err := a.Gather(&acc) err := acc.GatherError(a.Gather)
require.NoError(t, err) require.NoError(t, err)
// remove response_time from gathered fields because it's non-deterministic // remove response_time from gathered fields because it's non-deterministic
@ -318,7 +318,7 @@ func TestHttpJsonGET(t *testing.T) {
} }
var acc testutil.Accumulator var acc testutil.Accumulator
err := a.Gather(&acc) err := acc.GatherError(a.Gather)
require.NoError(t, err) require.NoError(t, err)
// remove response_time from gathered fields because it's non-deterministic // remove response_time from gathered fields because it's non-deterministic
@ -392,7 +392,7 @@ func TestHttpJsonPOST(t *testing.T) {
} }
var acc testutil.Accumulator var acc testutil.Accumulator
err := a.Gather(&acc) err := acc.GatherError(a.Gather)
require.NoError(t, err) require.NoError(t, err)
// remove response_time from gathered fields because it's non-deterministic // remove response_time from gathered fields because it's non-deterministic
@ -448,9 +448,9 @@ func TestHttpJson500(t *testing.T) {
httpjson := genMockHttpJson(validJSON, 500) httpjson := genMockHttpJson(validJSON, 500)
var acc testutil.Accumulator var acc testutil.Accumulator
err := httpjson[0].Gather(&acc) err := acc.GatherError(httpjson[0].Gather)
assert.NotNil(t, err) assert.Error(t, err)
assert.Equal(t, 0, acc.NFields()) assert.Equal(t, 0, acc.NFields())
} }
@ -460,9 +460,9 @@ func TestHttpJsonBadMethod(t *testing.T) {
httpjson[0].Method = "NOT_A_REAL_METHOD" httpjson[0].Method = "NOT_A_REAL_METHOD"
var acc testutil.Accumulator var acc testutil.Accumulator
err := httpjson[0].Gather(&acc) err := acc.GatherError(httpjson[0].Gather)
assert.NotNil(t, err) assert.Error(t, err)
assert.Equal(t, 0, acc.NFields()) assert.Equal(t, 0, acc.NFields())
} }
@ -471,9 +471,9 @@ func TestHttpJsonBadJson(t *testing.T) {
httpjson := genMockHttpJson(invalidJSON, 200) httpjson := genMockHttpJson(invalidJSON, 200)
var acc testutil.Accumulator var acc testutil.Accumulator
err := httpjson[0].Gather(&acc) err := acc.GatherError(httpjson[0].Gather)
assert.NotNil(t, err) assert.Error(t, err)
assert.Equal(t, 0, acc.NFields()) assert.Equal(t, 0, acc.NFields())
} }
@ -482,9 +482,9 @@ func TestHttpJsonEmptyResponse(t *testing.T) {
httpjson := genMockHttpJson(empty, 200) httpjson := genMockHttpJson(empty, 200)
var acc testutil.Accumulator var acc testutil.Accumulator
err := httpjson[0].Gather(&acc) err := acc.GatherError(httpjson[0].Gather)
assert.NotNil(t, err) assert.Error(t, err)
assert.Equal(t, 0, acc.NFields()) assert.Equal(t, 0, acc.NFields())
} }
@ -495,7 +495,7 @@ func TestHttpJson200Tags(t *testing.T) {
for _, service := range httpjson { for _, service := range httpjson {
if service.Name == "other_webapp" { if service.Name == "other_webapp" {
var acc testutil.Accumulator var acc testutil.Accumulator
err := service.Gather(&acc) err := acc.GatherError(service.Gather)
// Set responsetime // Set responsetime
for _, p := range acc.Metrics { for _, p := range acc.Metrics {
p.Fields["response_time"] = 1.0 p.Fields["response_time"] = 1.0
@ -533,7 +533,7 @@ func TestHttpJsonArray200Tags(t *testing.T) {
for _, service := range httpjson { for _, service := range httpjson {
if service.Name == "other_webapp" { if service.Name == "other_webapp" {
var acc testutil.Accumulator var acc testutil.Accumulator
err := service.Gather(&acc) err := acc.GatherError(service.Gather)
// Set responsetime // Set responsetime
for _, p := range acc.Metrics { for _, p := range acc.Metrics {
p.Fields["response_time"] = 1.0 p.Fields["response_time"] = 1.0

View File

@ -5,7 +5,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"net/http" "net/http"
"strings"
"sync" "sync"
"time" "time"
@ -57,34 +56,20 @@ func (i *InfluxDB) Gather(acc telegraf.Accumulator) error {
} }
} }
errorChannel := make(chan error, len(i.URLs))
var wg sync.WaitGroup var wg sync.WaitGroup
for _, u := range i.URLs { for _, u := range i.URLs {
wg.Add(1) wg.Add(1)
go func(url string) { go func(url string) {
defer wg.Done() defer wg.Done()
if err := i.gatherURL(acc, url); err != nil { if err := i.gatherURL(acc, url); err != nil {
errorChannel <- fmt.Errorf("[url=%s]: %s", url, err) acc.AddError(fmt.Errorf("[url=%s]: %s", url, err))
} }
}(u) }(u)
} }
wg.Wait() wg.Wait()
close(errorChannel)
// If there weren't any errors, we can return nil now.
if len(errorChannel) == 0 {
return nil return nil
}
// There were errors, so join them all together as one big error.
errorStrings := make([]string, 0, len(errorChannel))
for err := range errorChannel {
errorStrings = append(errorStrings, err.Error())
}
return errors.New(strings.Join(errorStrings, "\n"))
} }
type point struct { type point struct {

View File

@ -25,7 +25,7 @@ func TestBasic(t *testing.T) {
} }
var acc testutil.Accumulator var acc testutil.Accumulator
require.NoError(t, plugin.Gather(&acc)) require.NoError(t, acc.GatherError(plugin.Gather))
require.Len(t, acc.Metrics, 3) require.Len(t, acc.Metrics, 3)
fields := map[string]interface{}{ fields := map[string]interface{}{
@ -72,7 +72,7 @@ func TestInfluxDB(t *testing.T) {
} }
var acc testutil.Accumulator var acc testutil.Accumulator
require.NoError(t, plugin.Gather(&acc)) require.NoError(t, acc.GatherError(plugin.Gather))
require.Len(t, acc.Metrics, 34) require.Len(t, acc.Metrics, 34)
@ -132,7 +132,7 @@ func TestInfluxDB2(t *testing.T) {
} }
var acc testutil.Accumulator var acc testutil.Accumulator
require.NoError(t, plugin.Gather(&acc)) require.NoError(t, acc.GatherError(plugin.Gather))
require.Len(t, acc.Metrics, 34) require.Len(t, acc.Metrics, 34)
@ -157,7 +157,7 @@ func TestErrorHandling(t *testing.T) {
} }
var acc testutil.Accumulator var acc testutil.Accumulator
require.Error(t, plugin.Gather(&acc)) require.Error(t, acc.GatherError(plugin.Gather))
} }
func TestErrorHandling404(t *testing.T) { func TestErrorHandling404(t *testing.T) {
@ -175,7 +175,7 @@ func TestErrorHandling404(t *testing.T) {
} }
var acc testutil.Accumulator var acc testutil.Accumulator
require.Error(t, plugin.Gather(&acc)) require.Error(t, acc.GatherError(plugin.Gather))
} }
const basicJSON = ` const basicJSON = `

View File

@ -52,7 +52,8 @@ func (m *Ipmi) Gather(acc telegraf.Accumulator) error {
for _, server := range m.Servers { for _, server := range m.Servers {
err := m.parse(acc, server) err := m.parse(acc, server)
if err != nil { if err != nil {
return err acc.AddError(err)
continue
} }
} }
} else { } else {

View File

@ -20,7 +20,7 @@ func TestGather(t *testing.T) {
execCommand = fakeExecCommand execCommand = fakeExecCommand
var acc testutil.Accumulator var acc testutil.Accumulator
err := i.Gather(&acc) err := acc.GatherError(i.Gather)
require.NoError(t, err) require.NoError(t, err)
@ -121,7 +121,7 @@ func TestGather(t *testing.T) {
Path: "ipmitool", Path: "ipmitool",
} }
err = i.Gather(&acc) err = acc.GatherError(i.Gather)
var testsWithoutServer = []struct { var testsWithoutServer = []struct {
fields map[string]interface{} fields map[string]interface{}

View File

@ -54,20 +54,19 @@ func (ipt *Iptables) Gather(acc telegraf.Accumulator) error {
} }
// best effort : we continue through the chains even if an error is encountered, // best effort : we continue through the chains even if an error is encountered,
// but we keep track of the last error. // but we keep track of the last error.
var err error
for _, chain := range ipt.Chains { for _, chain := range ipt.Chains {
data, e := ipt.lister(ipt.Table, chain) data, e := ipt.lister(ipt.Table, chain)
if e != nil { if e != nil {
err = e acc.AddError(e)
continue continue
} }
e = ipt.parseAndGather(data, acc) e = ipt.parseAndGather(data, acc)
if e != nil { if e != nil {
err = e acc.AddError(e)
continue continue
} }
} }
return err return nil
} }
func (ipt *Iptables) chainList(table, chain string) (string, error) { func (ipt *Iptables) chainList(table, chain string) (string, error) {

View File

@ -141,7 +141,7 @@ func TestIptables_Gather(t *testing.T) {
}, },
} }
acc := new(testutil.Accumulator) acc := new(testutil.Accumulator)
err := ipt.Gather(acc) err := acc.GatherError(ipt.Gather)
if !reflect.DeepEqual(tt.err, err) { if !reflect.DeepEqual(tt.err, err) {
t.Errorf("%d: expected error '%#v' got '%#v'", i, tt.err, err) t.Errorf("%d: expected error '%#v' got '%#v'", i, tt.err, err)
} }
@ -199,7 +199,7 @@ func TestIptables_Gather_listerError(t *testing.T) {
}, },
} }
acc := new(testutil.Accumulator) acc := new(testutil.Accumulator)
err := ipt.Gather(acc) err := acc.GatherError(ipt.Gather)
if !reflect.DeepEqual(err, errFoo) { if !reflect.DeepEqual(err, errFoo) {
t.Errorf("Expected error %#v got\n%#v\n", errFoo, err) t.Errorf("Expected error %#v got\n%#v\n", errFoo, err)
} }

View File

@ -158,7 +158,7 @@ func TestHttpJsonMultiValue(t *testing.T) {
jolokia := genJolokiaClientStub(validMultiValueJSON, 200, Servers, []Metric{HeapMetric}) jolokia := genJolokiaClientStub(validMultiValueJSON, 200, Servers, []Metric{HeapMetric})
var acc testutil.Accumulator var acc testutil.Accumulator
err := jolokia.Gather(&acc) err := acc.GatherError(jolokia.Gather)
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, 1, len(acc.Metrics)) assert.Equal(t, 1, len(acc.Metrics))
@ -210,7 +210,7 @@ func TestHttpJsonThreeLevelMultiValue(t *testing.T) {
jolokia := genJolokiaClientStub(validThreeLevelMultiValueJSON, 200, Servers, []Metric{HeapMetric}) jolokia := genJolokiaClientStub(validThreeLevelMultiValueJSON, 200, Servers, []Metric{HeapMetric})
var acc testutil.Accumulator var acc testutil.Accumulator
err := jolokia.Gather(&acc) err := acc.GatherError(jolokia.Gather)
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, 1, len(acc.Metrics)) assert.Equal(t, 1, len(acc.Metrics))
@ -238,17 +238,18 @@ func TestHttpJsonThreeLevelMultiValue(t *testing.T) {
} }
// Test that the proper values are ignored or collected // Test that the proper values are ignored or collected
func TestHttpJsonOn404(t *testing.T) { func TestHttp404(t *testing.T) {
jolokia := genJolokiaClientStub(validMultiValueJSON, 404, Servers, jolokia := genJolokiaClientStub(invalidJSON, 404, Servers,
[]Metric{UsedHeapMetric}) []Metric{UsedHeapMetric})
var acc testutil.Accumulator var acc testutil.Accumulator
acc.SetDebug(true) acc.SetDebug(true)
err := jolokia.Gather(&acc) err := acc.GatherError(jolokia.Gather)
assert.Nil(t, err) assert.Error(t, err)
assert.Equal(t, 0, len(acc.Metrics)) assert.Equal(t, 0, len(acc.Metrics))
assert.Contains(t, err.Error(), "has status code 404")
} }
// Test that the proper values are ignored or collected // Test that the proper values are ignored or collected
@ -259,8 +260,9 @@ func TestHttpInvalidJson(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
acc.SetDebug(true) acc.SetDebug(true)
err := jolokia.Gather(&acc) err := acc.GatherError(jolokia.Gather)
assert.Nil(t, err) assert.Error(t, err)
assert.Equal(t, 0, len(acc.Metrics)) assert.Equal(t, 0, len(acc.Metrics))
assert.Contains(t, err.Error(), "Error decoding JSON response")
} }

View File

@ -59,7 +59,7 @@ func TestReadsMetricsFromKafka(t *testing.T) {
waitForPoint(&acc, t) waitForPoint(&acc, t)
// Gather points // Gather points
err = k.Gather(&acc) err = acc.GatherError(k.Gather)
require.NoError(t, err) require.NoError(t, err)
if len(acc.Metrics) == 1 { if len(acc.Metrics) == 1 {
point := acc.Metrics[0] point := acc.Metrics[0]

View File

@ -92,7 +92,7 @@ func TestRunParserAndGather(t *testing.T) {
in <- saramaMsg(testMsg) in <- saramaMsg(testMsg)
acc.Wait(1) acc.Wait(1)
k.Gather(&acc) acc.GatherError(k.Gather)
assert.Equal(t, acc.NFields(), 1) assert.Equal(t, acc.NFields(), 1)
acc.AssertContainsFields(t, "cpu_load_short", acc.AssertContainsFields(t, "cpu_load_short",
@ -111,7 +111,7 @@ func TestRunParserAndGatherGraphite(t *testing.T) {
in <- saramaMsg(testMsgGraphite) in <- saramaMsg(testMsgGraphite)
acc.Wait(1) acc.Wait(1)
k.Gather(&acc) acc.GatherError(k.Gather)
assert.Equal(t, acc.NFields(), 1) assert.Equal(t, acc.NFields(), 1)
acc.AssertContainsFields(t, "cpu_load_short_graphite", acc.AssertContainsFields(t, "cpu_load_short_graphite",
@ -130,7 +130,7 @@ func TestRunParserAndGatherJSON(t *testing.T) {
in <- saramaMsg(testMsgJSON) in <- saramaMsg(testMsgJSON)
acc.Wait(1) acc.Wait(1)
k.Gather(&acc) acc.GatherError(k.Gather)
assert.Equal(t, acc.NFields(), 2) assert.Equal(t, acc.NFields(), 2)
acc.AssertContainsFields(t, "kafka_json_test", acc.AssertContainsFields(t, "kafka_json_test",

View File

@ -11,7 +11,6 @@ import (
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
) )
@ -72,14 +71,13 @@ func (k *Kubernetes) Description() string {
//Gather collects kubernetes metrics from a given URL //Gather collects kubernetes metrics from a given URL
func (k *Kubernetes) Gather(acc telegraf.Accumulator) error { func (k *Kubernetes) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup var wg sync.WaitGroup
errChan := errchan.New(1)
wg.Add(1) wg.Add(1)
go func(k *Kubernetes) { go func(k *Kubernetes) {
defer wg.Done() defer wg.Done()
errChan.C <- k.gatherSummary(k.URL, acc) acc.AddError(k.gatherSummary(k.URL, acc))
}(k) }(k)
wg.Wait() wg.Wait()
return errChan.Error() return nil
} }
func buildURL(endpoint string, base string) (*url.URL, error) { func buildURL(endpoint string, base string) (*url.URL, error) {

View File

@ -22,7 +22,7 @@ func TestKubernetesStats(t *testing.T) {
} }
var acc testutil.Accumulator var acc testutil.Accumulator
err := k.Gather(&acc) err := acc.GatherError(k.Gather)
require.NoError(t, err) require.NoError(t, err)
fields := map[string]interface{}{ fields := map[string]interface{}{

View File

@ -154,15 +154,16 @@ func (l *LeoFS) Gather(acc telegraf.Accumulator) error {
return nil return nil
} }
var wg sync.WaitGroup var wg sync.WaitGroup
var outerr error
for _, endpoint := range l.Servers { for _, endpoint := range l.Servers {
_, err := url.Parse(endpoint) _, err := url.Parse(endpoint)
if err != nil { if err != nil {
return fmt.Errorf("Unable to parse the address:%s, err:%s", endpoint, err) acc.AddError(fmt.Errorf("Unable to parse the address:%s, err:%s", endpoint, err))
continue
} }
port, err := retrieveTokenAfterColon(endpoint) port, err := retrieveTokenAfterColon(endpoint)
if err != nil { if err != nil {
return err acc.AddError(err)
continue
} }
st, ok := serverTypeMapping[port] st, ok := serverTypeMapping[port]
if !ok { if !ok {
@ -171,11 +172,11 @@ func (l *LeoFS) Gather(acc telegraf.Accumulator) error {
wg.Add(1) wg.Add(1)
go func(endpoint string, st ServerType) { go func(endpoint string, st ServerType) {
defer wg.Done() defer wg.Done()
outerr = l.gatherServer(endpoint, st, acc) acc.AddError(l.gatherServer(endpoint, st, acc))
}(endpoint, st) }(endpoint, st)
} }
wg.Wait() wg.Wait()
return outerr return nil
} }
func (l *LeoFS) gatherServer( func (l *LeoFS) gatherServer(

View File

@ -146,7 +146,7 @@ func testMain(t *testing.T, code string, endpoint string, serverType ServerType)
var acc testutil.Accumulator var acc testutil.Accumulator
acc.SetDebug(true) acc.SetDebug(true)
err := l.Gather(&acc) err := acc.GatherError(l.Gather)
require.NoError(t, err) require.NoError(t, err)
floatMetrics := KeyMapping[serverType] floatMetrics := KeyMapping[serverType]

View File

@ -9,7 +9,6 @@ import (
"github.com/influxdata/tail" "github.com/influxdata/tail"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/internal/globpath"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
@ -118,14 +117,15 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error {
} }
// compile log parser patterns: // compile log parser patterns:
errChan := errchan.New(len(l.parsers)) var haveError bool
for _, parser := range l.parsers { for _, parser := range l.parsers {
if err := parser.Compile(); err != nil { if err := parser.Compile(); err != nil {
errChan.C <- err acc.AddError(err)
haveError = true
} }
} }
if err := errChan.Error(); err != nil { if haveError {
return err return nil
} }
l.wg.Add(1) l.wg.Add(1)
@ -143,8 +143,6 @@ func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error {
seek.Offset = 0 seek.Offset = 0
} }
errChan := errchan.New(len(l.Files))
// Create a "tailer" for each file // Create a "tailer" for each file
for _, filepath := range l.Files { for _, filepath := range l.Files {
g, err := globpath.Compile(filepath) g, err := globpath.Compile(filepath)
@ -153,7 +151,6 @@ func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error {
continue continue
} }
files := g.Match() files := g.Match()
errChan = errchan.New(len(files))
for file, _ := range files { for file, _ := range files {
if _, ok := l.tailers[file]; ok { if _, ok := l.tailers[file]; ok {
@ -168,7 +165,7 @@ func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error {
Location: &seek, Location: &seek,
MustExist: true, MustExist: true,
}) })
errChan.C <- err l.acc.AddError(err)
// create a goroutine for each "tailer" // create a goroutine for each "tailer"
l.wg.Add(1) l.wg.Add(1)
@ -177,7 +174,7 @@ func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error {
} }
} }
return errChan.Error() return nil
} }
// receiver is launched as a goroutine to continuously watch a tailed logfile // receiver is launched as a goroutine to continuously watch a tailed logfile

View File

@ -38,7 +38,10 @@ func TestGrokParseLogFilesNonExistPattern(t *testing.T) {
} }
acc := testutil.Accumulator{} acc := testutil.Accumulator{}
assert.Error(t, logparser.Start(&acc)) logparser.Start(&acc)
if assert.NotEmpty(t, acc.Errors) {
assert.Error(t, acc.Errors[0])
}
logparser.Stop() logparser.Stop()
} }
@ -106,7 +109,7 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) {
os.Symlink( os.Symlink(
thisdir+"grok/testdata/test_a.log", thisdir+"grok/testdata/test_a.log",
emptydir+"/test_a.log") emptydir+"/test_a.log")
assert.NoError(t, logparser.Gather(&acc)) assert.NoError(t, acc.GatherError(logparser.Gather))
acc.Wait(1) acc.Wait(1)
logparser.Stop() logparser.Stop()

View File

@ -140,7 +140,7 @@ func TestMailChimpGatherReport(t *testing.T) {
} }
func TestMailChimpGatherError(t *testing.T) { func TestMailChimpGatherError(t *testing.T) {
ts := httptest.NewServer( ts := httptest.NewServer(
http.HandlerFunc( http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) { func(w http.ResponseWriter, r *http.Request) {

View File

@ -9,7 +9,6 @@ import (
"time" "time"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
) )
@ -73,16 +72,15 @@ func (m *Memcached) Gather(acc telegraf.Accumulator) error {
return m.gatherServer(":11211", false, acc) return m.gatherServer(":11211", false, acc)
} }
errChan := errchan.New(len(m.Servers) + len(m.UnixSockets))
for _, serverAddress := range m.Servers { for _, serverAddress := range m.Servers {
errChan.C <- m.gatherServer(serverAddress, false, acc) acc.AddError(m.gatherServer(serverAddress, false, acc))
} }
for _, unixAddress := range m.UnixSockets { for _, unixAddress := range m.UnixSockets {
errChan.C <- m.gatherServer(unixAddress, true, acc) acc.AddError(m.gatherServer(unixAddress, true, acc))
} }
return errChan.Error() return nil
} }
func (m *Memcached) gatherServer( func (m *Memcached) gatherServer(

View File

@ -21,7 +21,7 @@ func TestMemcachedGeneratesMetrics(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err := m.Gather(&acc) err := acc.GatherError(m.Gather)
require.NoError(t, err) require.NoError(t, err)
intMetrics := []string{"get_hits", "get_misses", "evictions", intMetrics := []string{"get_hits", "get_misses", "evictions",

View File

@ -8,7 +8,6 @@ import (
"net" "net"
"net/http" "net/http"
"strconv" "strconv"
"strings"
"sync" "sync"
"time" "time"
@ -96,16 +95,13 @@ func (m *Mesos) SetDefaults() {
// Gather() metrics from given list of Mesos Masters // Gather() metrics from given list of Mesos Masters
func (m *Mesos) Gather(acc telegraf.Accumulator) error { func (m *Mesos) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup var wg sync.WaitGroup
var errorChannel chan error
m.SetDefaults() m.SetDefaults()
errorChannel = make(chan error, len(m.Masters)+2*len(m.Slaves))
for _, v := range m.Masters { for _, v := range m.Masters {
wg.Add(1) wg.Add(1)
go func(c string) { go func(c string) {
errorChannel <- m.gatherMainMetrics(c, ":5050", MASTER, acc) acc.AddError(m.gatherMainMetrics(c, ":5050", MASTER, acc))
wg.Done() wg.Done()
return return
}(v) }(v)
@ -114,7 +110,7 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error {
for _, v := range m.Slaves { for _, v := range m.Slaves {
wg.Add(1) wg.Add(1)
go func(c string) { go func(c string) {
errorChannel <- m.gatherMainMetrics(c, ":5051", SLAVE, acc) acc.AddError(m.gatherMainMetrics(c, ":5051", SLAVE, acc))
wg.Done() wg.Done()
return return
}(v) }(v)
@ -125,26 +121,14 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error {
// wg.Add(1) // wg.Add(1)
// go func(c string) { // go func(c string) {
// errorChannel <- m.gatherSlaveTaskMetrics(c, ":5051", acc) // acc.AddError(m.gatherSlaveTaskMetrics(c, ":5051", acc))
// wg.Done() // wg.Done()
// return // return
// }(v) // }(v)
} }
wg.Wait() wg.Wait()
close(errorChannel)
errorStrings := []string{}
// Gather all errors for returning them at once
for err := range errorChannel {
if err != nil {
errorStrings = append(errorStrings, err.Error())
}
}
if len(errorStrings) > 0 {
return errors.New(strings.Join(errorStrings, "\n"))
}
return nil return nil
} }

View File

@ -282,7 +282,7 @@ func TestMesosMaster(t *testing.T) {
Timeout: 10, Timeout: 10,
} }
err := m.Gather(&acc) err := acc.GatherError(m.Gather)
if err != nil { if err != nil {
t.Errorf(err.Error()) t.Errorf(err.Error())
@ -330,7 +330,7 @@ func TestMesosSlave(t *testing.T) {
Timeout: 10, Timeout: 10,
} }
err := m.Gather(&acc) err := acc.GatherError(m.Gather)
if err != nil { if err != nil {
t.Errorf(err.Error()) t.Errorf(err.Error())

View File

@ -11,7 +11,6 @@ import (
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
"gopkg.in/mgo.v2" "gopkg.in/mgo.v2"
) )
@ -73,11 +72,11 @@ func (m *MongoDB) Gather(acc telegraf.Accumulator) error {
} }
var wg sync.WaitGroup var wg sync.WaitGroup
errChan := errchan.New(len(m.Servers))
for _, serv := range m.Servers { for _, serv := range m.Servers {
u, err := url.Parse(serv) u, err := url.Parse(serv)
if err != nil { if err != nil {
return fmt.Errorf("Unable to parse to address '%s': %s", serv, err) acc.AddError(fmt.Errorf("Unable to parse to address '%s': %s", serv, err))
continue
} else if u.Scheme == "" { } else if u.Scheme == "" {
u.Scheme = "mongodb" u.Scheme = "mongodb"
// fallback to simple string based address (i.e. "10.0.0.1:10000") // fallback to simple string based address (i.e. "10.0.0.1:10000")
@ -89,12 +88,12 @@ func (m *MongoDB) Gather(acc telegraf.Accumulator) error {
wg.Add(1) wg.Add(1)
go func(srv *Server) { go func(srv *Server) {
defer wg.Done() defer wg.Done()
errChan.C <- m.gatherServer(srv, acc) acc.AddError(m.gatherServer(srv, acc))
}(m.getMongoServer(u)) }(m.getMongoServer(u))
} }
wg.Wait() wg.Wait()
return errChan.Error() return nil
} }
func (m *MongoDB) getMongoServer(url *url.URL) *Server { func (m *MongoDB) getMongoServer(url *url.URL) *Server {

View File

@ -11,7 +11,6 @@ import (
"time" "time"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
"github.com/go-sql-driver/mysql" "github.com/go-sql-driver/mysql"
@ -137,19 +136,18 @@ func (m *Mysql) Gather(acc telegraf.Accumulator) error {
m.InitMysql() m.InitMysql()
} }
var wg sync.WaitGroup var wg sync.WaitGroup
errChan := errchan.New(len(m.Servers))
// Loop through each server and collect metrics // Loop through each server and collect metrics
for _, server := range m.Servers { for _, server := range m.Servers {
wg.Add(1) wg.Add(1)
go func(s string) { go func(s string) {
defer wg.Done() defer wg.Done()
errChan.C <- m.gatherServer(s, acc) acc.AddError(m.gatherServer(s, acc))
}(server) }(server)
} }
wg.Wait() wg.Wait()
return errChan.Error() return nil
} }
type mapping struct { type mapping struct {

View File

@ -12,7 +12,6 @@ import (
"time" "time"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
) )
@ -35,23 +34,22 @@ func (n *Nginx) Description() string {
func (n *Nginx) Gather(acc telegraf.Accumulator) error { func (n *Nginx) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup var wg sync.WaitGroup
errChan := errchan.New(len(n.Urls))
for _, u := range n.Urls { for _, u := range n.Urls {
addr, err := url.Parse(u) addr, err := url.Parse(u)
if err != nil { if err != nil {
return fmt.Errorf("Unable to parse address '%s': %s", u, err) acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err))
} }
wg.Add(1) wg.Add(1)
go func(addr *url.URL) { go func(addr *url.URL) {
defer wg.Done() defer wg.Done()
errChan.C <- n.gatherUrl(addr, acc) acc.AddError(n.gatherUrl(addr, acc))
}(addr) }(addr)
} }
wg.Wait() wg.Wait()
return errChan.Error() return nil
} }
var tr = &http.Transport{ var tr = &http.Transport{

View File

@ -64,8 +64,8 @@ func TestNginxGeneratesMetrics(t *testing.T) {
var acc_nginx testutil.Accumulator var acc_nginx testutil.Accumulator
var acc_tengine testutil.Accumulator var acc_tengine testutil.Accumulator
err_nginx := n.Gather(&acc_nginx) err_nginx := acc_nginx.GatherError(n.Gather)
err_tengine := nt.Gather(&acc_tengine) err_tengine := acc_tengine.GatherError(nt.Gather)
require.NoError(t, err_nginx) require.NoError(t, err_nginx)
require.NoError(t, err_tengine) require.NoError(t, err_tengine)

View File

@ -32,7 +32,6 @@ import (
"time" "time"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
) )
@ -66,17 +65,16 @@ func (n *NSQ) Description() string {
func (n *NSQ) Gather(acc telegraf.Accumulator) error { func (n *NSQ) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup var wg sync.WaitGroup
errChan := errchan.New(len(n.Endpoints))
for _, e := range n.Endpoints { for _, e := range n.Endpoints {
wg.Add(1) wg.Add(1)
go func(e string) { go func(e string) {
defer wg.Done() defer wg.Done()
errChan.C <- n.gatherEndpoint(e, acc) acc.AddError(n.gatherEndpoint(e, acc))
}(e) }(e)
} }
wg.Wait() wg.Wait()
return errChan.Error() return nil
} }
var tr = &http.Transport{ var tr = &http.Transport{

View File

@ -24,7 +24,7 @@ func TestNSQStats(t *testing.T) {
} }
var acc testutil.Accumulator var acc testutil.Accumulator
err := n.Gather(&acc) err := acc.GatherError(n.Gather)
require.NoError(t, err) require.NoError(t, err)
u, err := url.Parse(ts.URL) u, err := url.Parse(ts.URL)

View File

@ -1,7 +1,7 @@
package nsq_consumer package nsq_consumer
import ( import (
"log" "fmt"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
@ -62,7 +62,7 @@ func (n *NSQConsumer) Start(acc telegraf.Accumulator) error {
n.consumer.AddConcurrentHandlers(nsq.HandlerFunc(func(message *nsq.Message) error { n.consumer.AddConcurrentHandlers(nsq.HandlerFunc(func(message *nsq.Message) error {
metrics, err := n.parser.Parse(message.Body) metrics, err := n.parser.Parse(message.Body)
if err != nil { if err != nil {
log.Printf("E! NSQConsumer Parse Error\nmessage:%s\nerror:%s", string(message.Body), err.Error()) acc.AddError(fmt.Errorf("E! NSQConsumer Parse Error\nmessage:%s\nerror:%s", string(message.Body), err.Error()))
return nil return nil
} }
for _, metric := range metrics { for _, metric := range metrics {

View File

@ -5,7 +5,7 @@ package ntpq
import ( import (
"bufio" "bufio"
"bytes" "bytes"
"log" "fmt"
"os/exec" "os/exec"
"strconv" "strconv"
"strings" "strings"
@ -132,7 +132,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
case strings.HasSuffix(when, "h"): case strings.HasSuffix(when, "h"):
m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "h")) m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "h"))
if err != nil { if err != nil {
log.Printf("E! Error ntpq: parsing int: %s", fields[index]) acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index]))
continue continue
} }
// seconds in an hour // seconds in an hour
@ -141,7 +141,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
case strings.HasSuffix(when, "d"): case strings.HasSuffix(when, "d"):
m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "d")) m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "d"))
if err != nil { if err != nil {
log.Printf("E! Error ntpq: parsing int: %s", fields[index]) acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index]))
continue continue
} }
// seconds in a day // seconds in a day
@ -150,7 +150,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
case strings.HasSuffix(when, "m"): case strings.HasSuffix(when, "m"):
m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "m")) m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "m"))
if err != nil { if err != nil {
log.Printf("E! Error ntpq: parsing int: %s", fields[index]) acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index]))
continue continue
} }
// seconds in a day // seconds in a day
@ -161,7 +161,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
m, err := strconv.Atoi(fields[index]) m, err := strconv.Atoi(fields[index])
if err != nil { if err != nil {
log.Printf("E! Error ntpq: parsing int: %s", fields[index]) acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index]))
continue continue
} }
mFields[key] = int64(m) mFields[key] = int64(m)
@ -178,7 +178,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
m, err := strconv.ParseFloat(fields[index], 64) m, err := strconv.ParseFloat(fields[index], 64)
if err != nil { if err != nil {
log.Printf("E! Error ntpq: parsing float: %s", fields[index]) acc.AddError(fmt.Errorf("E! Error ntpq: parsing float: %s", fields[index]))
continue continue
} }
mFields[key] = m mFields[key] = m

View File

@ -21,7 +21,7 @@ func TestSingleNTPQ(t *testing.T) {
} }
acc := testutil.Accumulator{} acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc)) assert.NoError(t, acc.GatherError(n.Gather))
fields := map[string]interface{}{ fields := map[string]interface{}{
"when": int64(101), "when": int64(101),
@ -51,7 +51,7 @@ func TestMissingJitterField(t *testing.T) {
} }
acc := testutil.Accumulator{} acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc)) assert.NoError(t, acc.GatherError(n.Gather))
fields := map[string]interface{}{ fields := map[string]interface{}{
"when": int64(101), "when": int64(101),
@ -80,7 +80,7 @@ func TestBadIntNTPQ(t *testing.T) {
} }
acc := testutil.Accumulator{} acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc)) assert.Error(t, acc.GatherError(n.Gather))
fields := map[string]interface{}{ fields := map[string]interface{}{
"when": int64(101), "when": int64(101),
@ -109,7 +109,7 @@ func TestBadFloatNTPQ(t *testing.T) {
} }
acc := testutil.Accumulator{} acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc)) assert.Error(t, acc.GatherError(n.Gather))
fields := map[string]interface{}{ fields := map[string]interface{}{
"when": int64(2), "when": int64(2),
@ -138,7 +138,7 @@ func TestDaysNTPQ(t *testing.T) {
} }
acc := testutil.Accumulator{} acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc)) assert.NoError(t, acc.GatherError(n.Gather))
fields := map[string]interface{}{ fields := map[string]interface{}{
"when": int64(172800), "when": int64(172800),
@ -168,7 +168,7 @@ func TestHoursNTPQ(t *testing.T) {
} }
acc := testutil.Accumulator{} acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc)) assert.NoError(t, acc.GatherError(n.Gather))
fields := map[string]interface{}{ fields := map[string]interface{}{
"when": int64(7200), "when": int64(7200),
@ -198,7 +198,7 @@ func TestMinutesNTPQ(t *testing.T) {
} }
acc := testutil.Accumulator{} acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc)) assert.NoError(t, acc.GatherError(n.Gather))
fields := map[string]interface{}{ fields := map[string]interface{}{
"when": int64(120), "when": int64(120),
@ -228,7 +228,7 @@ func TestBadWhenNTPQ(t *testing.T) {
} }
acc := testutil.Accumulator{} acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc)) assert.Error(t, acc.GatherError(n.Gather))
fields := map[string]interface{}{ fields := map[string]interface{}{
"poll": int64(256), "poll": int64(256),
@ -257,7 +257,7 @@ func TestMultiNTPQ(t *testing.T) {
} }
acc := testutil.Accumulator{} acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc)) assert.NoError(t, acc.GatherError(n.Gather))
fields := map[string]interface{}{ fields := map[string]interface{}{
"delay": float64(54.033), "delay": float64(54.033),
@ -303,7 +303,7 @@ func TestBadHeaderNTPQ(t *testing.T) {
} }
acc := testutil.Accumulator{} acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc)) assert.NoError(t, acc.GatherError(n.Gather))
fields := map[string]interface{}{ fields := map[string]interface{}{
"when": int64(101), "when": int64(101),
@ -333,7 +333,7 @@ func TestMissingDelayColumnNTPQ(t *testing.T) {
} }
acc := testutil.Accumulator{} acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc)) assert.NoError(t, acc.GatherError(n.Gather))
fields := map[string]interface{}{ fields := map[string]interface{}{
"when": int64(101), "when": int64(101),
@ -361,7 +361,7 @@ func TestFailedNTPQ(t *testing.T) {
} }
acc := testutil.Accumulator{} acc := testutil.Accumulator{}
assert.Error(t, n.Gather(&acc)) assert.Error(t, acc.GatherError(n.Gather))
} }
type tester struct { type tester struct {

View File

@ -80,19 +80,17 @@ func (g *phpfpm) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup var wg sync.WaitGroup
var outerr error
for _, serv := range g.Urls { for _, serv := range g.Urls {
wg.Add(1) wg.Add(1)
go func(serv string) { go func(serv string) {
defer wg.Done() defer wg.Done()
outerr = g.gatherServer(serv, acc) acc.AddError(g.gatherServer(serv, acc))
}(serv) }(serv)
} }
wg.Wait() wg.Wait()
return outerr return nil
} }
// Request status page to get stat raw data and import it // Request status page to get stat raw data and import it

View File

@ -35,7 +35,7 @@ func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err := r.Gather(&acc) err := acc.GatherError(r.Gather)
require.NoError(t, err) require.NoError(t, err)
tags := map[string]string{ tags := map[string]string{
@ -75,7 +75,7 @@ func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) {
} }
var acc testutil.Accumulator var acc testutil.Accumulator
err = r.Gather(&acc) err = acc.GatherError(r.Gather)
require.NoError(t, err) require.NoError(t, err)
tags := map[string]string{ tags := map[string]string{
@ -119,7 +119,7 @@ func TestPhpFpmGeneratesMetrics_From_Socket(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err = r.Gather(&acc) err = acc.GatherError(r.Gather)
require.NoError(t, err) require.NoError(t, err)
tags := map[string]string{ tags := map[string]string{
@ -163,7 +163,7 @@ func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err = r.Gather(&acc) err = acc.GatherError(r.Gather)
require.NoError(t, err) require.NoError(t, err)
tags := map[string]string{ tags := map[string]string{
@ -193,7 +193,7 @@ func TestPhpFpmDefaultGetFromLocalhost(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err := r.Gather(&acc) err := acc.GatherError(r.Gather)
require.Error(t, err) require.Error(t, err)
assert.Contains(t, err.Error(), "127.0.0.1/status") assert.Contains(t, err.Error(), "127.0.0.1/status")
} }
@ -205,7 +205,7 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t
var acc testutil.Accumulator var acc testutil.Accumulator
err := r.Gather(&acc) err := acc.GatherError(r.Gather)
require.Error(t, err) require.Error(t, err)
assert.Contains(t, err.Error(), `Unable to connect to phpfpm status page 'http://aninvalidone': Get http://aninvalidone: dial tcp: lookup aninvalidone`) assert.Contains(t, err.Error(), `Unable to connect to phpfpm status page 'http://aninvalidone': Get http://aninvalidone: dial tcp: lookup aninvalidone`)
} }
@ -217,7 +217,7 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testi
var acc testutil.Accumulator var acc testutil.Accumulator
err := r.Gather(&acc) err := acc.GatherError(r.Gather)
require.Error(t, err) require.Error(t, err)
assert.Equal(t, `Socket doesn't exist '/tmp/invalid.sock': stat /tmp/invalid.sock: no such file or directory`, err.Error()) assert.Equal(t, `Socket doesn't exist '/tmp/invalid.sock': stat /tmp/invalid.sock: no such file or directory`, err.Error())

View File

@ -68,7 +68,6 @@ func (_ *Ping) SampleConfig() string {
func (p *Ping) Gather(acc telegraf.Accumulator) error { func (p *Ping) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup var wg sync.WaitGroup
errorChannel := make(chan error, len(p.Urls)*2)
// Spin off a go routine for each url to ping // Spin off a go routine for each url to ping
for _, url := range p.Urls { for _, url := range p.Urls {
@ -80,14 +79,14 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error {
out, err := p.pingHost(totalTimeout, args...) out, err := p.pingHost(totalTimeout, args...)
if err != nil { if err != nil {
// Combine go err + stderr output // Combine go err + stderr output
errorChannel <- errors.New( acc.AddError(errors.New(
strings.TrimSpace(out) + ", " + err.Error()) strings.TrimSpace(out) + ", " + err.Error()))
} }
tags := map[string]string{"url": u} tags := map[string]string{"url": u}
trans, rec, avg, stddev, err := processPingOutput(out) trans, rec, avg, stddev, err := processPingOutput(out)
if err != nil { if err != nil {
// fatal error // fatal error
errorChannel <- err acc.AddError(err)
return return
} }
// Calculate packet loss percentage // Calculate packet loss percentage
@ -108,18 +107,8 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error {
} }
wg.Wait() wg.Wait()
close(errorChannel)
// Get all errors and return them as one giant error
errorStrings := []string{}
for err := range errorChannel {
errorStrings = append(errorStrings, err.Error())
}
if len(errorStrings) == 0 {
return nil return nil
}
return errors.New(strings.Join(errorStrings, "\n"))
} }
func hostPinger(timeout float64, args ...string) (string, error) { func hostPinger(timeout float64, args ...string) (string, error) {

View File

@ -144,7 +144,7 @@ func TestPingGather(t *testing.T) {
pingHost: mockHostPinger, pingHost: mockHostPinger,
} }
p.Gather(&acc) acc.GatherError(p.Gather)
tags := map[string]string{"url": "www.google.com"} tags := map[string]string{"url": "www.google.com"}
fields := map[string]interface{}{ fields := map[string]interface{}{
"packets_transmitted": 5, "packets_transmitted": 5,
@ -182,7 +182,7 @@ func TestLossyPingGather(t *testing.T) {
pingHost: mockLossyHostPinger, pingHost: mockLossyHostPinger,
} }
p.Gather(&acc) acc.GatherError(p.Gather)
tags := map[string]string{"url": "www.google.com"} tags := map[string]string{"url": "www.google.com"}
fields := map[string]interface{}{ fields := map[string]interface{}{
"packets_transmitted": 5, "packets_transmitted": 5,
@ -215,7 +215,7 @@ func TestBadPingGather(t *testing.T) {
pingHost: mockErrorHostPinger, pingHost: mockErrorHostPinger,
} }
p.Gather(&acc) acc.GatherError(p.Gather)
tags := map[string]string{"url": "www.amazon.com"} tags := map[string]string{"url": "www.amazon.com"}
fields := map[string]interface{}{ fields := map[string]interface{}{
"packets_transmitted": 2, "packets_transmitted": 2,
@ -237,7 +237,7 @@ func TestFatalPingGather(t *testing.T) {
pingHost: mockFatalHostPinger, pingHost: mockFatalHostPinger,
} }
p.Gather(&acc) acc.GatherError(p.Gather)
assert.False(t, acc.HasMeasurement("packets_transmitted"), assert.False(t, acc.HasMeasurement("packets_transmitted"),
"Fatal ping should not have packet measurements") "Fatal ping should not have packet measurements")
assert.False(t, acc.HasMeasurement("packets_received"), assert.False(t, acc.HasMeasurement("packets_received"),

View File

@ -69,7 +69,7 @@ func TestPingGather(t *testing.T) {
pingHost: mockHostPinger, pingHost: mockHostPinger,
} }
p.Gather(&acc) acc.GatherError(p.Gather)
tags := map[string]string{"url": "www.google.com"} tags := map[string]string{"url": "www.google.com"}
fields := map[string]interface{}{ fields := map[string]interface{}{
"packets_transmitted": 4, "packets_transmitted": 4,
@ -112,7 +112,7 @@ func TestBadPingGather(t *testing.T) {
pingHost: mockErrorHostPinger, pingHost: mockErrorHostPinger,
} }
p.Gather(&acc) acc.GatherError(p.Gather)
tags := map[string]string{"url": "www.amazon.com"} tags := map[string]string{"url": "www.amazon.com"}
fields := map[string]interface{}{ fields := map[string]interface{}{
"packets_transmitted": 4, "packets_transmitted": 4,
@ -155,7 +155,7 @@ func TestLossyPingGather(t *testing.T) {
pingHost: mockLossyHostPinger, pingHost: mockLossyHostPinger,
} }
p.Gather(&acc) acc.GatherError(p.Gather)
tags := map[string]string{"url": "www.google.com"} tags := map[string]string{"url": "www.google.com"}
fields := map[string]interface{}{ fields := map[string]interface{}{
"packets_transmitted": 9, "packets_transmitted": 9,
@ -214,7 +214,7 @@ func TestFatalPingGather(t *testing.T) {
pingHost: mockFatalHostPinger, pingHost: mockFatalHostPinger,
} }
p.Gather(&acc) acc.GatherError(p.Gather)
assert.True(t, acc.HasFloatField("ping", "errors"), assert.True(t, acc.HasFloatField("ping", "errors"),
"Fatal ping should have packet measurements") "Fatal ping should have packet measurements")
assert.False(t, acc.HasIntField("ping", "packets_transmitted"), assert.False(t, acc.HasIntField("ping", "packets_transmitted"),
@ -259,7 +259,7 @@ func TestUnreachablePingGather(t *testing.T) {
pingHost: mockUnreachableHostPinger, pingHost: mockUnreachableHostPinger,
} }
p.Gather(&acc) acc.GatherError(p.Gather)
tags := map[string]string{"url": "www.google.com"} tags := map[string]string{"url": "www.google.com"}
fields := map[string]interface{}{ fields := map[string]interface{}{
@ -305,7 +305,7 @@ func TestTTLExpiredPingGather(t *testing.T) {
pingHost: mockTTLExpiredPinger, pingHost: mockTTLExpiredPinger,
} }
p.Gather(&acc) acc.GatherError(p.Gather)
tags := map[string]string{"url": "www.google.com"} tags := map[string]string{"url": "www.google.com"}
fields := map[string]interface{}{ fields := map[string]interface{}{

View File

@ -170,7 +170,8 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
if p.Query[i].Version <= db_version { if p.Query[i].Version <= db_version {
rows, err := db.Query(sql_query) rows, err := db.Query(sql_query)
if err != nil { if err != nil {
return err acc.AddError(err)
continue
} }
defer rows.Close() defer rows.Close()
@ -178,7 +179,8 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
// grab the column information from the result // grab the column information from the result
p.OrderedColumns, err = rows.Columns() p.OrderedColumns, err = rows.Columns()
if err != nil { if err != nil {
return err acc.AddError(err)
continue
} else { } else {
for _, v := range p.OrderedColumns { for _, v := range p.OrderedColumns {
p.AllColumns = append(p.AllColumns, v) p.AllColumns = append(p.AllColumns, v)
@ -195,7 +197,8 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
for rows.Next() { for rows.Next() {
err = p.accRow(meas_name, rows, acc) err = p.accRow(meas_name, rows, acc)
if err != nil { if err != nil {
return err acc.AddError(err)
break
} }
} }
} }

View File

@ -26,7 +26,7 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) {
}, },
} }
var acc testutil.Accumulator var acc testutil.Accumulator
err := p.Gather(&acc) err := acc.GatherError(p.Gather)
require.NoError(t, err) require.NoError(t, err)
availableColumns := make(map[string]bool) availableColumns := make(map[string]bool)
@ -114,7 +114,7 @@ func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err := p.Gather(&acc) err := acc.GatherError(p.Gather)
require.NoError(t, err) require.NoError(t, err)
for col := range p.IgnoredColumns() { for col := range p.IgnoredColumns() {

View File

@ -41,7 +41,7 @@ func (p *Powerdns) Gather(acc telegraf.Accumulator) error {
for _, serverSocket := range p.UnixSockets { for _, serverSocket := range p.UnixSockets {
if err := p.gatherServer(serverSocket, acc); err != nil { if err := p.gatherServer(serverSocket, acc); err != nil {
return err acc.AddError(err)
} }
} }

View File

@ -90,7 +90,7 @@ func TestMemcachedGeneratesMetrics(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err = p.Gather(&acc) err = acc.GatherError(p.Gather)
require.NoError(t, err) require.NoError(t, err)
intMetrics := []string{"corrupt-packets", "deferred-cache-inserts", intMetrics := []string{"corrupt-packets", "deferred-cache-inserts",

View File

@ -71,9 +71,8 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error {
procs, err := p.updateProcesses(p.procs) procs, err := p.updateProcesses(p.procs)
if err != nil { if err != nil {
return fmt.Errorf( acc.AddError(fmt.Errorf("E! Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s",
"E! Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s", p.Exe, p.PidFile, p.Pattern, p.User, err.Error()))
p.Exe, p.PidFile, p.Pattern, p.User, err.Error())
} }
p.procs = procs p.procs = procs

View File

@ -108,7 +108,7 @@ func TestGather_CreateProcessErrorOk(t *testing.T) {
return nil, fmt.Errorf("createProcess error") return nil, fmt.Errorf("createProcess error")
}, },
} }
require.NoError(t, p.Gather(&acc)) require.NoError(t, acc.GatherError(p.Gather))
} }
func TestGather_CreatePIDFinderError(t *testing.T) { func TestGather_CreatePIDFinderError(t *testing.T) {
@ -120,7 +120,7 @@ func TestGather_CreatePIDFinderError(t *testing.T) {
}, },
createProcess: newTestProc, createProcess: newTestProc,
} }
require.Error(t, p.Gather(&acc)) require.Error(t, acc.GatherError(p.Gather))
} }
func TestGather_ProcessName(t *testing.T) { func TestGather_ProcessName(t *testing.T) {
@ -132,7 +132,7 @@ func TestGather_ProcessName(t *testing.T) {
createPIDFinder: pidFinder([]PID{pid}, nil), createPIDFinder: pidFinder([]PID{pid}, nil),
createProcess: newTestProc, createProcess: newTestProc,
} }
require.NoError(t, p.Gather(&acc)) require.NoError(t, acc.GatherError(p.Gather))
assert.Equal(t, "custom_name", acc.TagValue("procstat", "process_name")) assert.Equal(t, "custom_name", acc.TagValue("procstat", "process_name"))
} }
@ -146,7 +146,7 @@ func TestGather_NoProcessNameUsesReal(t *testing.T) {
createPIDFinder: pidFinder([]PID{pid}, nil), createPIDFinder: pidFinder([]PID{pid}, nil),
createProcess: newTestProc, createProcess: newTestProc,
} }
require.NoError(t, p.Gather(&acc)) require.NoError(t, acc.GatherError(p.Gather))
assert.True(t, acc.HasTag("procstat", "process_name")) assert.True(t, acc.HasTag("procstat", "process_name"))
} }
@ -159,7 +159,7 @@ func TestGather_NoPidTag(t *testing.T) {
createPIDFinder: pidFinder([]PID{pid}, nil), createPIDFinder: pidFinder([]PID{pid}, nil),
createProcess: newTestProc, createProcess: newTestProc,
} }
require.NoError(t, p.Gather(&acc)) require.NoError(t, acc.GatherError(p.Gather))
assert.True(t, acc.HasInt32Field("procstat", "pid")) assert.True(t, acc.HasInt32Field("procstat", "pid"))
assert.False(t, acc.HasTag("procstat", "pid")) assert.False(t, acc.HasTag("procstat", "pid"))
} }
@ -173,7 +173,7 @@ func TestGather_PidTag(t *testing.T) {
createPIDFinder: pidFinder([]PID{pid}, nil), createPIDFinder: pidFinder([]PID{pid}, nil),
createProcess: newTestProc, createProcess: newTestProc,
} }
require.NoError(t, p.Gather(&acc)) require.NoError(t, acc.GatherError(p.Gather))
assert.Equal(t, "42", acc.TagValue("procstat", "pid")) assert.Equal(t, "42", acc.TagValue("procstat", "pid"))
assert.False(t, acc.HasInt32Field("procstat", "pid")) assert.False(t, acc.HasInt32Field("procstat", "pid"))
} }
@ -187,7 +187,7 @@ func TestGather_Prefix(t *testing.T) {
createPIDFinder: pidFinder([]PID{pid}, nil), createPIDFinder: pidFinder([]PID{pid}, nil),
createProcess: newTestProc, createProcess: newTestProc,
} }
require.NoError(t, p.Gather(&acc)) require.NoError(t, acc.GatherError(p.Gather))
assert.True(t, acc.HasInt32Field("procstat", "custom_prefix_num_fds")) assert.True(t, acc.HasInt32Field("procstat", "custom_prefix_num_fds"))
} }
@ -199,7 +199,7 @@ func TestGather_Exe(t *testing.T) {
createPIDFinder: pidFinder([]PID{pid}, nil), createPIDFinder: pidFinder([]PID{pid}, nil),
createProcess: newTestProc, createProcess: newTestProc,
} }
require.NoError(t, p.Gather(&acc)) require.NoError(t, acc.GatherError(p.Gather))
assert.Equal(t, exe, acc.TagValue("procstat", "exe")) assert.Equal(t, exe, acc.TagValue("procstat", "exe"))
} }
@ -213,7 +213,7 @@ func TestGather_User(t *testing.T) {
createPIDFinder: pidFinder([]PID{pid}, nil), createPIDFinder: pidFinder([]PID{pid}, nil),
createProcess: newTestProc, createProcess: newTestProc,
} }
require.NoError(t, p.Gather(&acc)) require.NoError(t, acc.GatherError(p.Gather))
assert.Equal(t, user, acc.TagValue("procstat", "user")) assert.Equal(t, user, acc.TagValue("procstat", "user"))
} }
@ -227,7 +227,7 @@ func TestGather_Pattern(t *testing.T) {
createPIDFinder: pidFinder([]PID{pid}, nil), createPIDFinder: pidFinder([]PID{pid}, nil),
createProcess: newTestProc, createProcess: newTestProc,
} }
require.NoError(t, p.Gather(&acc)) require.NoError(t, acc.GatherError(p.Gather))
assert.Equal(t, pattern, acc.TagValue("procstat", "pattern")) assert.Equal(t, pattern, acc.TagValue("procstat", "pattern"))
} }
@ -239,7 +239,7 @@ func TestGather_MissingPidMethod(t *testing.T) {
createPIDFinder: pidFinder([]PID{pid}, nil), createPIDFinder: pidFinder([]PID{pid}, nil),
createProcess: newTestProc, createProcess: newTestProc,
} }
require.Error(t, p.Gather(&acc)) require.Error(t, acc.GatherError(p.Gather))
} }
func TestGather_PidFile(t *testing.T) { func TestGather_PidFile(t *testing.T) {
@ -251,7 +251,7 @@ func TestGather_PidFile(t *testing.T) {
createPIDFinder: pidFinder([]PID{pid}, nil), createPIDFinder: pidFinder([]PID{pid}, nil),
createProcess: newTestProc, createProcess: newTestProc,
} }
require.NoError(t, p.Gather(&acc)) require.NoError(t, acc.GatherError(p.Gather))
assert.Equal(t, pidfile, acc.TagValue("procstat", "pidfile")) assert.Equal(t, pidfile, acc.TagValue("procstat", "pidfile"))
} }
@ -266,7 +266,7 @@ func TestGather_PercentFirstPass(t *testing.T) {
createPIDFinder: pidFinder([]PID{pid}, nil), createPIDFinder: pidFinder([]PID{pid}, nil),
createProcess: NewProc, createProcess: NewProc,
} }
require.NoError(t, p.Gather(&acc)) require.NoError(t, acc.GatherError(p.Gather))
assert.True(t, acc.HasFloatField("procstat", "cpu_time_user")) assert.True(t, acc.HasFloatField("procstat", "cpu_time_user"))
assert.False(t, acc.HasFloatField("procstat", "cpu_usage")) assert.False(t, acc.HasFloatField("procstat", "cpu_usage"))
@ -282,8 +282,8 @@ func TestGather_PercentSecondPass(t *testing.T) {
createPIDFinder: pidFinder([]PID{pid}, nil), createPIDFinder: pidFinder([]PID{pid}, nil),
createProcess: NewProc, createProcess: NewProc,
} }
require.NoError(t, p.Gather(&acc)) require.NoError(t, acc.GatherError(p.Gather))
require.NoError(t, p.Gather(&acc)) require.NoError(t, acc.GatherError(p.Gather))
assert.True(t, acc.HasFloatField("procstat", "cpu_time_user")) assert.True(t, acc.HasFloatField("procstat", "cpu_time_user"))
assert.True(t, acc.HasFloatField("procstat", "cpu_usage")) assert.True(t, acc.HasFloatField("procstat", "cpu_usage"))

View File

@ -67,19 +67,17 @@ var ErrProtocolError = errors.New("prometheus protocol error")
func (p *Prometheus) Gather(acc telegraf.Accumulator) error { func (p *Prometheus) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup var wg sync.WaitGroup
var outerr error
for _, serv := range p.Urls { for _, serv := range p.Urls {
wg.Add(1) wg.Add(1)
go func(serv string) { go func(serv string) {
defer wg.Done() defer wg.Done()
outerr = p.gatherURL(serv, acc) acc.AddError(p.gatherURL(serv, acc))
}(serv) }(serv)
} }
wg.Wait() wg.Wait()
return outerr return nil
} }
var tr = &http.Transport{ var tr = &http.Transport{

View File

@ -41,7 +41,7 @@ func TestPrometheusGeneratesMetrics(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err := p.Gather(&acc) err := acc.GatherError(p.Gather)
require.NoError(t, err) require.NoError(t, err)
assert.True(t, acc.HasFloatField("go_gc_duration_seconds", "count")) assert.True(t, acc.HasFloatField("go_gc_duration_seconds", "count"))

View File

@ -10,7 +10,6 @@ import (
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
) )
@ -136,7 +135,7 @@ type Node struct {
} }
// gatherFunc ... // gatherFunc gathers one category of RabbitMQ stats and reports failures through the accumulator.
type gatherFunc func(r *RabbitMQ, acc telegraf.Accumulator, errChan chan error) type gatherFunc func(r *RabbitMQ, acc telegraf.Accumulator)
var gatherFunctions = []gatherFunc{gatherOverview, gatherNodes, gatherQueues} var gatherFunctions = []gatherFunc{gatherOverview, gatherNodes, gatherQueues}
@ -198,16 +197,15 @@ func (r *RabbitMQ) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(len(gatherFunctions)) wg.Add(len(gatherFunctions))
errChan := errchan.New(len(gatherFunctions))
for _, f := range gatherFunctions { for _, f := range gatherFunctions {
go func(gf gatherFunc) { go func(gf gatherFunc) {
defer wg.Done() defer wg.Done()
gf(r, acc, errChan.C) gf(r, acc)
}(f) }(f)
} }
wg.Wait() wg.Wait()
return errChan.Error() return nil
} }
func (r *RabbitMQ) requestJSON(u string, target interface{}) error { func (r *RabbitMQ) requestJSON(u string, target interface{}) error {
@ -245,17 +243,17 @@ func (r *RabbitMQ) requestJSON(u string, target interface{}) error {
return nil return nil
} }
func gatherOverview(r *RabbitMQ, acc telegraf.Accumulator, errChan chan error) { func gatherOverview(r *RabbitMQ, acc telegraf.Accumulator) {
overview := &OverviewResponse{} overview := &OverviewResponse{}
err := r.requestJSON("/api/overview", &overview) err := r.requestJSON("/api/overview", &overview)
if err != nil { if err != nil {
errChan <- err acc.AddError(err)
return return
} }
if overview.QueueTotals == nil || overview.ObjectTotals == nil || overview.MessageStats == nil { if overview.QueueTotals == nil || overview.ObjectTotals == nil || overview.MessageStats == nil {
errChan <- fmt.Errorf("Wrong answer from rabbitmq. Probably auth issue") acc.AddError(fmt.Errorf("Wrong answer from rabbitmq. Probably auth issue"))
return return
} }
@ -277,16 +275,14 @@ func gatherOverview(r *RabbitMQ, acc telegraf.Accumulator, errChan chan error) {
"messages_published": overview.MessageStats.Publish, "messages_published": overview.MessageStats.Publish,
} }
acc.AddFields("rabbitmq_overview", fields, tags) acc.AddFields("rabbitmq_overview", fields, tags)
errChan <- nil
} }
func gatherNodes(r *RabbitMQ, acc telegraf.Accumulator, errChan chan error) { func gatherNodes(r *RabbitMQ, acc telegraf.Accumulator) {
nodes := make([]Node, 0) nodes := make([]Node, 0)
// Gather information about nodes // Gather information about nodes
err := r.requestJSON("/api/nodes", &nodes) err := r.requestJSON("/api/nodes", &nodes)
if err != nil { if err != nil {
errChan <- err acc.AddError(err)
return return
} }
now := time.Now() now := time.Now()
@ -314,16 +310,14 @@ func gatherNodes(r *RabbitMQ, acc telegraf.Accumulator, errChan chan error) {
} }
acc.AddFields("rabbitmq_node", fields, tags, now) acc.AddFields("rabbitmq_node", fields, tags, now)
} }
errChan <- nil
} }
func gatherQueues(r *RabbitMQ, acc telegraf.Accumulator, errChan chan error) { func gatherQueues(r *RabbitMQ, acc telegraf.Accumulator) {
// Gather information about queues // Gather information about queues
queues := make([]Queue, 0) queues := make([]Queue, 0)
err := r.requestJSON("/api/queues", &queues) err := r.requestJSON("/api/queues", &queues)
if err != nil { if err != nil {
errChan <- err acc.AddError(err)
return return
} }
@ -371,8 +365,6 @@ func gatherQueues(r *RabbitMQ, acc telegraf.Accumulator, errChan chan error) {
tags, tags,
) )
} }
errChan <- nil
} }
func (r *RabbitMQ) shouldGatherNode(node Node) bool { func (r *RabbitMQ) shouldGatherNode(node Node) bool {

View File

@ -399,7 +399,7 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err := r.Gather(&acc) err := acc.GatherError(r.Gather)
require.NoError(t, err) require.NoError(t, err)
intMetrics := []string{ intMetrics := []string{

View File

@ -35,24 +35,24 @@ func (r *Raindrops) Description() string {
func (r *Raindrops) Gather(acc telegraf.Accumulator) error { func (r *Raindrops) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup var wg sync.WaitGroup
var outerr error
for _, u := range r.Urls { for _, u := range r.Urls {
addr, err := url.Parse(u) addr, err := url.Parse(u)
if err != nil { if err != nil {
return fmt.Errorf("Unable to parse address '%s': %s", u, err) acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err))
continue
} }
wg.Add(1) wg.Add(1)
go func(addr *url.URL) { go func(addr *url.URL) {
defer wg.Done() defer wg.Done()
outerr = r.gatherUrl(addr, acc) acc.AddError(r.gatherUrl(addr, acc))
}(addr) }(addr)
} }
wg.Wait() wg.Wait()
return outerr return nil
} }
func (r *Raindrops) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { func (r *Raindrops) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {

View File

@ -68,7 +68,7 @@ func TestRaindropsGeneratesMetrics(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err := n.Gather(&acc) err := acc.GatherError(n.Gather)
require.NoError(t, err) require.NoError(t, err)
fields := map[string]interface{}{ fields := map[string]interface{}{

View File

@ -12,7 +12,6 @@ import (
"time" "time"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
) )
@ -66,7 +65,6 @@ func (r *Redis) Gather(acc telegraf.Accumulator) error {
} }
var wg sync.WaitGroup var wg sync.WaitGroup
errChan := errchan.New(len(r.Servers))
for _, serv := range r.Servers { for _, serv := range r.Servers {
if !strings.HasPrefix(serv, "tcp://") && !strings.HasPrefix(serv, "unix://") { if !strings.HasPrefix(serv, "tcp://") && !strings.HasPrefix(serv, "unix://") {
serv = "tcp://" + serv serv = "tcp://" + serv
@ -74,7 +72,8 @@ func (r *Redis) Gather(acc telegraf.Accumulator) error {
u, err := url.Parse(serv) u, err := url.Parse(serv)
if err != nil { if err != nil {
return fmt.Errorf("Unable to parse to address '%s': %s", serv, err) acc.AddError(fmt.Errorf("Unable to parse to address '%s': %s", serv, err))
continue
} else if u.Scheme == "" { } else if u.Scheme == "" {
// fallback to simple string based address (i.e. "10.0.0.1:10000") // fallback to simple string based address (i.e. "10.0.0.1:10000")
u.Scheme = "tcp" u.Scheme = "tcp"
@ -91,12 +90,12 @@ func (r *Redis) Gather(acc telegraf.Accumulator) error {
wg.Add(1) wg.Add(1)
go func(serv string) { go func(serv string) {
defer wg.Done() defer wg.Done()
errChan.C <- r.gatherServer(u, acc) acc.AddError(r.gatherServer(u, acc))
}(serv) }(serv)
} }
wg.Wait() wg.Wait()
return errChan.Error() return nil
} }
func (r *Redis) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { func (r *Redis) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {

View File

@ -25,7 +25,7 @@ func TestRedisConnect(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err := r.Gather(&acc) err := acc.GatherError(r.Gather)
require.NoError(t, err) require.NoError(t, err)
} }

View File

@ -44,12 +44,11 @@ func (r *RethinkDB) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup var wg sync.WaitGroup
var outerr error
for _, serv := range r.Servers { for _, serv := range r.Servers {
u, err := url.Parse(serv) u, err := url.Parse(serv)
if err != nil { if err != nil {
return fmt.Errorf("Unable to parse to address '%s': %s", serv, err) acc.AddError(fmt.Errorf("Unable to parse to address '%s': %s", serv, err))
continue
} else if u.Scheme == "" { } else if u.Scheme == "" {
// fallback to simple string based address (i.e. "10.0.0.1:10000") // fallback to simple string based address (i.e. "10.0.0.1:10000")
u.Host = serv u.Host = serv
@ -57,13 +56,13 @@ func (r *RethinkDB) Gather(acc telegraf.Accumulator) error {
wg.Add(1) wg.Add(1)
go func(serv string) { go func(serv string) {
defer wg.Done() defer wg.Done()
outerr = r.gatherServer(&Server{Url: u}, acc) acc.AddError(r.gatherServer(&Server{Url: u}, acc))
}(serv) }(serv)
} }
wg.Wait() wg.Wait()
return outerr return nil
} }
func (r *RethinkDB) gatherServer(server *Server, acc telegraf.Accumulator) error { func (r *RethinkDB) gatherServer(server *Server, acc telegraf.Accumulator) error {

View File

@ -104,9 +104,7 @@ func (r *Riak) Gather(acc telegraf.Accumulator) error {
// Range over all servers, gathering stats. Returns early in case of any error. // Range over all servers, gathering stats. Errors are handed to acc.AddError and do not abort the loop.
for _, s := range r.Servers { for _, s := range r.Servers {
if err := r.gatherServer(s, acc); err != nil { acc.AddError(r.gatherServer(s, acc))
return err
}
} }
return nil return nil

View File

@ -1,6 +1,7 @@
package snmp_legacy package snmp_legacy
import ( import (
"fmt"
"io/ioutil" "io/ioutil"
"log" "log"
"net" "net"
@ -394,16 +395,16 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
// only if len(s.OidInstanceMapping) == 0 // only if len(s.OidInstanceMapping) == 0
if len(host.OidInstanceMapping) >= 0 { if len(host.OidInstanceMapping) >= 0 {
if err := host.SNMPMap(acc, s.nameToOid, s.subTableMap); err != nil { if err := host.SNMPMap(acc, s.nameToOid, s.subTableMap); err != nil {
log.Printf("E! SNMP Mapping error for host '%s': %s", host.Address, err) acc.AddError(fmt.Errorf("E! SNMP Mapping error for host '%s': %s", host.Address, err))
continue continue
} }
} }
// Launch Get requests // Launch Get requests
if err := host.SNMPGet(acc, s.initNode); err != nil { if err := host.SNMPGet(acc, s.initNode); err != nil {
log.Printf("E! SNMP Error for host '%s': %s", host.Address, err) acc.AddError(fmt.Errorf("E! SNMP Error for host '%s': %s", host.Address, err))
} }
if err := host.SNMPBulk(acc, s.initNode); err != nil { if err := host.SNMPBulk(acc, s.initNode); err != nil {
log.Printf("E! SNMP Error for host '%s': %s", host.Address, err) acc.AddError(fmt.Errorf("E! SNMP Error for host '%s': %s", host.Address, err))
} }
} }
return nil return nil

View File

@ -79,20 +79,19 @@ func (s *SQLServer) Gather(acc telegraf.Accumulator) error {
} }
var wg sync.WaitGroup var wg sync.WaitGroup
var outerr error
for _, serv := range s.Servers { for _, serv := range s.Servers {
for _, query := range queries { for _, query := range queries {
wg.Add(1) wg.Add(1)
go func(serv string, query Query) { go func(serv string, query Query) {
defer wg.Done() defer wg.Done()
outerr = s.gatherServer(serv, query, acc) acc.AddError(s.gatherServer(serv, query, acc))
}(serv, query) }(serv, query)
} }
} }
wg.Wait() wg.Wait()
return outerr return nil
} }
func (s *SQLServer) gatherServer(server string, query Query, acc telegraf.Accumulator) error { func (s *SQLServer) gatherServer(server string, query Query, acc telegraf.Accumulator) error {

View File

@ -5,7 +5,6 @@ package sysstat
import ( import (
"bufio" "bufio"
"encoding/csv" "encoding/csv"
"errors"
"fmt" "fmt"
"io" "io"
"log" "log"
@ -149,34 +148,20 @@ func (s *Sysstat) Gather(acc telegraf.Accumulator) error {
return err return err
} }
var wg sync.WaitGroup var wg sync.WaitGroup
errorChannel := make(chan error, len(s.Options)*2)
for option := range s.Options { for option := range s.Options {
wg.Add(1) wg.Add(1)
go func(acc telegraf.Accumulator, option string) { go func(acc telegraf.Accumulator, option string) {
defer wg.Done() defer wg.Done()
if err := s.parse(acc, option, ts); err != nil { acc.AddError(s.parse(acc, option, ts))
errorChannel <- err
}
}(acc, option) }(acc, option)
} }
wg.Wait() wg.Wait()
close(errorChannel)
errorStrings := []string{}
for err := range errorChannel {
errorStrings = append(errorStrings, err.Error())
}
if _, err := os.Stat(s.tmpFile); err == nil { if _, err := os.Stat(s.tmpFile); err == nil {
if err := os.Remove(s.tmpFile); err != nil { acc.AddError(os.Remove(s.tmpFile))
errorStrings = append(errorStrings, err.Error())
}
} }
if len(errorStrings) == 0 {
return nil return nil
}
return errors.New(strings.Join(errorStrings, "\n"))
} }
// collect collects sysstat data with the collector utility sadc. // collect collects sysstat data with the collector utility sadc.

View File

@ -26,14 +26,14 @@ func TestInterval(t *testing.T) {
s.interval = 0 s.interval = 0
wantedInterval := 3 wantedInterval := 3
err := s.Gather(&acc) err := acc.GatherError(s.Gather)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
time.Sleep(time.Duration(wantedInterval) * time.Second) time.Sleep(time.Duration(wantedInterval) * time.Second)
err = s.Gather(&acc) err = acc.GatherError(s.Gather)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -37,7 +37,7 @@ func TestGather(t *testing.T) {
defer func() { execCommand = exec.Command }() defer func() { execCommand = exec.Command }()
var acc testutil.Accumulator var acc testutil.Accumulator
err := s.Gather(&acc) err := acc.GatherError(s.Gather)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -160,7 +160,7 @@ func TestGatherGrouped(t *testing.T) {
defer func() { execCommand = exec.Command }() defer func() { execCommand = exec.Command }()
var acc testutil.Accumulator var acc testutil.Accumulator
err := s.Gather(&acc) err := acc.GatherError(s.Gather)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -80,7 +80,6 @@ func (t *Tail) Start(acc telegraf.Accumulator) error {
} }
} }
var errS string
// Create a "tailer" for each file // Create a "tailer" for each file
for _, filepath := range t.Files { for _, filepath := range t.Files {
g, err := globpath.Compile(filepath) g, err := globpath.Compile(filepath)
@ -97,7 +96,7 @@ func (t *Tail) Start(acc telegraf.Accumulator) error {
Pipe: t.Pipe, Pipe: t.Pipe,
}) })
if err != nil { if err != nil {
errS += err.Error() + " " acc.AddError(err)
continue continue
} }
// create a goroutine for each "tailer" // create a goroutine for each "tailer"
@ -107,9 +106,6 @@ func (t *Tail) Start(acc telegraf.Accumulator) error {
} }
} }
if errS != "" {
return fmt.Errorf(errS)
}
return nil return nil
} }

View File

@ -30,7 +30,7 @@ func TestTailFromBeginning(t *testing.T) {
acc := testutil.Accumulator{} acc := testutil.Accumulator{}
require.NoError(t, tt.Start(&acc)) require.NoError(t, tt.Start(&acc))
require.NoError(t, tt.Gather(&acc)) require.NoError(t, acc.GatherError(tt.Gather))
acc.Wait(1) acc.Wait(1)
acc.AssertContainsTaggedFields(t, "cpu", acc.AssertContainsTaggedFields(t, "cpu",
@ -67,7 +67,7 @@ func TestTailFromEnd(t *testing.T) {
_, err = tmpfile.WriteString("cpu,othertag=foo usage_idle=100\n") _, err = tmpfile.WriteString("cpu,othertag=foo usage_idle=100\n")
require.NoError(t, err) require.NoError(t, err)
require.NoError(t, tt.Gather(&acc)) require.NoError(t, acc.GatherError(tt.Gather))
acc.Wait(1) acc.Wait(1)
acc.AssertContainsTaggedFields(t, "cpu", acc.AssertContainsTaggedFields(t, "cpu",
@ -98,7 +98,7 @@ func TestTailBadLine(t *testing.T) {
_, err = tmpfile.WriteString("cpu mytag= foo usage_idle= 100\n") _, err = tmpfile.WriteString("cpu mytag= foo usage_idle= 100\n")
require.NoError(t, err) require.NoError(t, err)
require.NoError(t, tt.Gather(&acc)) require.NoError(t, acc.GatherError(tt.Gather))
acc.WaitError(1) acc.WaitError(1)
assert.Contains(t, acc.Errors[0].Error(), "E! Malformed log line") assert.Contains(t, acc.Errors[0].Error(), "E! Malformed log line")

View File

@ -6,7 +6,6 @@ import (
"bufio" "bufio"
"bytes" "bytes"
"fmt" "fmt"
"os"
"os/exec" "os/exec"
"strconv" "strconv"
"strings" "strings"
@ -124,8 +123,8 @@ func (s *Varnish) Gather(acc telegraf.Accumulator) error {
sectionMap[section][field], err = strconv.ParseUint(value, 10, 64) sectionMap[section][field], err = strconv.ParseUint(value, 10, 64)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Expected a numeric value for %s = %v\n", acc.AddError(fmt.Errorf("Expected a numeric value for %s = %v\n",
stat, value) stat, value))
} }
} }

View File

@ -79,7 +79,7 @@ func (wb *Webhooks) Listen(acc telegraf.Accumulator) {
err := http.ListenAndServe(fmt.Sprintf("%s", wb.ServiceAddress), r) err := http.ListenAndServe(fmt.Sprintf("%s", wb.ServiceAddress), r)
if err != nil { if err != nil {
log.Printf("E! Error starting server: %v", err) acc.AddError(fmt.Errorf("E! Error starting server: %v", err))
} }
} }

View File

@ -47,9 +47,7 @@ func (z *Zookeeper) Gather(acc telegraf.Accumulator) error {
} }
for _, serverAddress := range z.Servers { for _, serverAddress := range z.Servers {
if err := z.gatherServer(serverAddress, acc); err != nil { acc.AddError(z.gatherServer(serverAddress, acc))
return err
}
} }
return nil return nil
} }

View File

@ -19,8 +19,7 @@ func TestZookeeperGeneratesMetrics(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err := z.Gather(&acc) require.NoError(t, acc.GatherError(z.Gather))
require.NoError(t, err)
intMetrics := []string{ intMetrics := []string{
"avg_latency", "avg_latency",

View File

@ -187,6 +187,17 @@ func (a *Accumulator) TagValue(measurement string, key string) string {
return "" return ""
} }
// Calls the given Gather function and returns the first error found.
func (a *Accumulator) GatherError(gf func(telegraf.Accumulator) error) error {
if err := gf(a); err != nil {
return err
}
if len(a.Errors) > 0 {
return a.Errors[0]
}
return nil
}
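Every test in this diff now goes through this helper instead of calling Gather directly, so failures reported via AddError still fail assertions such as require.NoError. A compact usage sketch, reusing the hypothetical Plugin from the earlier sketch:

package example

import (
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/require"
)

func TestExampleGather(t *testing.T) {
	plugin := &Plugin{Servers: []string{"localhost:1234"}}

	var acc testutil.Accumulator
	// GatherError runs Gather and then surfaces the first accumulated error,
	// so an AddError'd failure still fails the test.
	require.NoError(t, acc.GatherError(plugin.Gather))
}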
// NFields returns the total number of fields in the accumulator, across all // NFields returns the total number of fields in the accumulator, across all
// measurements // measurements
func (a *Accumulator) NFields() int { func (a *Accumulator) NFields() int {