package agent

import (
	"context"
	"fmt"
	"log"
	"runtime"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/internal/config"
	"github.com/influxdata/telegraf/internal/models"
	"github.com/influxdata/telegraf/plugins/serializers/influx"
)

// Agent runs a set of plugins.
type Agent struct {
	Config *config.Config
}

// NewAgent returns an Agent for the given Config.
func NewAgent(config *config.Config) (*Agent, error) {
	a := &Agent{
		Config: config,
	}
	return a, nil
}
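
// A minimal sketch of how a caller typically wires the agent together,
// assuming a TOML configuration file on disk; the path below is only an
// example. config.NewConfig and (*config.Config).LoadConfig come from
// internal/config.
//
//	c := config.NewConfig()
//	if err := c.LoadConfig("/etc/telegraf/telegraf.conf"); err != nil {
//		log.Fatalf("E! loading config: %v", err)
//	}
//	ag, err := NewAgent(c)
//	if err != nil {
//		log.Fatalf("E! creating agent: %v", err)
//	}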

// Run starts and runs the Agent until the context is done.
func (a *Agent) Run(ctx context.Context) error {
	log.Printf("I! [agent] Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+
		"Flush Interval:%s",
		a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet,
		a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)

	if ctx.Err() != nil {
		return ctx.Err()
	}

	log.Printf("D! [agent] Connecting outputs")
	err := a.connectOutputs(ctx)
	if err != nil {
		return err
	}

	inputC := make(chan telegraf.Metric, 100)
	procC := make(chan telegraf.Metric, 100)
	outputC := make(chan telegraf.Metric, 100)

	startTime := time.Now()

	log.Printf("D! [agent] Starting service inputs")
	err = a.startServiceInputs(ctx, inputC)
	if err != nil {
		return err
	}

	var wg sync.WaitGroup

	// Metrics flow inputs -> (processors) -> (aggregators) -> outputs. The
	// src/dst pair tracks the tail of the pipeline as the optional stages are
	// attached below.
	src := inputC
	dst := inputC

	wg.Add(1)
	go func(dst chan telegraf.Metric) {
		defer wg.Done()

		err := a.runInputs(ctx, startTime, dst)
		if err != nil {
			log.Printf("E! [agent] Error running inputs: %v", err)
		}

		log.Printf("D! [agent] Stopping service inputs")
		a.stopServiceInputs()

		close(dst)
		log.Printf("D! [agent] Input channel closed")
	}(dst)

	src = dst

	if len(a.Config.Processors) > 0 {
		dst = procC

		wg.Add(1)
		go func(src, dst chan telegraf.Metric) {
			defer wg.Done()

			err := a.runProcessors(src, dst)
			if err != nil {
				log.Printf("E! [agent] Error running processors: %v", err)
			}
			close(dst)
			log.Printf("D! [agent] Processor channel closed")
		}(src, dst)

		src = dst
	}

	if len(a.Config.Aggregators) > 0 {
		dst = outputC

		wg.Add(1)
		go func(src, dst chan telegraf.Metric) {
			defer wg.Done()

			err := a.runAggregators(startTime, src, dst)
			if err != nil {
				log.Printf("E! [agent] Error running aggregators: %v", err)
			}
			close(dst)
			log.Printf("D! [agent] Output channel closed")
		}(src, dst)

		src = dst
	}

	wg.Add(1)
	go func(src chan telegraf.Metric) {
		defer wg.Done()

		err := a.runOutputs(startTime, src)
		if err != nil {
			log.Printf("E! [agent] Error running outputs: %v", err)
		}
	}(src)

	wg.Wait()

	log.Printf("D! [agent] Closing outputs")
	a.closeOutputs()

	log.Printf("D! [agent] Stopped Successfully")
	return nil
}
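
// Run blocks until ctx is cancelled, so a caller that wants a clean shutdown
// on SIGINT/SIGTERM can drive it with a cancellable context. A minimal
// sketch, assuming ag is the *Agent returned by NewAgent and that the caller
// imports os, os/signal, and syscall:
//
//	ctx, cancel := context.WithCancel(context.Background())
//	signals := make(chan os.Signal, 1)
//	signal.Notify(signals, os.Interrupt, syscall.SIGTERM)
//	go func() {
//		<-signals
//		cancel()
//	}()
//	if err := ag.Run(ctx); err != nil {
//		log.Fatalf("E! running agent: %v", err)
//	}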

// Test runs the inputs once and prints the output to stdout in line protocol.
func (a *Agent) Test(ctx context.Context) error {
	var wg sync.WaitGroup
	metricC := make(chan telegraf.Metric)
	nulC := make(chan telegraf.Metric)
	defer func() {
		close(metricC)
		close(nulC)
		wg.Wait()
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()

		s := influx.NewSerializer()
		s.SetFieldSortOrder(influx.SortFields)
		for metric := range metricC {
			octets, err := s.Serialize(metric)
			if err == nil {
				fmt.Print("> ", string(octets))
			}
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		for range nulC {
		}
	}()

	for _, input := range a.Config.Inputs {
		select {
		case <-ctx.Done():
			return nil
		default:
			if _, ok := input.Input.(telegraf.ServiceInput); ok {
				log.Printf("W! [agent] skipping plugin [[%s]]: service inputs not supported in --test mode",
					input.Name())
				continue
			}

			acc := NewAccumulator(input, metricC)
			acc.SetPrecision(a.Config.Agent.Precision.Duration,
				a.Config.Agent.Interval.Duration)
			input.SetDefaultTags(a.Config.Tags)

			// Special instructions for some inputs. cpu, for example, needs
			// to be run twice in order to return cpu usage percentages.
			switch input.Name() {
			case "inputs.cpu", "inputs.mongodb", "inputs.procstat":
				nulAcc := NewAccumulator(input, nulC)
				nulAcc.SetPrecision(a.Config.Agent.Precision.Duration,
					a.Config.Agent.Interval.Duration)
				if err := input.Input.Gather(nulAcc); err != nil {
					return err
				}

				time.Sleep(500 * time.Millisecond)
				if err := input.Input.Gather(acc); err != nil {
					return err
				}
			default:
				if err := input.Input.Gather(acc); err != nil {
					return err
				}
			}
		}
	}

	return nil
}

// runInputs starts and triggers the periodic gather for Inputs.
//
// When the context is done the timers are stopped and this function returns
// after all ongoing Gather calls complete.
func (a *Agent) runInputs(
	ctx context.Context,
	startTime time.Time,
	dst chan<- telegraf.Metric,
) error {
	var wg sync.WaitGroup
	for _, input := range a.Config.Inputs {
		interval := a.Config.Agent.Interval.Duration
		precision := a.Config.Agent.Precision.Duration
		jitter := a.Config.Agent.CollectionJitter.Duration

		// Overwrite agent interval if this plugin has its own.
		if input.Config.Interval != 0 {
			interval = input.Config.Interval
		}

		acc := NewAccumulator(input, dst)
		acc.SetPrecision(precision, interval)

		wg.Add(1)
		go func(input *models.RunningInput) {
			defer wg.Done()

			if a.Config.Agent.RoundInterval {
				err := internal.SleepContext(
					ctx, internal.AlignDuration(startTime, interval))
				if err != nil {
					return
				}
			}

			a.gatherOnInterval(ctx, acc, input, interval, jitter)
		}(input)
	}
	wg.Wait()

	return nil
}
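
// The per-plugin interval override above corresponds to a plugin-level
// "interval" setting in the configuration file. A minimal sketch (the cpu
// input and the durations shown are only examples):
//
//	[agent]
//	  interval = "10s"
//	  collection_jitter = "1s"
//
//	[[inputs.cpu]]
//	  interval = "60s"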

// gatherOnInterval runs an input's Gather function periodically until the
// context is done.
func (a *Agent) gatherOnInterval(
	ctx context.Context,
	acc telegraf.Accumulator,
	input *models.RunningInput,
	interval time.Duration,
	jitter time.Duration,
) {
	defer panicRecover(input)

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		err := internal.SleepContext(ctx, internal.RandomDuration(jitter))
		if err != nil {
			return
		}

		err = a.gatherOnce(acc, input, interval)
		if err != nil {
			acc.AddError(err)
		}

		select {
		case <-ticker.C:
			continue
		case <-ctx.Done():
			return
		}
	}
}

// gatherOnce runs the input's Gather function once, logging a warning for
// each interval that passes before it completes.
func (a *Agent) gatherOnce(
	acc telegraf.Accumulator,
	input *models.RunningInput,
	timeout time.Duration,
) error {
	ticker := time.NewTicker(timeout)
	defer ticker.Stop()

	done := make(chan error)
	go func() {
		done <- input.Gather(acc)
	}()

	for {
		select {
		case err := <-done:
			return err
		case <-ticker.C:
			log.Printf("W! [agent] input %q did not complete within its interval",
				input.Name())
		}
	}
}
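
// Note that gatherOnce never abandons a slow Gather call: the warning repeats
// every timeout period while the call is still running, and gatherOnInterval
// does not begin the next collection until the current one returns. A hung
// input therefore delays only its own collection loop.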

// runProcessors applies processors to metrics.
func (a *Agent) runProcessors(
	src <-chan telegraf.Metric,
	agg chan<- telegraf.Metric,
) error {
	for metric := range src {
		metrics := a.applyProcessors(metric)

		for _, metric := range metrics {
			agg <- metric
		}
	}

	return nil
}

// applyProcessors applies all processors to a metric.
func (a *Agent) applyProcessors(m telegraf.Metric) []telegraf.Metric {
	metrics := []telegraf.Metric{m}
	for _, processor := range a.Config.Processors {
		metrics = processor.Apply(metrics...)
	}

	return metrics
}
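
// Processors are applied in the order they appear in a.Config.Processors,
// each one consuming the previous processor's output. A processor may return
// zero metrics to drop a metric or several to fan it out; for example, with a
// rename processor followed by a topk processor (plugin names used only for
// illustration), rename sees the original metric and topk sees rename's
// result.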

// runAggregators triggers the periodic push for Aggregators.
//
// When the context is done a final push will occur and then this function
// will return.
func (a *Agent) runAggregators(
	startTime time.Time,
	src <-chan telegraf.Metric,
	dst chan<- telegraf.Metric,
) error {
	ctx, cancel := context.WithCancel(context.Background())

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for metric := range src {
			var dropOriginal bool
			for _, agg := range a.Config.Aggregators {
				if ok := agg.Add(metric); ok {
					dropOriginal = true
				}
			}

			if !dropOriginal {
				dst <- metric
			} else {
				metric.Drop()
			}
		}
		cancel()
	}()

	precision := a.Config.Agent.Precision.Duration
	interval := a.Config.Agent.Interval.Duration
	aggregations := make(chan telegraf.Metric, 100)

	// Every aggregator pushes into the shared aggregations channel; close it
	// exactly once, after all aggregator goroutines have finished, so that one
	// aggregator cannot close or send on a channel another is still using.
	var aggWg sync.WaitGroup
	for _, agg := range a.Config.Aggregators {
		aggWg.Add(1)
		go func(agg *models.RunningAggregator) {
			defer aggWg.Done()

			if a.Config.Agent.RoundInterval {
				// Aggregators are aligned to the agent interval regardless of
				// their period.
				err := internal.SleepContext(ctx, internal.AlignDuration(startTime, interval))
				if err != nil {
					return
				}
			}

			agg.SetPeriodStart(startTime)

			acc := NewAccumulator(agg, aggregations)
			acc.SetPrecision(precision, interval)
			a.push(ctx, agg, acc)
		}(agg)
	}

	go func() {
		aggWg.Wait()
		close(aggregations)
	}()

	for metric := range aggregations {
		metrics := a.applyProcessors(metric)
		for _, metric := range metrics {
			dst <- metric
		}
	}

	wg.Wait()
	return nil
}
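
// Whether agg.Add reports a metric as consumed is driven by the aggregator's
// drop_original setting and its filters. A minimal configuration sketch (the
// minmax aggregator and the period shown are only examples):
//
//	[[aggregators.minmax]]
//	  period = "30s"
//	  drop_original = false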

// push runs the push for a single aggregator every period. It is simpler than
// the input/output equivalents since a timeout is less of a concern here,
// although the output channel can still block.
func (a *Agent) push(
	ctx context.Context,
	aggregator *models.RunningAggregator,
	acc telegraf.Accumulator,
) {
	ticker := time.NewTicker(aggregator.Period())
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
		case <-ctx.Done():
			aggregator.Push(acc)
			return
		}

		aggregator.Push(acc)
	}
}

// runOutputs triggers the periodic write for Outputs.
//
// When the context is done, outputs continue to run until the metric channel
// is closed, after which they flush one final time.
func (a *Agent) runOutputs(
	startTime time.Time,
	src <-chan telegraf.Metric,
) error {
	interval := a.Config.Agent.FlushInterval.Duration
	jitter := a.Config.Agent.FlushJitter.Duration

	ctx, cancel := context.WithCancel(context.Background())

	var wg sync.WaitGroup
	for _, output := range a.Config.Outputs {
		interval := interval
		// Overwrite agent flush_interval if this plugin has its own.
		if output.Config.FlushInterval != 0 {
			interval = output.Config.FlushInterval
		}

		wg.Add(1)
		go func(output *models.RunningOutput) {
			defer wg.Done()

			if a.Config.Agent.RoundInterval {
				err := internal.SleepContext(
					ctx, internal.AlignDuration(startTime, interval))
				if err != nil {
					return
				}
			}

			a.flush(ctx, output, interval, jitter)
		}(output)
	}

	for metric := range src {
		// The last output takes ownership of the original metric; all other
		// outputs receive their own copy.
		for i, output := range a.Config.Outputs {
			if i == len(a.Config.Outputs)-1 {
				output.AddMetric(metric)
			} else {
				output.AddMetric(metric.Copy())
			}
		}
	}

	log.Println("I! [agent] Hang on, flushing any cached metrics before shutdown")
	cancel()
	wg.Wait()

	return nil
}
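
// Likewise, the per-output interval override above maps to a plugin-level
// "flush_interval" setting in the configuration file. A minimal sketch (the
// file output and the durations shown are only examples):
//
//	[agent]
//	  flush_interval = "10s"
//	  flush_jitter = "0s"
//
//	[[outputs.file]]
//	  files = ["stdout"]
//	  flush_interval = "30s"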

// flush runs an output's flush function periodically until the context is
// done.
func (a *Agent) flush(
	ctx context.Context,
	output *models.RunningOutput,
	interval time.Duration,
	jitter time.Duration,
) {
	// Since we are watching two channels we need a ticker with the jitter
	// built in.
	ticker := NewTicker(interval, jitter)
	defer ticker.Stop()

	logError := func(err error) {
		if err != nil {
			log.Printf("E! [agent] Error writing to output [%s]: %v", output.Name, err)
		}
	}

	for {
		// Favor shutdown over the other cases.
		select {
		case <-ctx.Done():
			logError(a.flushOnce(output, interval, output.Write))
			return
		default:
		}

		select {
		case <-ticker.C:
			logError(a.flushOnce(output, interval, output.Write))
		case <-output.BatchReady:
			// Favor the ticker over batch ready, so that a due interval flush
			// writes the entire buffer rather than only the ready batch.
			select {
			case <-ticker.C:
				logError(a.flushOnce(output, interval, output.Write))
			default:
				logError(a.flushOnce(output, interval, output.WriteBatch))
			}
		case <-ctx.Done():
			logError(a.flushOnce(output, interval, output.Write))
			return
		}
	}
}

// flushOnce runs the output's Write function once, logging a warning for each
// interval that passes before it completes.
func (a *Agent) flushOnce(
	output *models.RunningOutput,
	timeout time.Duration,
	writeFunc func() error,
) error {
	ticker := time.NewTicker(timeout)
	defer ticker.Stop()

	done := make(chan error)
	go func() {
		done <- writeFunc()
	}()

	for {
		select {
		case err := <-done:
			output.LogBufferStatus()
			return err
		case <-ticker.C:
			log.Printf("W! [agent] output %q did not complete within its flush interval",
				output.Name)
			output.LogBufferStatus()
		}
	}
}

// connectOutputs connects to all outputs.
func (a *Agent) connectOutputs(ctx context.Context) error {
	for _, output := range a.Config.Outputs {
		log.Printf("D! [agent] Attempting connection to output: %s\n", output.Name)
		err := output.Output.Connect()
		if err != nil {
			log.Printf("E! [agent] Failed to connect to output %s, retrying in 15s, "+
				"error was '%s'\n", output.Name, err)

			err := internal.SleepContext(ctx, 15*time.Second)
			if err != nil {
				return err
			}

			err = output.Output.Connect()
			if err != nil {
				return err
			}
		}
		log.Printf("D! [agent] Successfully connected to output: %s\n", output.Name)
	}
	return nil
}

// closeOutputs closes all outputs.
func (a *Agent) closeOutputs() {
	for _, output := range a.Config.Outputs {
		output.Close()
	}
}

// startServiceInputs starts all service inputs.
func (a *Agent) startServiceInputs(
	ctx context.Context,
	dst chan<- telegraf.Metric,
) error {
	started := []telegraf.ServiceInput{}

	for _, input := range a.Config.Inputs {
		if si, ok := input.Input.(telegraf.ServiceInput); ok {
			// Service input plugins are not subject to timestamp rounding.
			// This only applies to the accumulator passed to Start(); the
			// Gather() accumulator does apply rounding according to the
			// precision agent setting.
			acc := NewAccumulator(input, dst)
			acc.SetPrecision(time.Nanosecond, 0)

			err := si.Start(acc)
			if err != nil {
				log.Printf("E! [agent] Service for input %s failed to start: %v",
					input.Name(), err)

				for _, si := range started {
					si.Stop()
				}

				return err
			}

			started = append(started, si)
		}
	}

	return nil
}

// stopServiceInputs stops all service inputs.
func (a *Agent) stopServiceInputs() {
	for _, input := range a.Config.Inputs {
		if si, ok := input.Input.(telegraf.ServiceInput); ok {
			si.Stop()
		}
	}
}

// panicRecover displays an error if an input panics.
func panicRecover(input *models.RunningInput) {
	if err := recover(); err != nil {
		trace := make([]byte, 2048)
		runtime.Stack(trace, true)
		log.Printf("E! FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
			input.Name(), err, trace)
		log.Println("E! PLEASE REPORT THIS PANIC ON GITHUB with " +
			"stack trace, configuration, and OS information: " +
			"https://github.com/influxdata/telegraf/issues/new/choose")
	}
}