Add ability to label inputs for logging (#6207)
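This change gives plugins (here, the outputs) an optional Alias, routes their log messages through a per-plugin logger, and tags their internal statistics with that alias, so several instances of the same plugin can be told apart. When an alias is set, the plugin's log name becomes the plugin name and the alias joined by "::", as implemented in OutputConfig.LogName in the diff below. A minimal, self-contained Go sketch of just that naming rule (the helper name and example values are illustrative, not part of the commit):

package main

import "fmt"

// logNameFor mirrors the OutputConfig.LogName rule from the diff below:
// without an alias the plugin keeps its bare name; with one, name and
// alias are joined by "::".
func logNameFor(name, alias string) string {
    if alias == "" {
        return name
    }
    return name + "::" + alias
}

func main() {
    fmt.Println(logNameFor("file", ""))      // file
    fmt.Println(logNameFor("file", "debug")) // file::debug
}

The combined identifier is what the constructor below feeds to NewBuffer and, via the logName helper, to the per-plugin Logger.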
@@ -1,7 +1,6 @@
 package models
 
 import (
-    "log"
     "sync"
     "sync/atomic"
     "time"
@@ -21,6 +20,7 @@ const (
 // OutputConfig containing name and filter
 type OutputConfig struct {
     Name   string
+    Alias  string
     Filter Filter
 
     FlushInterval time.Duration
@@ -34,7 +34,6 @@ type RunningOutput struct {
     newMetricsCount int64
     droppedMetrics  int64
 
-    Name              string
     Output            telegraf.Output
     Config            *OutputConfig
     MetricBufferLimit int
@@ -46,6 +45,7 @@ type RunningOutput struct {
     BatchReady chan time.Time
 
     buffer *Buffer
+    log    telegraf.Logger
 
     aggMutex sync.Mutex
 }
@@ -53,56 +53,77 @@ type RunningOutput struct {
 func NewRunningOutput(
     name string,
     output telegraf.Output,
-    conf *OutputConfig,
+    config *OutputConfig,
     batchSize int,
     bufferLimit int,
 ) *RunningOutput {
-    if conf.MetricBufferLimit > 0 {
-        bufferLimit = conf.MetricBufferLimit
+    logger := &Logger{
+        Name: logName("outputs", config.Name, config.Alias),
+        Errs: selfstat.Register("gather", "errors",
+            map[string]string{"output": config.Name, "alias": config.Alias}),
+    }
+
+    setLogIfExist(output, logger)
+
+    if config.MetricBufferLimit > 0 {
+        bufferLimit = config.MetricBufferLimit
     }
     if bufferLimit == 0 {
         bufferLimit = DEFAULT_METRIC_BUFFER_LIMIT
     }
-    if conf.MetricBatchSize > 0 {
-        batchSize = conf.MetricBatchSize
+    if config.MetricBatchSize > 0 {
+        batchSize = config.MetricBatchSize
     }
     if batchSize == 0 {
         batchSize = DEFAULT_METRIC_BATCH_SIZE
     }
+
     ro := &RunningOutput{
-        Name:              name,
-        buffer:            NewBuffer(name, bufferLimit),
+        buffer:            NewBuffer(config.LogName(), bufferLimit),
        BatchReady:        make(chan time.Time, 1),
         Output:            output,
-        Config:            conf,
+        Config:            config,
         MetricBufferLimit: bufferLimit,
         MetricBatchSize:   batchSize,
         MetricsFiltered: selfstat.Register(
             "write",
             "metrics_filtered",
-            map[string]string{"output": name},
+            map[string]string{"output": config.Name, "alias": config.Alias},
         ),
         WriteTime: selfstat.RegisterTiming(
             "write",
             "write_time_ns",
-            map[string]string{"output": name},
+            map[string]string{"output": config.Name, "alias": config.Alias},
         ),
+        log: logger,
     }
+
     return ro
 }
 
+func (c *OutputConfig) LogName() string {
+    if c.Alias == "" {
+        return c.Name
+    }
+    return c.Name + "::" + c.Alias
+}
+
+func (r *RunningOutput) LogName() string {
+    return logName("outputs", r.Config.Name, r.Config.Alias)
+}
+
 func (ro *RunningOutput) metricFiltered(metric telegraf.Metric) {
     ro.MetricsFiltered.Incr(1)
     metric.Drop()
 }
 
-func (ro *RunningOutput) Init() error {
-    if p, ok := ro.Output.(telegraf.Initializer); ok {
+func (r *RunningOutput) Init() error {
+    if p, ok := r.Output.(telegraf.Initializer); ok {
         err := p.Init()
         if err != nil {
             return err
         }
 
     }
     return nil
 }
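The constructor above calls setLogIfExist(output, logger), whose implementation is not part of this diff. As an assumption about how such an injection can work (a sketch, not the actual helper), the hypothetical installLogger below uses reflection to set a plugin's exported Log field when the plugin declares one of a compatible type:

package main

import (
    "fmt"
    "reflect"
)

// Logger stands in for telegraf.Logger so the sketch stays self-contained.
type Logger interface {
    Errorf(format string, args ...interface{})
}

type printLogger struct{ name string }

func (l *printLogger) Errorf(format string, args ...interface{}) {
    fmt.Printf("E! ["+l.name+"] "+format+"\n", args...)
}

// installLogger is a hypothetical stand-in for the setLogIfExist call above:
// if the plugin struct exports a settable "Log" field that can hold the
// logger, install it there; otherwise leave the plugin untouched.
func installLogger(plugin interface{}, logger Logger) {
    v := reflect.ValueOf(plugin)
    if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
        return
    }
    field := v.Elem().FieldByName("Log")
    if !field.IsValid() || !field.CanSet() {
        return
    }
    lv := reflect.ValueOf(logger)
    if !lv.Type().AssignableTo(field.Type()) {
        return
    }
    field.Set(lv)
}

// examplePlugin models an output that opts in by declaring a Log field.
type examplePlugin struct {
    Log Logger
}

func main() {
    p := &examplePlugin{}
    // the exact log-name format comes from the unshown logName helper;
    // "outputs.file::debug" is only illustrative here
    installLogger(p, &printLogger{name: "outputs.file::debug"})
    p.Log.Errorf("connection refused: %v", "example")
}

Plugins without such a field would simply be skipped, which would keep the injection optional for plugins that have not adopted it.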
@@ -192,35 +213,32 @@ func (ro *RunningOutput) WriteBatch() error {
     return nil
 }
 
-func (ro *RunningOutput) Close() {
-    err := ro.Output.Close()
+func (r *RunningOutput) Close() {
+    err := r.Output.Close()
     if err != nil {
-        log.Printf("E! [outputs.%s] Error closing output: %v", ro.Name, err)
+        r.log.Errorf("Error closing output: %v", err)
     }
 }
 
-func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
-    dropped := atomic.LoadInt64(&ro.droppedMetrics)
+func (r *RunningOutput) write(metrics []telegraf.Metric) error {
+    dropped := atomic.LoadInt64(&r.droppedMetrics)
     if dropped > 0 {
-        log.Printf("W! [outputs.%s] Metric buffer overflow; %d metrics have been dropped",
-            ro.Name, dropped)
-        atomic.StoreInt64(&ro.droppedMetrics, 0)
+        r.log.Warnf("Metric buffer overflow; %d metrics have been dropped", dropped)
+        atomic.StoreInt64(&r.droppedMetrics, 0)
     }
 
     start := time.Now()
-    err := ro.Output.Write(metrics)
+    err := r.Output.Write(metrics)
     elapsed := time.Since(start)
-    ro.WriteTime.Incr(elapsed.Nanoseconds())
+    r.WriteTime.Incr(elapsed.Nanoseconds())
 
     if err == nil {
-        log.Printf("D! [outputs.%s] wrote batch of %d metrics in %s\n",
-            ro.Name, len(metrics), elapsed)
+        r.log.Debugf("Wrote batch of %d metrics in %s", len(metrics), elapsed)
     }
     return err
 }
 
-func (ro *RunningOutput) LogBufferStatus() {
-    nBuffer := ro.buffer.Len()
-    log.Printf("D! [outputs.%s] buffer fullness: %d / %d metrics. ",
-        ro.Name, nBuffer, ro.MetricBufferLimit)
+func (r *RunningOutput) LogBufferStatus() {
+    nBuffer := r.buffer.Len()
+    r.log.Debugf("Buffer fullness: %d / %d metrics", nBuffer, r.MetricBufferLimit)
 }
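The new r.log calls above replace direct log.Printf calls that hard-coded the level prefix ("E!", "W!", "D!") and the "[outputs.<name>]" tag. The Logger constructed in NewRunningOutput is not shown in this diff; as an assumption, a reduced version that captures the two responsibilities visible here, prefixing every message with the plugin's log name and counting errors in a selfstat-style counter:

package main

import (
    "fmt"
    "sync/atomic"
)

// counter is a stand-in for the selfstat.Stat registered as "gather"/"errors"
// in the constructor.
type counter struct{ n int64 }

func (c *counter) Incr(v int64) { atomic.AddInt64(&c.n, v) }

// pluginLogger is an illustrative reduction of the per-plugin logger used in
// the diff (assumed behavior: add the level and log-name prefix, count errors).
type pluginLogger struct {
    Name string // e.g. "outputs.file::debug" (assumed format)
    Errs *counter
}

func (l *pluginLogger) Errorf(format string, args ...interface{}) {
    l.Errs.Incr(1)
    fmt.Printf("E! ["+l.Name+"] "+format+"\n", args...)
}

func (l *pluginLogger) Warnf(format string, args ...interface{}) {
    fmt.Printf("W! ["+l.Name+"] "+format+"\n", args...)
}

func (l *pluginLogger) Debugf(format string, args ...interface{}) {
    fmt.Printf("D! ["+l.Name+"] "+format+"\n", args...)
}

func main() {
    log := &pluginLogger{Name: "outputs.file::debug", Errs: &counter{}}
    log.Warnf("Metric buffer overflow; %d metrics have been dropped", 42)
    log.Errorf("Error closing output: %v", "connection reset")
}

Because the logger already knows which plugin instance it belongs to, the call sites above no longer need to pass ro.Name into every message.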