Document and add support to input plugins for logging alias (#6357)

parent e42d2e39c6
commit 817c9a69a9
@@ -188,6 +188,7 @@ driven operation.

Parameters that can be used with any input plugin:

+- **alias**: Name an instance of a plugin.
- **interval**: How often to gather this metric. Normal plugins use a single
  global interval, but if one particular input should be run less or more
  often, you can configure that here.
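The new `alias` setting exists so that log lines from two instances of the same input can be told apart. Below is a minimal sketch of how an alias-qualified log prefix can be built; the `category.name::alias` format mirrors the prefix style Telegraf prints (e.g. `[inputs.mem::mem-primary]`), but the helper itself is illustrative, not Telegraf's internal API:

```go
package main

import "fmt"

// logName builds a log prefix for a plugin instance. An empty alias yields
// the plain "category.name" form; a non-empty alias is appended after "::".
// Illustrative sketch only; the real logic lives in Telegraf's agent.
func logName(category, name, alias string) string {
	if alias == "" {
		return fmt.Sprintf("%s.%s", category, name)
	}
	return fmt.Sprintf("%s.%s::%s", category, name, alias)
}

func main() {
	fmt.Println(logName("inputs", "mem", ""))            // inputs.mem
	fmt.Println(logName("inputs", "mem", "mem-primary")) // inputs.mem::mem-primary
}
```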
@@ -144,7 +144,7 @@ func (r *RunningAggregator) Add(m telegraf.Metric) bool {
	defer r.Unlock()

	if m.Time().Before(r.periodStart.Add(-r.Config.Grace)) || m.Time().After(r.periodEnd.Add(r.Config.Delay)) {
-		r.log.Debugf("metric is outside aggregation window; discarding. %s: m: %s e: %s g: %s",
+		r.log.Debugf("Metric is outside aggregation window; discarding. %s: m: %s e: %s g: %s",
			m.Time(), r.periodStart, r.periodEnd, r.Config.Grace)
		r.MetricsDropped.Incr(1)
		return r.Config.DropOriginal
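The check above accepts a metric only when its timestamp falls inside the current aggregation period, widened by `Grace` before the period start and `Delay` after the period end; anything outside is dropped. A standalone sketch of that window test, using names that mirror the snippet rather than the full `RunningAggregator`:

```go
package main

import (
	"fmt"
	"time"
)

// inWindow reports whether ts falls inside [periodStart-grace, periodEnd+delay],
// the widened window the aggregator accepts metrics in.
func inWindow(ts, periodStart, periodEnd time.Time, grace, delay time.Duration) bool {
	return !ts.Before(periodStart.Add(-grace)) && !ts.After(periodEnd.Add(delay))
}

func main() {
	start := time.Now().Truncate(30 * time.Second)
	end := start.Add(30 * time.Second)
	late := end.Add(3 * time.Second)
	fmt.Println(inWindow(late, start, end, time.Second, 5*time.Second)) // true: inside the delay
	fmt.Println(inWindow(late, start, end, time.Second, time.Second))   // false: too late
}
```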
@@ -10,6 +10,7 @@ emitting the aggregate every `period` seconds.
[[aggregators.basicstats]]
  ## The period on which to flush & clear the aggregator.
  period = "30s"

  ## If true, the original metric will be dropped by the
  ## aggregator and will not get sent to the output plugins.
  drop_original = false
@@ -1,7 +1,6 @@
package basicstats

import (
-	"log"
	"math"

	"github.com/influxdata/telegraf"

@@ -10,6 +9,7 @@ import (

type BasicStats struct {
	Stats []string `toml:"stats"`
+	Log   telegraf.Logger

	cache       map[uint64]aggregate
	statsConfig *configuredStats
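This is the pattern repeated throughout the commit: each plugin gains an exported `Log telegraf.Logger` field and drops the global `log` package, so messages go through a logger the agent injects with the plugin's name (and alias) already in the prefix. A toy sketch of the idea follows; the interface is abridged to the methods the diff actually calls (`Debugf`, `Warnf`, `Errorf`, ...), and the exact prefix format is an assumption:

```go
package main

import "fmt"

// Logger is an abridged sketch of the telegraf.Logger interface.
type Logger interface {
	Debugf(format string, args ...interface{})
	Warnf(format string, args ...interface{})
	Errorf(format string, args ...interface{})
}

// prefixLogger is a toy implementation that stamps every line with an
// alias-qualified plugin name, the way the agent-injected logger does.
type prefixLogger struct{ prefix string }

func (l prefixLogger) Debugf(f string, a ...interface{}) { l.print("D!", f, a...) }
func (l prefixLogger) Warnf(f string, a ...interface{})  { l.print("W!", f, a...) }
func (l prefixLogger) Errorf(f string, a ...interface{}) { l.print("E!", f, a...) }
func (l prefixLogger) print(level, f string, a ...interface{}) {
	fmt.Printf("%s [%s] %s\n", level, l.prefix, fmt.Sprintf(f, a...))
}

// BasicStats here stands in for any plugin struct from this diff.
type BasicStats struct {
	Log Logger // set by the agent before the plugin runs
}

func main() {
	b := &BasicStats{Log: prefixLogger{prefix: "aggregators.basicstats::mine"}}
	b.Log.Warnf("Unrecognized basic stat %q, ignoring", "median")
}
```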
@@ -28,9 +28,9 @@ type configuredStats struct {
}

func NewBasicStats() *BasicStats {
-	mm := &BasicStats{}
-	mm.Reset()
-	return mm
+	return &BasicStats{
+		cache: make(map[uint64]aggregate),
+	}
}

type aggregate struct {
@@ -53,6 +53,7 @@ type basicstats struct {
var sampleConfig = `
  ## The period on which to flush & clear the aggregator.
  period = "30s"

  ## If true, the original metric will be dropped by the
  ## aggregator and will not get sent to the output plugins.
  drop_original = false

@@ -61,17 +62,17 @@ var sampleConfig = `
  # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]
`

-func (m *BasicStats) SampleConfig() string {
+func (*BasicStats) SampleConfig() string {
	return sampleConfig
}

-func (m *BasicStats) Description() string {
+func (*BasicStats) Description() string {
	return "Keep the aggregate basicstats of each metric passing through."
}

-func (m *BasicStats) Add(in telegraf.Metric) {
+func (b *BasicStats) Add(in telegraf.Metric) {
	id := in.HashID()
-	if _, ok := m.cache[id]; !ok {
+	if _, ok := b.cache[id]; !ok {
		// hit an uncached metric, create caches for first time:
		a := aggregate{
			name: in.Name(),
@@ -92,13 +93,13 @@ func (m *BasicStats) Add(in telegraf.Metric) {
				}
			}
		}
-		m.cache[id] = a
+		b.cache[id] = a
	} else {
		for _, field := range in.FieldList() {
			if fv, ok := convert(field.Value); ok {
-				if _, ok := m.cache[id].fields[field.Key]; !ok {
+				if _, ok := b.cache[id].fields[field.Key]; !ok {
					// hit an uncached field of a cached metric
-					m.cache[id].fields[field.Key] = basicstats{
+					b.cache[id].fields[field.Key] = basicstats{
						count: 1,
						min:   fv,
						max:   fv,

@@ -111,7 +112,7 @@ func (m *BasicStats) Add(in telegraf.Metric) {
					continue
				}

-				tmp := m.cache[id].fields[field.Key]
+				tmp := b.cache[id].fields[field.Key]
				//https://en.m.wikipedia.org/wiki/Algorithms_for_calculating_variance
				//variable initialization
				x := fv
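The Wikipedia link above points at Welford's online algorithm: `mean` and `M2` (the running sum of squared deviations) are updated one sample at a time, and `Push` later recovers the sample variance as `M2 / (count - 1)`, as the hunks further down show. A minimal self-contained sketch of that update rule:

```go
package main

import "fmt"

// welford holds the running state for Welford's online variance algorithm.
type welford struct {
	count float64
	mean  float64
	m2    float64 // running sum of squared deviations from the mean
}

// add folds one observation into the running statistics.
func (w *welford) add(x float64) {
	w.count++
	delta := x - w.mean
	w.mean += delta / w.count
	w.m2 += delta * (x - w.mean) // note: uses the updated mean
}

func main() {
	var w welford
	for _, x := range []float64{1, 25} {
		w.add(x)
	}
	variance := w.m2 / (w.count - 1) // sample variance, as computed in Push()
	fmt.Println(w.mean, variance)    // 13 288
}
```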
@@ -138,32 +139,30 @@ func (m *BasicStats) Add(in telegraf.Metric) {
				//diff compute
				tmp.diff = fv - tmp.LAST
				//store final data
-				m.cache[id].fields[field.Key] = tmp
+				b.cache[id].fields[field.Key] = tmp
			}
		}
	}
}

-func (m *BasicStats) Push(acc telegraf.Accumulator) {
-	config := getConfiguredStats(m)
-
-	for _, aggregate := range m.cache {
+func (b *BasicStats) Push(acc telegraf.Accumulator) {
+	for _, aggregate := range b.cache {
		fields := map[string]interface{}{}
		for k, v := range aggregate.fields {

-			if config.count {
+			if b.statsConfig.count {
				fields[k+"_count"] = v.count
			}
-			if config.min {
+			if b.statsConfig.min {
				fields[k+"_min"] = v.min
			}
-			if config.max {
+			if b.statsConfig.max {
				fields[k+"_max"] = v.max
			}
-			if config.mean {
+			if b.statsConfig.mean {
				fields[k+"_mean"] = v.mean
			}
-			if config.sum {
+			if b.statsConfig.sum {
				fields[k+"_sum"] = v.sum
			}
@@ -171,16 +170,16 @@ func (m *BasicStats) Push(acc telegraf.Accumulator) {
			if v.count > 1 {
				variance := v.M2 / (v.count - 1)

-				if config.variance {
+				if b.statsConfig.variance {
					fields[k+"_s2"] = variance
				}
-				if config.stdev {
+				if b.statsConfig.stdev {
					fields[k+"_stdev"] = math.Sqrt(variance)
				}
-				if config.diff {
+				if b.statsConfig.diff {
					fields[k+"_diff"] = v.diff
				}
-				if config.non_negative_diff && v.diff >= 0 {
+				if b.statsConfig.non_negative_diff && v.diff >= 0 {
					fields[k+"_non_negative_diff"] = v.diff
				}
@@ -194,14 +193,12 @@ func (m *BasicStats) Push(acc telegraf.Accumulator) {
	}
}

-func parseStats(names []string) *configuredStats {
-
+// member function for logging.
+func (b *BasicStats) parseStats() *configuredStats {
	parsed := &configuredStats{}

-	for _, name := range names {
-
+	for _, name := range b.Stats {
		switch name {

		case "count":
			parsed.count = true
		case "min":

@@ -222,45 +219,32 @@ func parseStats(names []string) *configuredStats {
			parsed.non_negative_diff = true

		default:
-			log.Printf("W! Unrecognized basic stat '%s', ignoring", name)
+			b.Log.Warnf("Unrecognized basic stat %q, ignoring", name)
		}
	}

	return parsed
}

-func defaultStats() *configuredStats {
-
-	defaults := &configuredStats{}
-
-	defaults.count = true
-	defaults.min = true
-	defaults.max = true
-	defaults.mean = true
-	defaults.variance = true
-	defaults.stdev = true
-	defaults.sum = false
-	defaults.non_negative_diff = false
-
-	return defaults
-}
-
-func getConfiguredStats(m *BasicStats) *configuredStats {
-
-	if m.statsConfig == nil {
-
-		if m.Stats == nil {
-			m.statsConfig = defaultStats()
-		} else {
-			m.statsConfig = parseStats(m.Stats)
-		}
-	}
-
-	return m.statsConfig
-}
+func (b *BasicStats) getConfiguredStats() {
+	if b.Stats == nil {
+		b.statsConfig = &configuredStats{
+			count:             true,
+			min:               true,
+			max:               true,
+			mean:              true,
+			variance:          true,
+			stdev:             true,
+			sum:               false,
+			non_negative_diff: false,
+		}
+	} else {
+		b.statsConfig = b.parseStats()
+	}
+}

-func (m *BasicStats) Reset() {
-	m.cache = make(map[uint64]aggregate)
+func (b *BasicStats) Reset() {
+	b.cache = make(map[uint64]aggregate)
}

func convert(in interface{}) (float64, bool) {
@@ -276,6 +260,12 @@ func convert(in interface{}) (float64, bool) {
	}
}

+func (b *BasicStats) Init() error {
+	b.getConfiguredStats()
+
+	return nil
+}
+
func init() {
	aggregators.Add("basicstats", func() telegraf.Aggregator {
		return NewBasicStats()
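The new `Init` method moves configuration parsing out of `Push` and into plugin startup: Telegraf calls `Init() error` once on plugins that implement it, after config is loaded and before metrics flow. The sketch below shows that optional-interface pattern in isolation; the real check lives in Telegraf's agent, so treat the surrounding scaffolding as illustrative:

```go
package main

import "fmt"

// Initializer mirrors the optional interface checked at startup: plugins
// that implement Init() error get it called once before any Add/Gather/Push.
type Initializer interface {
	Init() error
}

type plugin struct{ Stats []string }

func (p *plugin) Init() error {
	if p.Stats == nil {
		// nil selects the defaults, as getConfiguredStats does above.
		p.Stats = []string{"count", "min", "max", "mean", "s2", "stdev"}
	}
	return nil
}

// maybeInit runs Init on any value that opts into the interface.
func maybeInit(v interface{}) error {
	if i, ok := v.(Initializer); ok {
		return i.Init()
	}
	return nil
}

func main() {
	p := &plugin{}
	if err := maybeInit(p); err != nil {
		panic(err)
	}
	fmt.Println(p.Stats)
}
```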
@@ -39,6 +39,8 @@ var m2, _ = metric.New("m1",

func BenchmarkApply(b *testing.B) {
	minmax := NewBasicStats()
+	minmax.Log = testutil.Logger{}
+	minmax.getConfiguredStats()

	for n := 0; n < b.N; n++ {
		minmax.Add(m1)

@@ -50,6 +52,8 @@ func BenchmarkApply(b *testing.B) {
func TestBasicStatsWithPeriod(t *testing.T) {
	acc := testutil.Accumulator{}
	minmax := NewBasicStats()
+	minmax.Log = testutil.Logger{}
+	minmax.getConfiguredStats()

	minmax.Add(m1)
	minmax.Add(m2)

@@ -106,6 +110,8 @@ func TestBasicStatsWithPeriod(t *testing.T) {
func TestBasicStatsDifferentPeriods(t *testing.T) {
	acc := testutil.Accumulator{}
	minmax := NewBasicStats()
+	minmax.Log = testutil.Logger{}
+	minmax.getConfiguredStats()

	minmax.Add(m1)
	minmax.Push(&acc)

@@ -181,6 +187,8 @@ func TestBasicStatsWithOnlyCount(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{"count"}
+	aggregator.Log = testutil.Logger{}
+	aggregator.getConfiguredStats()

	aggregator.Add(m1)
	aggregator.Add(m2)

@@ -208,6 +216,8 @@ func TestBasicStatsWithOnlyMin(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{"min"}
+	aggregator.Log = testutil.Logger{}
+	aggregator.getConfiguredStats()

	aggregator.Add(m1)
	aggregator.Add(m2)

@@ -235,6 +245,8 @@ func TestBasicStatsWithOnlyMax(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{"max"}
+	aggregator.Log = testutil.Logger{}
+	aggregator.getConfiguredStats()

	aggregator.Add(m1)
	aggregator.Add(m2)

@@ -262,6 +274,8 @@ func TestBasicStatsWithOnlyMean(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{"mean"}
+	aggregator.Log = testutil.Logger{}
+	aggregator.getConfiguredStats()

	aggregator.Add(m1)
	aggregator.Add(m2)

@@ -289,6 +303,8 @@ func TestBasicStatsWithOnlySum(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{"sum"}
+	aggregator.Log = testutil.Logger{}
+	aggregator.getConfiguredStats()

	aggregator.Add(m1)
	aggregator.Add(m2)

@@ -347,6 +363,8 @@ func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{"sum"}
+	aggregator.Log = testutil.Logger{}
+	aggregator.getConfiguredStats()

	aggregator.Add(sum1)
	aggregator.Add(sum2)

@@ -368,6 +386,8 @@ func TestBasicStatsWithOnlyVariance(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{"s2"}
+	aggregator.Log = testutil.Logger{}
+	aggregator.getConfiguredStats()

	aggregator.Add(m1)
	aggregator.Add(m2)

@@ -393,6 +413,8 @@ func TestBasicStatsWithOnlyStandardDeviation(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{"stdev"}
+	aggregator.Log = testutil.Logger{}
+	aggregator.getConfiguredStats()

	aggregator.Add(m1)
	aggregator.Add(m2)

@@ -418,6 +440,8 @@ func TestBasicStatsWithMinAndMax(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{"min", "max"}
+	aggregator.Log = testutil.Logger{}
+	aggregator.getConfiguredStats()

	aggregator.Add(m1)
	aggregator.Add(m2)

@@ -452,6 +476,8 @@ func TestBasicStatsWithDiff(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{"diff"}
+	aggregator.Log = testutil.Logger{}
+	aggregator.getConfiguredStats()

	aggregator.Add(m1)
	aggregator.Add(m2)

@@ -477,6 +503,8 @@ func TestBasicStatsWithNonNegativeDiff(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{"non_negative_diff"}
+	aggregator.Log = testutil.Logger{}
+	aggregator.getConfiguredStats()

	aggregator.Add(m1)
	aggregator.Add(m2)

@@ -500,7 +528,9 @@ func TestBasicStatsWithNonNegativeDiff(t *testing.T) {
func TestBasicStatsWithAllStats(t *testing.T) {
	acc := testutil.Accumulator{}
	minmax := NewBasicStats()
+	minmax.Log = testutil.Logger{}
	minmax.Stats = []string{"count", "min", "max", "mean", "stdev", "s2", "sum"}
+	minmax.getConfiguredStats()

	minmax.Add(m1)
	minmax.Add(m2)

@@ -564,6 +594,8 @@ func TestBasicStatsWithNoStats(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{}
+	aggregator.Log = testutil.Logger{}
+	aggregator.getConfiguredStats()

	aggregator.Add(m1)
	aggregator.Add(m2)

@@ -579,6 +611,8 @@ func TestBasicStatsWithUnknownStat(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{"crazy"}
+	aggregator.Log = testutil.Logger{}
+	aggregator.getConfiguredStats()

	aggregator.Add(m1)
	aggregator.Add(m2)

@@ -596,6 +630,8 @@ func TestBasicStatsWithUnknownStat(t *testing.T) {
func TestBasicStatsWithDefaultStats(t *testing.T) {

	aggregator := NewBasicStats()
+	aggregator.Log = testutil.Logger{}
+	aggregator.getConfiguredStats()

	aggregator.Add(m1)
	aggregator.Add(m2)
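Every test in this file gains the same two lines: a plugin whose `Log` field is nil would panic on its first log call, and tests bypass the agent, so they must resolve the stats configuration themselves instead of relying on `Init`. `testutil.Logger` is the stock `telegraf.Logger` implementation for tests. A sketch of the pattern these hunks repeat, reusing the package's `m1` fixture (assumes it compiles inside this test package):

```go
package basicstats

import (
	"testing"

	"github.com/influxdata/telegraf/testutil"
)

// Sketch of the setup every test above now performs before feeding metrics.
func TestSetupPattern(t *testing.T) {
	aggregator := NewBasicStats()
	aggregator.Stats = []string{"count"}
	aggregator.Log = testutil.Logger{} // nil Log would panic on the first Warnf
	aggregator.getConfiguredStats()    // tests call this directly instead of Init()

	acc := testutil.Accumulator{}
	aggregator.Add(m1) // m1 is the package-level fixture defined above
	aggregator.Push(&acc)
	if len(acc.Metrics) == 0 {
		t.Fatal("expected aggregated metrics")
	}
}
```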
@@ -4,7 +4,6 @@ import (
	"context"
	"errors"
	"fmt"
-	"log"
	"math/rand"
	"strings"
	"sync"

@@ -55,6 +54,7 @@ type AMQPConsumer struct {
	tls.ClientConfig

	ContentEncoding string `toml:"content_encoding"`
+	Log             telegraf.Logger

	deliveries map[telegraf.TrackingID]amqp.Delivery

@@ -241,11 +241,11 @@ func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error {
				break
			}

-			log.Printf("I! [inputs.amqp_consumer] connection closed: %s; trying to reconnect", err)
+			a.Log.Infof("Connection closed: %s; trying to reconnect", err)
			for {
				msgs, err := a.connect(amqpConf)
				if err != nil {
-					log.Printf("E! AMQP connection failed: %s", err)
+					a.Log.Errorf("AMQP connection failed: %s", err)
					time.Sleep(10 * time.Second)
					continue
				}
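The loop above is the fixed-delay reconnect pattern several service inputs in this commit share (cloud_pubsub's `receiveWithRetry` below has the same shape): log the failure, sleep, try again. A standalone sketch of the pattern; the cancellable context is an illustrative addition, since the plugin itself simply loops until `connect` succeeds:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// reconnect retries connect with a fixed delay until it succeeds or ctx is
// cancelled, the same shape as the AMQP consumer's reconnect loop above.
func reconnect(ctx context.Context, delay time.Duration, connect func() error) error {
	for {
		err := connect()
		if err == nil {
			return nil
		}
		fmt.Printf("E! connection failed: %v; retrying in %s\n", err, delay)
		select {
		case <-time.After(delay):
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

func main() {
	attempts := 0
	err := reconnect(context.Background(), 10*time.Millisecond, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("broker unreachable")
		}
		return nil
	})
	fmt.Println("connected:", err == nil, "after", attempts, "attempts")
}
```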
@@ -272,14 +272,14 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err
	p := rand.Perm(len(brokers))
	for _, n := range p {
		broker := brokers[n]
-		log.Printf("D! [inputs.amqp_consumer] connecting to %q", broker)
+		a.Log.Debugf("Connecting to %q", broker)
		conn, err := amqp.DialConfig(broker, *amqpConf)
		if err == nil {
			a.conn = conn
-			log.Printf("D! [inputs.amqp_consumer] connected to %q", broker)
+			a.Log.Debugf("Connected to %q", broker)
			break
		}
-		log.Printf("D! [inputs.amqp_consumer] error connecting to %q", broker)
+		a.Log.Debugf("Error connecting to %q", broker)
	}

	if a.conn == nil {
@@ -288,7 +288,7 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err

	ch, err := a.conn.Channel()
	if err != nil {
-		return nil, fmt.Errorf("Failed to open a channel: %s", err)
+		return nil, fmt.Errorf("Failed to open a channel: %s", err.Error())
	}

	if a.Exchange != "" {

@@ -395,7 +395,7 @@ func declareExchange(
		)
	}
	if err != nil {
-		return fmt.Errorf("error declaring exchange: %v", err)
+		return fmt.Errorf("Error declaring exchange: %v", err)
	}
	return nil
}

@@ -437,7 +437,7 @@ func declareQueue(
		)
	}
	if err != nil {
-		return nil, fmt.Errorf("error declaring queue: %v", err)
+		return nil, fmt.Errorf("Error declaring queue: %v", err)
	}
	return &queue, nil
}
@@ -486,8 +486,7 @@ func (a *AMQPConsumer) onMessage(acc telegraf.TrackingAccumulator, d amqp.Delive
		// this message.
		rejErr := d.Ack(false)
		if rejErr != nil {
-			log.Printf("E! [inputs.amqp_consumer] Unable to reject message: %d: %v",
-				d.DeliveryTag, rejErr)
+			a.Log.Errorf("Unable to reject message: %d: %v", d.DeliveryTag, rejErr)
			a.conn.Close()
		}
	}

@@ -519,15 +518,13 @@ func (a *AMQPConsumer) onDelivery(track telegraf.DeliveryInfo) bool {
	if track.Delivered() {
		err := delivery.Ack(false)
		if err != nil {
-			log.Printf("E! [inputs.amqp_consumer] Unable to ack written delivery: %d: %v",
-				delivery.DeliveryTag, err)
+			a.Log.Errorf("Unable to ack written delivery: %d: %v", delivery.DeliveryTag, err)
			a.conn.Close()
		}
	} else {
		err := delivery.Reject(false)
		if err != nil {
-			log.Printf("E! [inputs.amqp_consumer] Unable to reject failed delivery: %d: %v",
-				delivery.DeliveryTag, err)
+			a.Log.Errorf("Unable to reject failed delivery: %d: %v", delivery.DeliveryTag, err)
			a.conn.Close()
		}
	}

@@ -541,7 +538,7 @@ func (a *AMQPConsumer) Stop() {
	a.wg.Wait()
	err := a.conn.Close()
	if err != nil && err != amqp.ErrClosed {
-		log.Printf("E! [inputs.amqp_consumer] Error closing AMQP connection: %s", err)
+		a.Log.Errorf("Error closing AMQP connection: %s", err)
		return
	}
}
@@ -1,9 +1,8 @@
-package activemq
+package azure_storage_queue

import (
	"context"
	"errors"
-	"log"
	"net/url"
	"strings"
	"time"

@@ -17,6 +16,7 @@ type AzureStorageQueue struct {
	StorageAccountName   string `toml:"account_name"`
	StorageAccountKey    string `toml:"account_key"`
	PeekOldestMessageAge bool   `toml:"peek_oldest_message_age"`
+	Log                  telegraf.Logger

	serviceURL *azqueue.ServiceURL
}

@@ -92,7 +92,7 @@ func (a *AzureStorageQueue) Gather(acc telegraf.Accumulator) error {
	ctx := context.TODO()

	for marker := (azqueue.Marker{}); marker.NotDone(); {
-		log.Printf("D! [inputs.azure_storage_queue] Listing queues of storage account '%s'", a.StorageAccountName)
+		a.Log.Debugf("Listing queues of storage account '%s'", a.StorageAccountName)
		queuesSegment, err := serviceURL.ListQueuesSegment(ctx, marker,
			azqueue.ListQueuesSegmentOptions{
				Detail: azqueue.ListQueuesSegmentDetails{Metadata: false},

@@ -103,11 +103,11 @@ func (a *AzureStorageQueue) Gather(acc telegraf.Accumulator) error {
		marker = queuesSegment.NextMarker

		for _, queueItem := range queuesSegment.QueueItems {
-			log.Printf("D! [inputs.azure_storage_queue] Processing queue '%s' of storage account '%s'", queueItem.Name, a.StorageAccountName)
+			a.Log.Debugf("Processing queue '%s' of storage account '%s'", queueItem.Name, a.StorageAccountName)
			queueURL := serviceURL.NewQueueURL(queueItem.Name)
			properties, err := queueURL.GetProperties(ctx)
			if err != nil {
-				log.Printf("E! [inputs.azure_storage_queue] Error getting properties for queue %s: %s", queueItem.Name, err.Error())
+				a.Log.Errorf("Error getting properties for queue %s: %s", queueItem.Name, err.Error())
				continue
			}
			var peekedMessage *azqueue.PeekedMessage

@@ -115,7 +115,7 @@ func (a *AzureStorageQueue) Gather(acc telegraf.Accumulator) error {
			messagesURL := queueURL.NewMessagesURL()
			messagesResponse, err := messagesURL.Peek(ctx, 1)
			if err != nil {
-				log.Printf("E! [inputs.azure_storage_queue] Error peeking queue %s: %s", queueItem.Name, err.Error())
+				a.Log.Errorf("Error peeking queue %s: %s", queueItem.Name, err.Error())
			} else if messagesResponse.NumMessages() > 0 {
				peekedMessage = messagesResponse.Message(0)
			}
@@ -101,12 +101,12 @@ func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error {
	for _, s := range sockets {
		dump, err := perfDump(c.CephBinary, s)
		if err != nil {
-			acc.AddError(fmt.Errorf("E! error reading from socket '%s': %v", s.socket, err))
+			acc.AddError(fmt.Errorf("error reading from socket '%s': %v", s.socket, err))
			continue
		}
		data, err := parseDump(dump)
		if err != nil {
-			acc.AddError(fmt.Errorf("E! error parsing dump from socket '%s': %v", s.socket, err))
+			acc.AddError(fmt.Errorf("error parsing dump from socket '%s': %v", s.socket, err))
			continue
		}
		for tag, metrics := range data {

@@ -287,7 +287,7 @@ func flatten(data interface{}) []*metric {
		}
	}
	default:
-		log.Printf("I! Ignoring unexpected type '%T' for value %v", val, val)
+		log.Printf("I! [inputs.ceph] ignoring unexpected type '%T' for value %v", val, val)
	}

	return metrics
@@ -7,7 +7,6 @@ import (
	"encoding/json"
	"fmt"
	"io"
-	"log"
	"net"
	"strings"
	"sync"

@@ -54,6 +53,8 @@ type CiscoTelemetryGNMI struct {
	acc    telegraf.Accumulator
	cancel context.CancelFunc
	wg     sync.WaitGroup

+	Log telegraf.Logger
}

// Subscription for a GNMI client

@@ -211,8 +212,8 @@ func (c *CiscoTelemetryGNMI) subscribeGNMI(ctx context.Context, address string,
		return fmt.Errorf("failed to send subscription request: %v", err)
	}

-	log.Printf("D! [inputs.cisco_telemetry_gnmi]: Connection to GNMI device %s established", address)
-	defer log.Printf("D! [inputs.cisco_telemetry_gnmi]: Connection to GNMI device %s closed", address)
+	c.Log.Debugf("Connection to GNMI device %s established", address)
+	defer c.Log.Debugf("Connection to GNMI device %s closed", address)
	for ctx.Err() == nil {
		var reply *gnmi.SubscribeResponse
		if reply, err = subscribeClient.Recv(); err != nil {

@@ -267,7 +268,7 @@ func (c *CiscoTelemetryGNMI) handleSubscribeResponse(address string, reply *gnmi
	if alias, ok := c.aliases[aliasPath]; ok {
		name = alias
	} else {
-		log.Printf("D! [inputs.cisco_telemetry_gnmi]: No measurement alias for GNMI path: %s", name)
+		c.Log.Debugf("No measurement alias for GNMI path: %s", name)
	}
}
@@ -104,7 +104,9 @@ func TestGNMIError(t *testing.T) {
	acc := &testutil.Accumulator{}
	gnmi.RegisterGNMIServer(server, &mockGNMIServer{t: t, scenario: 0, server: server, acc: acc})

-	c := &CiscoTelemetryGNMI{Addresses: []string{listener.Addr().String()},
+	c := &CiscoTelemetryGNMI{
+		Log:       testutil.Logger{},
+		Addresses: []string{listener.Addr().String()},
		Username: "theuser", Password: "thepassword", Encoding: "proto",
		Redial: internal.Duration{Duration: 1 * time.Second}}

@@ -174,7 +176,9 @@ func TestGNMIMultiple(t *testing.T) {
	acc := &testutil.Accumulator{}
	gnmi.RegisterGNMIServer(server, &mockGNMIServer{t: t, scenario: 1, server: server, acc: acc})

-	c := &CiscoTelemetryGNMI{Addresses: []string{listener.Addr().String()},
+	c := &CiscoTelemetryGNMI{
+		Log:       testutil.Logger{},
+		Addresses: []string{listener.Addr().String()},
		Username: "theuser", Password: "thepassword", Encoding: "proto",
		Redial: internal.Duration{Duration: 1 * time.Second},
		Subscriptions: []Subscription{{Name: "alias", Origin: "type", Path: "/model", SubscriptionMode: "sample"}},

@@ -215,7 +219,9 @@ func TestGNMIMultipleRedial(t *testing.T) {
	acc := &testutil.Accumulator{}
	gnmi.RegisterGNMIServer(server, &mockGNMIServer{t: t, scenario: 2, server: server, acc: acc})

-	c := &CiscoTelemetryGNMI{Addresses: []string{listener.Addr().String()},
+	c := &CiscoTelemetryGNMI{
+		Log:       testutil.Logger{},
+		Addresses: []string{listener.Addr().String()},
		Username: "theuser", Password: "thepassword", Encoding: "proto",
		Redial: internal.Duration{Duration: 10 * time.Millisecond},
		Subscriptions: []Subscription{{Name: "alias", Origin: "type", Path: "/model", SubscriptionMode: "sample"}},
@@ -5,7 +5,6 @@ import (
	"encoding/binary"
	"fmt"
	"io"
-	"log"
	"net"
	"path"
	"strconv"

@@ -43,6 +42,8 @@ type CiscoTelemetryMDT struct {
	Aliases      map[string]string `toml:"aliases"`
	EmbeddedTags []string          `toml:"embedded_tags"`

+	Log telegraf.Logger
+
	// GRPC TLS settings
	internaltls.ServerConfig

@@ -146,11 +147,11 @@ func (c *CiscoTelemetryMDT) acceptTCPClients() {
		// Individual client connection routine
		c.wg.Add(1)
		go func() {
-			log.Printf("D! [inputs.cisco_telemetry_mdt]: Accepted Cisco MDT TCP dialout connection from %s", conn.RemoteAddr())
+			c.Log.Debugf("Accepted Cisco MDT TCP dialout connection from %s", conn.RemoteAddr())
			if err := c.handleTCPClient(conn); err != nil {
				c.acc.AddError(err)
			}
-			log.Printf("D! [inputs.cisco_telemetry_mdt]: Closed Cisco MDT TCP dialout connection from %s", conn.RemoteAddr())
+			c.Log.Debugf("Closed Cisco MDT TCP dialout connection from %s", conn.RemoteAddr())

			mutex.Lock()
			delete(clients, conn)

@@ -165,7 +166,7 @@ func (c *CiscoTelemetryMDT) acceptTCPClients() {
	mutex.Lock()
	for client := range clients {
		if err := client.Close(); err != nil {
-			log.Printf("E! [inputs.cisco_telemetry_mdt]: Failed to close TCP dialout client: %v", err)
+			c.Log.Errorf("Failed to close TCP dialout client: %v", err)
		}
	}
	mutex.Unlock()

@@ -218,7 +219,7 @@ func (c *CiscoTelemetryMDT) handleTCPClient(conn net.Conn) error {
func (c *CiscoTelemetryMDT) MdtDialout(stream dialout.GRPCMdtDialout_MdtDialoutServer) error {
	peer, peerOK := peer.FromContext(stream.Context())
	if peerOK {
-		log.Printf("D! [inputs.cisco_telemetry_mdt]: Accepted Cisco MDT GRPC dialout connection from %s", peer.Addr)
+		c.Log.Debugf("Accepted Cisco MDT GRPC dialout connection from %s", peer.Addr)
	}

	var chunkBuffer bytes.Buffer

@@ -252,7 +253,7 @@ func (c *CiscoTelemetryMDT) MdtDialout(stream dialout.GRPCMdtDialout_MdtDialoutS
	}

	if peerOK {
-		log.Printf("D! [inputs.cisco_telemetry_mdt]: Closed Cisco MDT GRPC dialout connection from %s", peer.Addr)
+		c.Log.Debugf("Closed Cisco MDT GRPC dialout connection from %s", peer.Addr)
	}

	return nil

@@ -291,7 +292,7 @@ func (c *CiscoTelemetryMDT) handleTelemetry(data []byte) {
	}

	if keys == nil || content == nil {
-		log.Printf("I! [inputs.cisco_telemetry_mdt]: Message from %s missing keys or content", msg.GetNodeIdStr())
+		c.Log.Infof("Message from %s missing keys or content", msg.GetNodeIdStr())
		continue
	}

@@ -412,7 +413,7 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie
	} else {
		c.mutex.Lock()
		if _, haveWarned := c.warned[path]; !haveWarned {
-			log.Printf("D! [inputs.cisco_telemetry_mdt]: No measurement alias for encoding path: %s", path)
+			c.Log.Debugf("No measurement alias for encoding path: %s", path)
			c.warned[path] = struct{}{}
		}
		c.mutex.Unlock()
@@ -18,7 +18,7 @@ import (
)

func TestHandleTelemetryTwoSimple(t *testing.T) {
-	c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"alias": "type:model/some/path"}}
+	c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "dummy", Aliases: map[string]string{"alias": "type:model/some/path"}}
	acc := &testutil.Accumulator{}
	c.Start(acc)

@@ -93,7 +93,7 @@ func TestHandleTelemetryTwoSimple(t *testing.T) {
}

func TestHandleTelemetrySingleNested(t *testing.T) {
-	c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"nested": "type:model/nested/path"}}
+	c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "dummy", Aliases: map[string]string{"nested": "type:model/nested/path"}}
	acc := &testutil.Accumulator{}
	c.Start(acc)

@@ -385,7 +385,7 @@ func TestHandleNXDME(t *testing.T) {
}

func TestTCPDialoutOverflow(t *testing.T) {
-	c := &CiscoTelemetryMDT{Transport: "tcp", ServiceAddress: "127.0.0.1:57000"}
+	c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "tcp", ServiceAddress: "127.0.0.1:57000"}
	acc := &testutil.Accumulator{}
	assert.Nil(t, c.Start(acc))

@@ -441,7 +441,7 @@ func mockTelemetryMessage() *telemetry.Telemetry {
}

func TestTCPDialoutMultiple(t *testing.T) {
-	c := &CiscoTelemetryMDT{Transport: "tcp", ServiceAddress: "127.0.0.1:57000", Aliases: map[string]string{
+	c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "tcp", ServiceAddress: "127.0.0.1:57000", Aliases: map[string]string{
		"some": "type:model/some/path", "parallel": "type:model/parallel/path", "other": "type:model/other/path"}}
	acc := &testutil.Accumulator{}
	assert.Nil(t, c.Start(acc))

@@ -500,7 +500,7 @@ func TestTCPDialoutMultiple(t *testing.T) {
}

func TestGRPCDialoutError(t *testing.T) {
-	c := &CiscoTelemetryMDT{Transport: "grpc", ServiceAddress: "127.0.0.1:57001"}
+	c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "grpc", ServiceAddress: "127.0.0.1:57001"}
	acc := &testutil.Accumulator{}
	assert.Nil(t, c.Start(acc))

@@ -519,7 +519,7 @@ func TestGRPCDialoutError(t *testing.T) {
}

func TestGRPCDialoutMultiple(t *testing.T) {
-	c := &CiscoTelemetryMDT{Transport: "grpc", ServiceAddress: "127.0.0.1:57001", Aliases: map[string]string{
+	c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "grpc", ServiceAddress: "127.0.0.1:57001", Aliases: map[string]string{
		"some": "type:model/some/path", "parallel": "type:model/parallel/path", "other": "type:model/other/path"}}
	acc := &testutil.Accumulator{}
	assert.Nil(t, c.Start(acc))
@@ -5,16 +5,16 @@ import (
	"fmt"
	"sync"

-	"cloud.google.com/go/pubsub"
	"encoding/base64"
	"time"

+	"cloud.google.com/go/pubsub"
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/parsers"
	"golang.org/x/oauth2/google"
	"google.golang.org/api/option"
-	"log"
-	"time"
)

type empty struct{}

@@ -43,6 +43,8 @@ type PubSub struct {

	Base64Data bool `toml:"base64_data"`

+	Log telegraf.Logger
+
	sub     subscription
	stubSub func() subscription

@@ -134,14 +136,14 @@ func (ps *PubSub) receiveWithRetry(parentCtx context.Context) {
	err := ps.startReceiver(parentCtx)

	for err != nil && parentCtx.Err() == nil {
-		log.Printf("E! [inputs.cloud_pubsub] Receiver for subscription %s exited with error: %v", ps.sub.ID(), err)
+		ps.Log.Errorf("Receiver for subscription %s exited with error: %v", ps.sub.ID(), err)

		delay := defaultRetryDelaySeconds
		if ps.RetryReceiveDelaySeconds > 0 {
			delay = ps.RetryReceiveDelaySeconds
		}

-		log.Printf("I! [inputs.cloud_pubsub] Waiting %d seconds before attempting to restart receiver...", delay)
+		ps.Log.Infof("Waiting %d seconds before attempting to restart receiver...", delay)
		time.Sleep(time.Duration(delay) * time.Second)

		err = ps.startReceiver(parentCtx)

@@ -149,7 +151,7 @@ func (ps *PubSub) receiveWithRetry(parentCtx context.Context) {
}

func (ps *PubSub) startReceiver(parentCtx context.Context) error {
-	log.Printf("I! [inputs.cloud_pubsub] Starting receiver for subscription %s...", ps.sub.ID())
+	ps.Log.Infof("Starting receiver for subscription %s...", ps.sub.ID())
	cctx, ccancel := context.WithCancel(parentCtx)
	err := ps.sub.Receive(cctx, func(ctx context.Context, msg message) {
		if err := ps.onMessage(ctx, msg); err != nil {

@@ -159,7 +161,7 @@ func (ps *PubSub) startReceiver(parentCtx context.Context) error {
	if err != nil {
		ps.acc.AddError(fmt.Errorf("receiver for subscription %s exited: %v", ps.sub.ID(), err))
	} else {
-		log.Printf("I! [inputs.cloud_pubsub] subscription pull ended (no error, most likely stopped)")
+		ps.Log.Info("Subscription pull ended (no error, most likely stopped)")
	}
	ccancel()
	return err
@@ -3,10 +3,11 @@ package cloud_pubsub
import (
	"encoding/base64"
	"errors"
+	"testing"

	"github.com/influxdata/telegraf/plugins/parsers"
+	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
-	"testing"
)

const (

@@ -26,6 +27,7 @@ func TestRunParse(t *testing.T) {
	sub.receiver = testMessagesReceive(sub)

	ps := &PubSub{
+		Log:     testutil.Logger{},
		parser:  testParser,
		stubSub: func() subscription { return sub },
		Project: "projectIDontMatterForTests",

@@ -69,6 +71,7 @@ func TestRunBase64(t *testing.T) {
	sub.receiver = testMessagesReceive(sub)

	ps := &PubSub{
+		Log:     testutil.Logger{},
		parser:  testParser,
		stubSub: func() subscription { return sub },
		Project: "projectIDontMatterForTests",

@@ -112,6 +115,7 @@ func TestRunInvalidMessages(t *testing.T) {
	sub.receiver = testMessagesReceive(sub)

	ps := &PubSub{
+		Log:     testutil.Logger{},
		parser:  testParser,
		stubSub: func() subscription { return sub },
		Project: "projectIDontMatterForTests",

@@ -158,6 +162,7 @@ func TestRunOverlongMessages(t *testing.T) {
	sub.receiver = testMessagesReceive(sub)

	ps := &PubSub{
+		Log:     testutil.Logger{},
		parser:  testParser,
		stubSub: func() subscription { return sub },
		Project: "projectIDontMatterForTests",

@@ -205,6 +210,7 @@ func TestRunErrorInSubscriber(t *testing.T) {
	sub.receiver = testMessagesError(sub, errors.New("a fake error"))

	ps := &PubSub{
+		Log:     testutil.Logger{},
		parser:  testParser,
		stubSub: func() subscription { return sub },
		Project: "projectIDontMatterForTests",
@@ -6,7 +6,6 @@ import (
	"encoding/base64"
	"encoding/json"
	"io/ioutil"
-	"log"
	"net"
	"net/http"
	"sync"

@@ -33,6 +32,7 @@ type PubSubPush struct {
	WriteTimeout internal.Duration
	MaxBodySize  internal.Size
	AddMeta      bool
+	Log          telegraf.Logger

	MaxUndeliveredMessages int `toml:"max_undelivered_messages"`

@@ -227,21 +227,21 @@ func (p *PubSubPush) serveWrite(res http.ResponseWriter, req *http.Request) {

	var payload Payload
	if err = json.Unmarshal(bytes, &payload); err != nil {
-		log.Printf("E! [inputs.cloud_pubsub_push] Error decoding payload %s", err.Error())
+		p.Log.Errorf("Error decoding payload %s", err.Error())
		res.WriteHeader(http.StatusBadRequest)
		return
	}

	sDec, err := base64.StdEncoding.DecodeString(payload.Msg.Data)
	if err != nil {
-		log.Printf("E! [inputs.cloud_pubsub_push] Base64-Decode Failed %s", err.Error())
+		p.Log.Errorf("Base64-decode failed %s", err.Error())
		res.WriteHeader(http.StatusBadRequest)
		return
	}

	metrics, err := p.Parse(sDec)
	if err != nil {
-		log.Println("D! [inputs.cloud_pubsub_push] " + err.Error())
+		p.Log.Debug(err.Error())
		res.WriteHeader(http.StatusBadRequest)
		return
	}

@@ -295,7 +295,7 @@ func (p *PubSubPush) receiveDelivered() {
			ch <- true
		} else {
			ch <- false
-			log.Println("D! [inputs.cloud_pubsub_push] Metric group failed to process")
+			p.Log.Debug("Metric group failed to process")
		}
	}
}
@@ -18,6 +18,7 @@ import (
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/internal/models"
	"github.com/influxdata/telegraf/plugins/parsers"
+	"github.com/influxdata/telegraf/testutil"
)

func TestServeHTTP(t *testing.T) {

@@ -118,6 +119,8 @@ func TestServeHTTP(t *testing.T) {

	rr := httptest.NewRecorder()
	pubPush := &PubSubPush{
+		Log:  testutil.Logger{},
		Path: "/",
		MaxBodySize: internal.Size{
			Size: test.maxsize,
@@ -2,7 +2,6 @@ package diskio

import (
	"fmt"
-	"log"
	"regexp"
	"strings"

@@ -24,6 +23,8 @@ type DiskIO struct {
	NameTemplates    []string
	SkipSerialNumber bool

+	Log telegraf.Logger
+
	infoCache    map[string]diskInfoCache
	deviceFilter filter.Filter
	initialized  bool

@@ -75,7 +76,7 @@ func (s *DiskIO) init() error {
	if hasMeta(device) {
		filter, err := filter.Compile(s.Devices)
		if err != nil {
-			return fmt.Errorf("error compiling device pattern: %v", err)
+			return fmt.Errorf("error compiling device pattern: %s", err.Error())
		}
		s.deviceFilter = filter
	}

@@ -99,7 +100,7 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error {

	diskio, err := s.ps.DiskIO(devices)
	if err != nil {
-		return fmt.Errorf("error getting disk io info: %s", err)
+		return fmt.Errorf("error getting disk io info: %s", err.Error())
	}

	for _, io := range diskio {

@@ -166,7 +167,7 @@ func (s *DiskIO) diskName(devName string) (string, []string) {
	}

	if err != nil {
-		log.Printf("W! Error gathering disk info: %s", err)
+		s.Log.Warnf("Error gathering disk info: %s", err)
		return devName, devLinks
	}

@@ -199,7 +200,7 @@ func (s *DiskIO) diskTags(devName string) map[string]string {

	di, err := s.diskInfo(devName)
	if err != nil {
-		log.Printf("W! Error gathering disk info: %s", err)
+		s.Log.Warnf("Error gathering disk info: %s", err)
		return nil
	}
@@ -103,6 +103,7 @@ func TestDiskIO(t *testing.T) {
	var acc testutil.Accumulator

	diskio := &DiskIO{
+		Log:     testutil.Logger{},
		ps:      &mps,
		Devices: tt.devices,
	}
@@ -6,7 +6,6 @@ import (
	"encoding/json"
	"fmt"
	"io"
-	"log"
	"net/http"
	"regexp"
	"strconv"

@@ -45,6 +44,8 @@ type Docker struct {
	ContainerStateInclude []string `toml:"container_state_include"`
	ContainerStateExclude []string `toml:"container_state_exclude"`

+	Log telegraf.Logger
+
	tlsint.ClientConfig

	newEnvClient func() (Client, error)

@@ -107,8 +108,10 @@ var sampleConfig = `
  ## Whether to report for each container per-device blkio (8:0, 8:1...) and
  ## network (eth0, eth1, ...) stats or not
  perdevice = true

  ## Whether to report for each container total blkio and network stats or not
  total = false

  ## Which environment variables should we use as a tag
  ##tag_env = ["JAVA_HOME", "HEAP_SIZE"]

@@ -274,7 +277,7 @@ func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error {
		fields["tasks_running"] = running[service.ID]
		fields["tasks_desired"] = tasksNoShutdown[service.ID]
	} else {
-		log.Printf("E! Unknow Replicas Mode")
+		d.Log.Error("Unknown replica mode")
	}
	// Add metrics
	acc.AddFields("docker_swarm",
@@ -252,6 +252,7 @@ func TestDocker_WindowsMemoryContainerStats(t *testing.T) {
	var acc testutil.Accumulator

	d := Docker{
+		Log: testutil.Logger{},
		newClient: func(string, *tls.Config) (Client, error) {
			return &MockClient{
				InfoF: func(ctx context.Context) (types.Info, error) {

@@ -390,6 +391,7 @@ func TestContainerLabels(t *testing.T) {
	}

	d := Docker{
+		Log:          testutil.Logger{},
		newClient:    newClientFunc,
		LabelInclude: tt.include,
		LabelExclude: tt.exclude,

@@ -511,6 +513,7 @@ func TestContainerNames(t *testing.T) {
	}

	d := Docker{
+		Log:              testutil.Logger{},
		newClient:        newClientFunc,
		ContainerInclude: tt.include,
		ContainerExclude: tt.exclude,

@@ -625,7 +628,10 @@ func TestContainerStatus(t *testing.T) {

		return &client, nil
	}
-	d = Docker{newClient: newClientFunc}
+	d = Docker{
+		Log:       testutil.Logger{},
+		newClient: newClientFunc,
+	}
)

// mock time

@@ -675,6 +681,7 @@ func TestContainerStatus(t *testing.T) {
func TestDockerGatherInfo(t *testing.T) {
	var acc testutil.Accumulator
	d := Docker{
+		Log:       testutil.Logger{},
		newClient: newClient,
		TagEnvironment: []string{"ENVVAR1", "ENVVAR2", "ENVVAR3", "ENVVAR5",
			"ENVVAR6", "ENVVAR7", "ENVVAR8", "ENVVAR9"},

@@ -824,6 +831,7 @@ func TestDockerGatherInfo(t *testing.T) {
func TestDockerGatherSwarmInfo(t *testing.T) {
	var acc testutil.Accumulator
	d := Docker{
+		Log:       testutil.Logger{},
		newClient: newClient,
	}

@@ -931,6 +939,7 @@ func TestContainerStateFilter(t *testing.T) {
	}

	d := Docker{
+		Log:                   testutil.Logger{},
		newClient:             newClientFunc,
		ContainerStateInclude: tt.include,
		ContainerStateExclude: tt.exclude,

@@ -992,6 +1001,7 @@ func TestContainerName(t *testing.T) {
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			d := Docker{
+				Log:       testutil.Logger{},
				newClient: tt.clientFunc,
			}
			var acc testutil.Accumulator
@@ -17,8 +17,10 @@ the [upgrading steps][upgrading].
  ##
  ## If no servers are specified, then localhost is used as the host.
  servers = ["localhost:24242"]

  ## Type is one of "user", "domain", "ip", or "global"
  type = "global"

  ## Wildcard matches like "*.com". An empty string "" is same as "*"
  ## If type = "ip" filters should be <IP/network>
  filters = [""]
@@ -4,7 +4,6 @@ import (
	"bytes"
	"fmt"
	"io"
-	// "log"
	"net"
	"strconv"
	"strings"

@@ -32,8 +31,10 @@ var sampleConfig = `
  ##
  ## If no servers are specified, then localhost is used as the host.
  servers = ["localhost:24242"]

  ## Type is one of "user", "domain", "ip", or "global"
  type = "global"

  ## Wildcard matches like "*.com". An empty string "" is same as "*"
  ## If type = "ip" filters should be <IP/network>
  filters = [""]

@@ -82,12 +83,12 @@ func (d *Dovecot) Gather(acc telegraf.Accumulator) error {
func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype string, filter string) error {
	_, _, err := net.SplitHostPort(addr)
	if err != nil {
-		return fmt.Errorf("Error: %s on url %s\n", err, addr)
+		return fmt.Errorf("%q on url %s", err.Error(), addr)
	}

	c, err := net.DialTimeout("tcp", addr, defaultTimeout)
	if err != nil {
-		return fmt.Errorf("Unable to connect to dovecot server '%s': %s", addr, err)
+		return fmt.Errorf("unable to connect to dovecot server '%s': %s", addr, err)
	}
	defer c.Close()
@@ -161,7 +161,7 @@ func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync
	if isNagios {
		metrics, err = nagios.TryAddState(runErr, metrics)
		if err != nil {
-			e.log.Errorf("failed to add nagios state: %s", err)
+			e.log.Errorf("Failed to add nagios state: %s", err)
		}
	}
@@ -1,7 +1,6 @@
package filecount

import (
-	"log"
	"os"
	"path/filepath"
	"time"

@@ -59,6 +58,7 @@ type FileCount struct {
	fileFilters []fileFilterFunc
	globPaths   []globpath.GlobPath
	Fs          fileSystem
+	Log         telegraf.Logger
}

func (_ *FileCount) Description() string {

@@ -210,7 +210,7 @@ func (fc *FileCount) count(acc telegraf.Accumulator, basedir string, glob globpa
	Unsorted: true,
	ErrorCallback: func(osPathname string, err error) godirwalk.ErrorAction {
		if os.IsPermission(errors.Cause(err)) {
-			log.Println("D! [inputs.filecount]", err)
+			fc.Log.Debug(err)
			return godirwalk.SkipNode
		}
		return godirwalk.Halt
@@ -152,6 +152,7 @@ func TestDirectoryWithTrailingSlash(t *testing.T) {

func getNoFilterFileCount() FileCount {
	return FileCount{
+		Log:         testutil.Logger{},
		Directories: []string{getTestdataDir()},
		Name:        "*",
		Recursive:   true,
@@ -11,6 +11,7 @@ The filestat plugin gathers metrics about file existence, size, and other stats.
  ## These accept standard unix glob matching rules, but with the addition of
  ## ** as a "super asterisk". See https://github.com/gobwas/glob.
  files = ["/etc/telegraf/telegraf.conf", "/var/log/**.log"]

  ## If true, read the entire file and calculate an md5 checksum.
  md5 = false
```
@@ -4,7 +4,6 @@ import (
	"crypto/md5"
	"fmt"
	"io"
-	"log"
	"os"

	"github.com/influxdata/telegraf"

@@ -23,6 +22,7 @@ const sampleConfig = `
  ## See https://github.com/gobwas/glob for more examples
  ##
  files = ["/var/log/**.log"]

  ## If true, read the entire file and calculate an md5 checksum.
  md5 = false
`

@@ -31,6 +31,8 @@ type FileStat struct {
	Md5   bool
	Files []string

+	Log telegraf.Logger
+
	// maps full file paths to globmatch obj
	globs map[string]*globpath.GlobPath
}

@@ -41,11 +43,11 @@ func NewFileStat() *FileStat {
	}
}

-func (_ *FileStat) Description() string {
+func (*FileStat) Description() string {
	return "Read stats about given file(s)"
}

-func (_ *FileStat) SampleConfig() string { return sampleConfig }
+func (*FileStat) SampleConfig() string { return sampleConfig }

func (f *FileStat) Gather(acc telegraf.Accumulator) error {
	var err error

@@ -86,7 +88,7 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error {
	}

	if fileInfo == nil {
-		log.Printf("E! Unable to get info for file [%s], possible permissions issue",
+		f.Log.Errorf("Unable to get info for file %q, possible permissions issue",
			fileName)
	} else {
		fields["size_bytes"] = fileInfo.Size()
@@ -14,6 +14,7 @@ import (
func TestGatherNoMd5(t *testing.T) {
	dir := getTestdataDir()
	fs := NewFileStat()
+	fs.Log = testutil.Logger{}
	fs.Files = []string{
		dir + "log1.log",
		dir + "log2.log",

@@ -44,6 +45,7 @@ func TestGatherNoMd5(t *testing.T) {
func TestGatherExplicitFiles(t *testing.T) {
	dir := getTestdataDir()
	fs := NewFileStat()
+	fs.Log = testutil.Logger{}
	fs.Md5 = true
	fs.Files = []string{
		dir + "log1.log",

@@ -77,6 +79,7 @@ func TestGatherExplicitFiles(t *testing.T) {
func TestGatherGlob(t *testing.T) {
	dir := getTestdataDir()
	fs := NewFileStat()
+	fs.Log = testutil.Logger{}
	fs.Md5 = true
	fs.Files = []string{
		dir + "*.log",

@@ -103,6 +106,7 @@ func TestGatherGlob(t *testing.T) {
func TestGatherSuperAsterisk(t *testing.T) {
	dir := getTestdataDir()
	fs := NewFileStat()
+	fs.Log = testutil.Logger{}
	fs.Md5 = true
	fs.Files = []string{
		dir + "**",

@@ -136,6 +140,7 @@ func TestGatherSuperAsterisk(t *testing.T) {
func TestModificationTime(t *testing.T) {
	dir := getTestdataDir()
	fs := NewFileStat()
+	fs.Log = testutil.Logger{}
	fs.Files = []string{
		dir + "log1.log",
	}

@@ -153,6 +158,7 @@ func TestModificationTime(t *testing.T) {

func TestNoModificationTime(t *testing.T) {
	fs := NewFileStat()
+	fs.Log = testutil.Logger{}
	fs.Files = []string{
		"/non/existant/file",
	}
@@ -5,7 +5,6 @@ import (
	"crypto/subtle"
	"crypto/tls"
	"io/ioutil"
-	"log"
	"net"
	"net/http"
	"net/url"

@@ -48,6 +47,7 @@ type HTTPListenerV2 struct {
	tlsint.ServerConfig

	TimeFunc
+	Log telegraf.Logger

	wg sync.WaitGroup

@@ -162,7 +162,7 @@ func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error {
		server.Serve(h.listener)
	}()

-	log.Printf("I! [inputs.http_listener_v2] Listening on %s", listener.Addr().String())
+	h.Log.Infof("Listening on %s", listener.Addr().String())

	return nil
}

@@ -219,7 +219,7 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request)

	metrics, err := h.Parse(bytes)
	if err != nil {
-		log.Printf("D! [inputs.http_listener_v2] Parse error: %v", err)
+		h.Log.Debugf("Parse error: %s", err.Error())
		badRequest(res)
		return
	}

@@ -239,7 +239,7 @@ func (h *HTTPListenerV2) collectBody(res http.ResponseWriter, req *http.Request)
	var err error
	body, err = gzip.NewReader(req.Body)
	if err != nil {
-		log.Println("D! " + err.Error())
+		h.Log.Debug(err.Error())
		badRequest(res)
		return nil, false
	}

@@ -261,7 +261,7 @@ func (h *HTTPListenerV2) collectQuery(res http.ResponseWriter, req *http.Request

	query, err := url.QueryUnescape(rawQuery)
	if err != nil {
-		log.Printf("D! [inputs.http_listener_v2] Error parsing query: %v", err)
+		h.Log.Debugf("Error parsing query: %s", err.Error())
		badRequest(res)
		return nil, false
	}
@@ -46,6 +46,7 @@ func newTestHTTPListenerV2() *HTTPListenerV2 {
	parser, _ := parsers.NewInfluxParser()

	listener := &HTTPListenerV2{
+		Log:            testutil.Logger{},
		ServiceAddress: "localhost:0",
		Path:           "/write",
		Methods:        []string{"POST"},

@@ -68,6 +69,7 @@ func newTestHTTPSListenerV2() *HTTPListenerV2 {
	parser, _ := parsers.NewInfluxParser()

	listener := &HTTPListenerV2{
+		Log:            testutil.Logger{},
		ServiceAddress: "localhost:0",
		Path:           "/write",
		Methods:        []string{"POST"},

@@ -231,6 +233,7 @@ func TestWriteHTTPExactMaxBodySize(t *testing.T) {
	parser, _ := parsers.NewInfluxParser()

	listener := &HTTPListenerV2{
+		Log:            testutil.Logger{},
		ServiceAddress: "localhost:0",
		Path:           "/write",
		Methods:        []string{"POST"},

@@ -253,6 +256,7 @@ func TestWriteHTTPVerySmallMaxBody(t *testing.T) {
	parser, _ := parsers.NewInfluxParser()

	listener := &HTTPListenerV2{
+		Log:            testutil.Logger{},
		ServiceAddress: "localhost:0",
		Path:           "/write",
		Methods:        []string{"POST"},
@@ -5,7 +5,6 @@ import (
	"fmt"
	"io"
	"io/ioutil"
-	"log"
	"net"
	"net/http"
	"net/url"

@@ -34,6 +33,8 @@ type HTTPResponse struct {
	Interface string
	tls.ClientConfig

+	Log telegraf.Logger
+
	compiledStringMatch *regexp.Regexp
	client              *http.Client
}

@@ -242,7 +243,7 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string]
	// HTTP error codes do not generate errors in the net/http library
	if err != nil {
		// Log error
-		log.Printf("D! Network error while polling %s: %s", u, err.Error())
+		h.Log.Debugf("Network error while polling %s: %s", u, err.Error())

		// Get error details
		netErr := setError(err, fields, tags)

@@ -271,7 +272,7 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string]

	bodyBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
-		log.Printf("D! Failed to read body of HTTP Response : %s", err)
+		h.Log.Debugf("Failed to read body of HTTP Response : %s", err.Error())
		setResult("body_read_error", fields, tags)
		fields["content_length"] = len(bodyBytes)
		if h.ResponseStringMatch != "" {

@@ -322,7 +323,7 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
	if h.Address == "" {
		h.URLs = []string{"http://localhost"}
	} else {
-		log.Printf("W! [inputs.http_response] 'address' deprecated in telegraf 1.12, please use 'urls'")
+		h.Log.Warn("'address' deprecated in telegraf 1.12, please use 'urls'")
		h.URLs = []string{h.Address}
	}
}
@@ -150,6 +150,7 @@ func TestHeaders(t *testing.T) {
	defer ts.Close()

	h := &HTTPResponse{
+		Log:             testutil.Logger{},
		Address:         ts.URL,
		Method:          "GET",
		ResponseTimeout: internal.Duration{Duration: time.Second * 2},

@@ -185,6 +186,7 @@ func TestFields(t *testing.T) {
	defer ts.Close()

	h := &HTTPResponse{
+		Log:     testutil.Logger{},
		Address: ts.URL + "/good",
		Body:    "{ 'test': 'data'}",
		Method:  "GET",

@@ -246,6 +248,7 @@ func TestInterface(t *testing.T) {
	require.NoError(t, err)

	h := &HTTPResponse{
+		Log:     testutil.Logger{},
		Address: ts.URL + "/good",
		Body:    "{ 'test': 'data'}",
		Method:  "GET",

@@ -284,6 +287,7 @@ func TestRedirects(t *testing.T) {
	defer ts.Close()

	h := &HTTPResponse{
+		Log:     testutil.Logger{},
		Address: ts.URL + "/redirect",
		Body:    "{ 'test': 'data'}",
		Method:  "GET",

@@ -314,6 +318,7 @@ func TestRedirects(t *testing.T) {
	checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)

	h = &HTTPResponse{
+		Log:     testutil.Logger{},
		Address: ts.URL + "/badredirect",
		Body:    "{ 'test': 'data'}",
		Method:  "GET",

@@ -350,6 +355,7 @@ func TestMethod(t *testing.T) {
	defer ts.Close()

	h := &HTTPResponse{
+		Log:     testutil.Logger{},
		Address: ts.URL + "/mustbepostmethod",
		Body:    "{ 'test': 'data'}",
		Method:  "POST",

@@ -380,6 +386,7 @@ func TestMethod(t *testing.T) {
	checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)

	h = &HTTPResponse{
+		Log:     testutil.Logger{},
		Address: ts.URL + "/mustbepostmethod",
		Body:    "{ 'test': 'data'}",
		Method:  "GET",

@@ -411,6 +418,7 @@ func TestMethod(t *testing.T) {

	//check that lowercase methods work correctly
	h = &HTTPResponse{
+		Log:     testutil.Logger{},
		Address: ts.URL + "/mustbepostmethod",
		Body:    "{ 'test': 'data'}",
		Method:  "head",

@@ -447,6 +455,7 @@ func TestBody(t *testing.T) {
	defer ts.Close()

	h := &HTTPResponse{
+		Log:     testutil.Logger{},
		Address: ts.URL + "/musthaveabody",
		Body:    "{ 'test': 'data'}",
		Method:  "GET",

@@ -477,6 +486,7 @@ func TestBody(t *testing.T) {
	checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)

	h = &HTTPResponse{
+		Log:             testutil.Logger{},
		Address:         ts.URL + "/musthaveabody",
		Method:          "GET",
		ResponseTimeout: internal.Duration{Duration: time.Second * 20},

@@ -510,6 +520,7 @@ func TestStringMatch(t *testing.T) {
	defer ts.Close()

	h := &HTTPResponse{
+		Log:     testutil.Logger{},
		Address: ts.URL + "/good",
		Body:    "{ 'test': 'data'}",
		Method:  "GET",

@@ -547,6 +558,7 @@ func TestStringMatchJson(t *testing.T) {
	defer ts.Close()

	h := &HTTPResponse{
+		Log:     testutil.Logger{},
		Address: ts.URL + "/jsonresponse",
		Body:    "{ 'test': 'data'}",
		Method:  "GET",

@@ -584,6 +596,7 @@ func TestStringMatchFail(t *testing.T) {
	defer ts.Close()

	h := &HTTPResponse{
+		Log:     testutil.Logger{},
		Address: ts.URL + "/good",
		Body:    "{ 'test': 'data'}",
		Method:  "GET",

@@ -626,6 +639,7 @@ func TestTimeout(t *testing.T) {
	defer ts.Close()

	h := &HTTPResponse{
+		Log:     testutil.Logger{},
		Address: ts.URL + "/twosecondnap",
		Body:    "{ 'test': 'data'}",
		Method:  "GET",

@@ -659,6 +673,7 @@ func TestBadRegex(t *testing.T) {
	defer ts.Close()

	h := &HTTPResponse{
+		Log:     testutil.Logger{},
		Address: ts.URL + "/good",
		Body:    "{ 'test': 'data'}",
		Method:  "GET",

@@ -682,6 +697,7 @@ func TestBadRegex(t *testing.T) {
func TestNetworkErrors(t *testing.T) {
	// DNS error
	h := &HTTPResponse{
+		Log:     testutil.Logger{},
		Address: "https://nonexistent.nonexistent", // Any non-resolvable URL works here
		Body:    "",
		Method:  "GET",

@@ -708,6 +724,7 @@ func TestNetworkErrors(t *testing.T) {

	// Connection failed
	h = &HTTPResponse{
+		Log:     testutil.Logger{},
		Address: "https:/nonexistent.nonexistent", // Any non-routable IP works here
		Body:    "",
		Method:  "GET",

@@ -739,6 +756,7 @@ func TestContentLength(t *testing.T) {
	defer ts.Close()

	h := &HTTPResponse{
+		Log:    testutil.Logger{},
		URLs:   []string{ts.URL + "/good"},
		Body:   "{ 'test': 'data'}",
		Method: "GET",

@@ -769,6 +787,7 @@ func TestContentLength(t *testing.T) {
	checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)

	h = &HTTPResponse{
+		Log:    testutil.Logger{},
		URLs:   []string{ts.URL + "/musthaveabody"},
		Body:   "{ 'test': 'data'}",
		Method: "GET",
@ -11,10 +11,10 @@ services and hosts. You can read Icinga2's documentation for their remote API
```toml
# Description
[[inputs.icinga2]]
## Required Icinga2 server address (default: "https://localhost:5665")
## Required Icinga2 server address
# server = "https://localhost:5665"

## Required Icinga2 object type ("services" or "hosts, default "services")
## Required Icinga2 object type ("services" or "hosts")
# object_type = "services"

## Credentials for basic HTTP authentication

@ -3,7 +3,6 @@ package icinga2
import (
"encoding/json"
"fmt"
"log"
"net/http"
"net/url"
"time"
@ -22,6 +21,8 @@ type Icinga2 struct {
ResponseTimeout internal.Duration
tls.ClientConfig

Log telegraf.Logger

client *http.Client
}

@ -49,10 +50,10 @@ var levels = []string{"ok", "warning", "critical", "unknown"}
type ObjectType string

var sampleConfig = `
## Required Icinga2 server address (default: "https://localhost:5665")
## Required Icinga2 server address
# server = "https://localhost:5665"

## Required Icinga2 object type ("services" or "hosts, default "services")
## Required Icinga2 object type ("services" or "hosts")
# object_type = "services"

## Credentials for basic HTTP authentication
@ -80,25 +81,27 @@ func (i *Icinga2) SampleConfig() string {

func (i *Icinga2) GatherStatus(acc telegraf.Accumulator, checks []Object) {
for _, check := range checks {
fields := make(map[string]interface{})
tags := make(map[string]string)

url, err := url.Parse(i.Server)
if err != nil {
log.Fatal(err)
i.Log.Error(err.Error())
continue
}

state := int64(check.Attrs.State)

fields["name"] = check.Attrs.Name
fields["state_code"] = state
fields := map[string]interface{}{
"name": check.Attrs.Name,
"state_code": state,
}

tags["display_name"] = check.Attrs.DisplayName
tags["check_command"] = check.Attrs.CheckCommand
tags["state"] = levels[state]
tags["source"] = url.Hostname()
tags["scheme"] = url.Scheme
tags["port"] = url.Port()
tags := map[string]string{
"display_name": check.Attrs.DisplayName,
"check_command": check.Attrs.CheckCommand,
"state": levels[state],
"source": url.Hostname(),
"scheme": url.Scheme,
"port": url.Port(),
}

acc.AddFields(fmt.Sprintf("icinga2_%s", i.ObjectType), fields, tags)
}
@ -167,6 +170,7 @@ func init() {
return &Icinga2{
Server: "https://localhost:5665",
ObjectType: "services",
ResponseTimeout: internal.Duration{Duration: time.Second * 5},
}
})
}

@ -32,6 +32,7 @@ func TestGatherServicesStatus(t *testing.T) {
json.Unmarshal([]byte(s), &checks)

icinga2 := new(Icinga2)
icinga2.Log = testutil.Logger{}
icinga2.ObjectType = "services"
icinga2.Server = "https://localhost:5665"

@ -86,6 +87,7 @@ func TestGatherHostsStatus(t *testing.T) {
var acc testutil.Accumulator

icinga2 := new(Icinga2)
icinga2.Log = testutil.Logger{}
icinga2.ObjectType = "hosts"
icinga2.Server = "https://localhost:5665"

@ -8,7 +8,6 @@ import (
"encoding/json"
"fmt"
"io"
"log"
"net"
"net/http"
"sync"
@ -75,6 +74,8 @@ type HTTPListener struct {
BuffersCreated selfstat.Stat
AuthFailures selfstat.Stat

Log telegraf.Logger

longLines selfstat.Stat
}

@ -202,7 +203,7 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
server.Serve(h.listener)
}()

log.Printf("I! Started HTTP listener service on %s\n", h.ServiceAddress)
h.Log.Infof("Started HTTP listener service on %s", h.ServiceAddress)

return nil
}
@ -215,7 +216,7 @@ func (h *HTTPListener) Stop() {
h.listener.Close()
h.wg.Wait()

log.Println("I! Stopped HTTP listener service on ", h.ServiceAddress)
h.Log.Infof("Stopped HTTP listener service on %s", h.ServiceAddress)
}

func (h *HTTPListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
@ -274,7 +275,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
var err error
body, err = gzip.NewReader(req.Body)
if err != nil {
log.Println("D! " + err.Error())
h.Log.Debug(err.Error())
badRequest(res, err.Error())
return
}
@ -290,7 +291,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
for {
n, err := io.ReadFull(body, buf[bufStart:])
if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
log.Println("D! " + err.Error())
h.Log.Debug(err.Error())
// problem reading the request body
badRequest(res, err.Error())
return
@ -326,7 +327,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
// finished reading the request body
err = h.parse(buf[:n+bufStart], now, precision, db)
if err != nil {
log.Println("D! "+err.Error(), bufStart+n)
h.Log.Debugf("%s: %s", err.Error(), bufStart+n)
return400 = true
}
if return400 {
@ -348,7 +349,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
if i == -1 {
h.longLines.Incr(1)
// drop any line longer than the max buffer size
log.Printf("D! http_listener received a single line longer than the maximum of %d bytes",
h.Log.Debugf("Http_listener received a single line longer than the maximum of %d bytes",
len(buf))
hangingBytes = true
return400 = true
@ -356,7 +357,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
continue
}
if err := h.parse(buf[:i+1], now, precision, db); err != nil {
log.Println("D! " + err.Error())
h.Log.Debug(err.Error())
return400 = true
}
// rotate the bit remaining after the last newline to the front of the buffer

@ -44,6 +44,7 @@ var (

func newTestHTTPListener() *HTTPListener {
listener := &HTTPListener{
Log: testutil.Logger{},
ServiceAddress: "localhost:0",
TimeFunc: time.Now,
}
@ -59,6 +60,7 @@ func newTestHTTPAuthListener() *HTTPListener {

func newTestHTTPSListener() *HTTPListener {
listener := &HTTPListener{
Log: testutil.Logger{},
ServiceAddress: "localhost:0",
ServerConfig: *pki.TLSServerConfig(),
TimeFunc: time.Now,
@ -220,6 +222,7 @@ func TestWriteHTTPNoNewline(t *testing.T) {

func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) {
listener := &HTTPListener{
Log: testutil.Logger{},
ServiceAddress: "localhost:0",
MaxLineSize: internal.Size{Size: 128 * 1000},
TimeFunc: time.Now,
@ -238,6 +241,7 @@ func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) {

func TestWriteHTTPVerySmallMaxBody(t *testing.T) {
listener := &HTTPListener{
Log: testutil.Logger{},
ServiceAddress: "localhost:0",
MaxBodySize: internal.Size{Size: 4096},
TimeFunc: time.Now,
@ -255,6 +259,7 @@ func TestWriteHTTPVerySmallMaxBody(t *testing.T) {

func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) {
listener := &HTTPListener{
Log: testutil.Logger{},
ServiceAddress: "localhost:0",
MaxLineSize: internal.Size{Size: 70},
TimeFunc: time.Now,
@ -282,6 +287,7 @@ func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) {

func TestWriteHTTPLargeLinesSkipped(t *testing.T) {
listener := &HTTPListener{
Log: testutil.Logger{},
ServiceAddress: "localhost:0",
MaxLineSize: internal.Size{Size: 100},
TimeFunc: time.Now,

@ -5,7 +5,6 @@ package ipvs
import (
"errors"
"fmt"
"log"
"math/bits"
"strconv"
"syscall"
@ -18,6 +17,7 @@ import (
// IPVS holds the state for this input plugin
type IPVS struct {
handle *ipvs.Handle
Log telegraf.Logger
}

// Description returns a description string
@ -61,7 +61,7 @@ func (i *IPVS) Gather(acc telegraf.Accumulator) error {

destinations, err := i.handle.GetDestinations(s)
if err != nil {
log.Println("E! Failed to list destinations for a virtual server")
i.Log.Errorf("Failed to list destinations for a virtual server: %s", err.Error())
continue // move on to the next virtual server
}

@ -4,7 +4,6 @@ import (
"context"
"errors"
"fmt"
"log"
"net/http"
"strconv"
"strings"
@ -29,6 +28,8 @@ type Jenkins struct {
tls.ClientConfig
client *client

Log telegraf.Logger

MaxConnections int `toml:"max_connections"`
MaxBuildAge internal.Duration `toml:"max_build_age"`
MaxSubJobDepth int `toml:"max_subjob_depth"`
@ -304,7 +305,7 @@ func (j *Jenkins) getJobDetail(jr jobRequest, acc telegraf.Accumulator) error {
}

if build.Building {
log.Printf("D! Ignore running build on %s, build %v", jr.name, number)
j.Log.Debugf("Ignore running build on %s, build %v", jr.name, number)
return nil
}

@ -206,6 +206,7 @@ func TestGatherNodeData(t *testing.T) {
ts := httptest.NewServer(test.input)
defer ts.Close()
j := &Jenkins{
Log: testutil.Logger{},
URL: ts.URL,
ResponseTimeout: internal.Duration{Duration: time.Microsecond},
NodeExclude: []string{"ignore-1", "ignore-2"},
@ -258,6 +259,7 @@ func TestInitialize(t *testing.T) {
{
name: "bad jenkins config",
input: &Jenkins{
Log: testutil.Logger{},
URL: "http://a bad url",
ResponseTimeout: internal.Duration{Duration: time.Microsecond},
},
@ -266,6 +268,7 @@ func TestInitialize(t *testing.T) {
{
name: "has filter",
input: &Jenkins{
Log: testutil.Logger{},
URL: ts.URL,
ResponseTimeout: internal.Duration{Duration: time.Microsecond},
JobExclude: []string{"job1", "job2"},
@ -275,10 +278,12 @@ func TestInitialize(t *testing.T) {
{
name: "default config",
input: &Jenkins{
Log: testutil.Logger{},
URL: ts.URL,
ResponseTimeout: internal.Duration{Duration: time.Microsecond},
},
output: &Jenkins{
Log: testutil.Logger{},
MaxConnections: 5,
MaxSubJobPerLayer: 10,
},
@ -570,6 +575,7 @@ func TestGatherJobs(t *testing.T) {
ts := httptest.NewServer(test.input)
defer ts.Close()
j := &Jenkins{
Log: testutil.Logger{},
URL: ts.URL,
MaxBuildAge: internal.Duration{Duration: time.Hour},
ResponseTimeout: internal.Duration{Duration: time.Microsecond},

@ -2,7 +2,6 @@ package jti_openconfig_telemetry

import (
"fmt"
"log"
"net"
"regexp"
"strings"
@ -34,6 +33,8 @@ type OpenConfigTelemetry struct {
EnableTLS bool `toml:"enable_tls"`
internaltls.ClientConfig

Log telegraf.Logger

sensorsConfig []sensorConfig
grpcClientConns []*grpc.ClientConn
wg *sync.WaitGroup
@ -243,7 +244,7 @@ func (m *OpenConfigTelemetry) splitSensorConfig() int {
}

if len(spathSplit) == 0 {
log.Printf("E! No sensors are specified")
m.Log.Error("No sensors are specified")
continue
}

@ -257,7 +258,7 @@ func (m *OpenConfigTelemetry) splitSensorConfig() int {
}

if len(spathSplit) == 0 {
log.Printf("E! No valid sensors are specified")
m.Log.Error("No valid sensors are specified")
continue
}

@ -294,13 +295,13 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context,
rpcStatus, _ := status.FromError(err)
// If service is currently unavailable and may come back later, retry
if rpcStatus.Code() != codes.Unavailable {
acc.AddError(fmt.Errorf("E! Could not subscribe to %s: %v", grpcServer,
acc.AddError(fmt.Errorf("could not subscribe to %s: %v", grpcServer,
err))
return
} else {
// Retry with delay. If delay is not provided, use default
if m.RetryDelay.Duration > 0 {
log.Printf("D! Retrying %s with timeout %v", grpcServer,
m.Log.Debugf("Retrying %s with timeout %v", grpcServer,
m.RetryDelay.Duration)
time.Sleep(m.RetryDelay.Duration)
continue
@ -314,11 +315,11 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context,
if err != nil {
// If we encounter error in the stream, break so we can retry
// the connection
acc.AddError(fmt.Errorf("E! Failed to read from %s: %v", err, grpcServer))
acc.AddError(fmt.Errorf("failed to read from %s: %s", grpcServer, err))
break
}

log.Printf("D! Received from %s: %v", grpcServer, r)
m.Log.Debugf("Received from %s: %v", grpcServer, r)

// Create a point and add to batch
tags := make(map[string]string)
@ -329,7 +330,7 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context,
dgroups := m.extractData(r, grpcServer)

// Print final data collection
log.Printf("D! Available collection for %s is: %v", grpcServer, dgroups)
m.Log.Debugf("Available collection for %s is: %v", grpcServer, dgroups)

tnow := time.Now()
// Iterate through data groups and add them
@ -349,10 +350,9 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context,
}

func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error {

// Build sensors config
if m.splitSensorConfig() == 0 {
return fmt.Errorf("E! No valid sensor configuration available")
return fmt.Errorf("no valid sensor configuration available")
}

// Parse TLS config
@ -376,15 +376,15 @@ func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error {
// Extract device address and port
grpcServer, grpcPort, err := net.SplitHostPort(server)
if err != nil {
log.Printf("E! Invalid server address: %v", err)
m.Log.Errorf("Invalid server address: %s", err.Error())
continue
}

grpcClientConn, err = grpc.Dial(server, opts...)
if err != nil {
log.Printf("E! Failed to connect to %s: %v", server, err)
m.Log.Errorf("Failed to connect to %s: %s", server, err.Error())
} else {
log.Printf("D! Opened a new gRPC session to %s on port %s", grpcServer, grpcPort)
m.Log.Debugf("Opened a new gRPC session to %s on port %s", grpcServer, grpcPort)
}

// Add to the list of client connections
@ -396,13 +396,13 @@ func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error {
&authentication.LoginRequest{UserName: m.Username,
Password: m.Password, ClientId: m.ClientID})
if loginErr != nil {
log.Printf("E! Could not initiate login check for %s: %v", server, loginErr)
m.Log.Errorf("Could not initiate login check for %s: %v", server, loginErr)
continue
}

// Check if the user is authenticated. Bail if auth error
if !loginReply.Result {
log.Printf("E! Failed to authenticate the user for %s", server)
m.Log.Errorf("Failed to authenticate the user for %s", server)
continue
}
}

@ -17,6 +17,7 @@ import (
)

var cfg = &OpenConfigTelemetry{
Log: testutil.Logger{},
Servers: []string{"127.0.0.1:50051"},
SampleFrequency: internal.Duration{Duration: time.Second * 2},
}

@ -13,12 +13,16 @@ from the same topic in parallel.
[[inputs.kafka_consumer]]
## topic(s) to consume
topics = ["telegraf"]

## an array of Zookeeper connection strings
zookeeper_peers = ["localhost:2181"]

## Zookeeper Chroot
zookeeper_chroot = ""

## the name of the consumer group
consumer_group = "telegraf_metrics_consumers"

## Offset (must be either "oldest" or "newest")
offset = "oldest"

@ -2,7 +2,6 @@ package kafka_consumer_legacy

import (
"fmt"
"log"
"strings"
"sync"

@ -30,6 +29,8 @@ type Kafka struct {
Offset string
parser parsers.Parser

Log telegraf.Logger

sync.Mutex

// channel for all incoming kafka messages
@ -49,12 +50,16 @@ type Kafka struct {
var sampleConfig = `
## topic(s) to consume
topics = ["telegraf"]

## an array of Zookeeper connection strings
zookeeper_peers = ["localhost:2181"]

## Zookeeper Chroot
zookeeper_chroot = ""

## the name of the consumer group
consumer_group = "telegraf_metrics_consumers"

## Offset (must be either "oldest" or "newest")
offset = "oldest"

@ -96,7 +101,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error {
case "newest":
config.Offsets.Initial = sarama.OffsetNewest
default:
log.Printf("I! WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n",
k.Log.Infof("WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n",
k.Offset)
config.Offsets.Initial = sarama.OffsetOldest
}
@ -121,7 +126,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error {

// Start the kafka message reader
go k.receiver()
log.Printf("I! Started the kafka consumer service, peers: %v, topics: %v\n",
k.Log.Infof("Started the kafka consumer service, peers: %v, topics: %v\n",
k.ZookeeperPeers, k.Topics)
return nil
}

@ -37,6 +37,7 @@ func TestReadsMetricsFromKafka(t *testing.T) {

// Start the Kafka Consumer
k := &Kafka{
Log: testutil.Logger{},
ConsumerGroup: "telegraf_test_consumers",
Topics: []string{testTopic},
ZookeeperPeers: zkPeers,

@ -21,6 +21,7 @@ const (
func newTestKafka() (*Kafka, chan *sarama.ConsumerMessage) {
in := make(chan *sarama.ConsumerMessage, 1000)
k := Kafka{
Log: testutil.Logger{},
ConsumerGroup: "test",
Topics: []string{"telegraf"},
ZookeeperPeers: []string{"localhost:2181"},

@ -3,7 +3,6 @@ package kinesis_consumer
import (
"context"
"fmt"
"log"
"math/big"
"strings"
"sync"
@ -40,6 +39,8 @@ type (
DynamoDB *DynamoDB `toml:"checkpoint_dynamodb"`
MaxUndeliveredMessages int `toml:"max_undelivered_messages"`

Log telegraf.Logger

cons *consumer.Consumer
parser parsers.Parser
cancel context.CancelFunc
@ -220,7 +221,7 @@ func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error {
})
if err != nil {
k.cancel()
log.Printf("E! [inputs.kinesis_consumer] Scan encounterred an error - %s", err.Error())
k.Log.Errorf("Scan encounterred an error: %s", err.Error())
k.cons = nil
}
}()
@ -285,7 +286,7 @@ func (k *KinesisConsumer) onDelivery(ctx context.Context) {
k.lastSeqNum = strToBint(sequenceNum)
k.checkpoint.Set(chk.streamName, chk.shardID, sequenceNum)
} else {
log.Println("D! [inputs.kinesis_consumer] Metric group failed to process")
k.Log.Debug("Metric group failed to process")
}
}
}

@ -114,11 +114,11 @@ var availableCollectors = map[string]func(ctx context.Context, acc telegraf.Accu
"endpoints": collectEndpoints,
"ingress": collectIngress,
"nodes": collectNodes,
"persistentvolumes": collectPersistentVolumes,
"persistentvolumeclaims": collectPersistentVolumeClaims,
"pods": collectPods,
"services": collectServices,
"statefulsets": collectStatefulSets,
"persistentvolumes": collectPersistentVolumes,
"persistentvolumeclaims": collectPersistentVolumeClaims,
}

func (ki *KubernetesInventory) initClient() (*client, error) {
@ -144,12 +144,12 @@ func atoi(s string) int64 {
func convertQuantity(s string, m float64) int64 {
q, err := resource.ParseQuantity(s)
if err != nil {
log.Printf("E! Failed to parse quantity - %v", err)
log.Printf("D! [inputs.kube_inventory] failed to parse quantity: %s", err.Error())
return 0
}
f, err := strconv.ParseFloat(fmt.Sprint(q.AsDec()), 64)
if err != nil {
log.Printf("E! Failed to parse float - %v", err)
log.Printf("D! [inputs.kube_inventory] failed to parse float: %s", err.Error())
return 0
}
if m < 1 {

@ -4,7 +4,6 @@ package logparser

import (
"fmt"
"log"
"strings"
"sync"

@ -14,7 +13,6 @@ import (
"github.com/influxdata/telegraf/internal/globpath"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
// Parsers
)

const (
@ -48,6 +46,8 @@ type LogParserPlugin struct {
FromBeginning bool
WatchMethod string

Log telegraf.Logger

tailers map[string]*tail.Tail
offsets map[string]int64
lines chan logEntry
@ -207,7 +207,7 @@ func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error {
for _, filepath := range l.Files {
g, err := globpath.Compile(filepath)
if err != nil {
log.Printf("E! [inputs.logparser] Error Glob %s failed to compile, %s", filepath, err)
l.Log.Errorf("Glob %q failed to compile: %s", filepath, err)
continue
}
files := g.Match()
@ -221,7 +221,7 @@ func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error {
var seek *tail.SeekInfo
if !fromBeginning {
if offset, ok := l.offsets[file]; ok {
log.Printf("D! [inputs.tail] using offset %d for file: %v", offset, file)
l.Log.Debugf("Using offset %d for file: %v", offset, file)
seek = &tail.SeekInfo{
Whence: 0,
Offset: offset,
@ -248,7 +248,7 @@ func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error {
continue
}

log.Printf("D! [inputs.logparser] tail added for file: %v", file)
l.Log.Debugf("Tail added for file: %v", file)

// create a goroutine for each "tailer"
l.wg.Add(1)
@ -269,7 +269,7 @@ func (l *LogParserPlugin) receiver(tailer *tail.Tail) {
for line = range tailer.Lines {

if line.Err != nil {
log.Printf("E! [inputs.logparser] Error tailing file %s, Error: %s",
l.Log.Errorf("Error tailing file %s, Error: %s",
tailer.Filename, line.Err)
continue
}
@ -315,7 +315,7 @@ func (l *LogParserPlugin) parser() {
l.acc.AddFields(m.Name(), m.Fields(), tags, m.Time())
}
} else {
log.Println("E! [inputs.logparser] Error parsing log line: " + err.Error())
l.Log.Errorf("Error parsing log line: %s", err.Error())
}

}
@ -332,7 +332,7 @@ func (l *LogParserPlugin) Stop() {
offset, err := t.Tell()
if err == nil {
l.offsets[t.Filename] = offset
log.Printf("D! [inputs.logparser] recording offset %d for file: %v", offset, t.Filename)
l.Log.Debugf("Recording offset %d for file: %v", offset, t.Filename)
} else {
l.acc.AddError(fmt.Errorf("error recording offset for file %s", t.Filename))
}
@ -340,10 +340,10 @@ func (l *LogParserPlugin) Stop() {
err := t.Stop()

//message for a stopped tailer
log.Printf("D! [inputs.logparser] tail dropped for file: %v", t.Filename)
l.Log.Debugf("Tail dropped for file: %v", t.Filename)

if err != nil {
log.Printf("E! [inputs.logparser] Error stopping tail on file %s", t.Filename)
l.Log.Errorf("Error stopping tail on file %s", t.Filename)
}
}
close(l.done)

@ -14,6 +14,7 @@ import (

func TestStartNoParsers(t *testing.T) {
logparser := &LogParserPlugin{
Log: testutil.Logger{},
FromBeginning: true,
Files: []string{"testdata/*.log"},
}
@ -26,6 +27,7 @@ func TestGrokParseLogFilesNonExistPattern(t *testing.T) {
thisdir := getCurrentDir()

logparser := &LogParserPlugin{
Log: testutil.Logger{},
FromBeginning: true,
Files: []string{thisdir + "testdata/*.log"},
GrokConfig: GrokConfig{
@ -43,6 +45,7 @@ func TestGrokParseLogFiles(t *testing.T) {
thisdir := getCurrentDir()

logparser := &LogParserPlugin{
Log: testutil.Logger{},
GrokConfig: GrokConfig{
MeasurementName: "logparser_grok",
Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
@ -89,6 +92,7 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) {
thisdir := getCurrentDir()

logparser := &LogParserPlugin{
Log: testutil.Logger{},
FromBeginning: true,
Files: []string{emptydir + "/*.log"},
GrokConfig: GrokConfig{
@ -128,6 +132,7 @@ func TestGrokParseLogFilesOneBad(t *testing.T) {
thisdir := getCurrentDir()

logparser := &LogParserPlugin{
Log: testutil.Logger{},
FromBeginning: true,
Files: []string{thisdir + "testdata/test_a.log"},
GrokConfig: GrokConfig{

@ -134,7 +134,7 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) {
req.URL.RawQuery = params.String()
req.Header.Set("User-Agent", "Telegraf-MailChimp-Plugin")
if api.Debug {
log.Printf("D! Request URL: %s", req.URL.String())
log.Printf("D! [inputs.mailchimp] request URL: %s", req.URL.String())
}

resp, err := client.Do(req)
@ -148,7 +148,7 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) {
return nil, err
}
if api.Debug {
log.Printf("D! Response Body:%s", string(body))
log.Printf("D! [inputs.mailchimp] response Body: %q", string(body))
}

if err = chimpErrorCheck(body); err != nil {

@ -10,8 +10,10 @@ For more information, please check the [Mesos Observability Metrics](http://meso
[[inputs.mesos]]
## Timeout, in ms.
timeout = 100

## A list of Mesos masters.
masters = ["http://localhost:5050"]

## Master metrics groups to be collected, by default, all enabled.
master_collections = [
"resources",
@ -26,8 +28,10 @@ For more information, please check the [Mesos Observability Metrics](http://meso
"registrar",
"allocator",
]

## A list of Mesos slaves, default is []
# slaves = []

## Slave metrics groups to be collected, by default, all enabled.
# slave_collections = [
# "resources",

@ -32,9 +32,10 @@ type Mesos struct {
MasterCols []string `toml:"master_collections"`
Slaves []string
SlaveCols []string `toml:"slave_collections"`
//SlaveTasks bool
tls.ClientConfig

Log telegraf.Logger

initialized bool
client *http.Client
masterURLs []*url.URL
@ -49,8 +50,10 @@ var allMetrics = map[Role][]string{
var sampleConfig = `
## Timeout, in ms.
timeout = 100

## A list of Mesos masters.
masters = ["http://localhost:5050"]

## Master metrics groups to be collected, by default, all enabled.
master_collections = [
"resources",
@ -65,8 +68,10 @@ var sampleConfig = `
"registrar",
"allocator",
]

## A list of Mesos slaves, default is []
# slaves = []

## Slave metrics groups to be collected, by default, all enabled.
# slave_collections = [
# "resources",
@ -110,7 +115,7 @@ func parseURL(s string, role Role) (*url.URL, error) {
}

s = "http://" + host + ":" + port
log.Printf("W! [inputs.mesos] Using %q as connection URL; please update your configuration to use an URL", s)
log.Printf("W! [inputs.mesos] using %q as connection URL; please update your configuration to use an URL", s)
}

return url.Parse(s)
@ -126,7 +131,7 @@ func (m *Mesos) initialize() error {
}

if m.Timeout == 0 {
log.Println("I! [inputs.mesos] Missing timeout value, setting default value (100ms)")
m.Log.Info("Missing timeout value, setting default value (100ms)")
m.Timeout = 100
}

@ -191,17 +196,6 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error {
wg.Done()
return
}(slave)

// if !m.SlaveTasks {
// continue
// }

// wg.Add(1)
// go func(c string) {
// acc.AddError(m.gatherSlaveTaskMetrics(slave, acc))
// wg.Done()
// return
// }(v)
}

wg.Wait()
@ -487,7 +481,7 @@ func getMetrics(role Role, group string) []string {
ret, ok := m[group]

if !ok {
log.Printf("I! [mesos] Unknown %s metrics group: %s\n", role, group)
log.Printf("I! [inputs.mesos] unknown role %q metrics group: %s", role, group)
return []string{}
}

@ -349,6 +349,7 @@ func TestMesosMaster(t *testing.T) {
var acc testutil.Accumulator

m := Mesos{
Log: testutil.Logger{},
Masters: []string{masterTestServer.Listener.Addr().String()},
Timeout: 10,
}
@ -364,6 +365,7 @@ func TestMesosMaster(t *testing.T) {

func TestMasterFilter(t *testing.T) {
m := Mesos{
Log: testutil.Logger{},
MasterCols: []string{
"resources", "master", "registrar", "allocator",
},
@ -416,6 +418,7 @@ func TestMesosSlave(t *testing.T) {
var acc testutil.Accumulator

m := Mesos{
Log: testutil.Logger{},
Masters: []string{},
Slaves: []string{slaveTestServer.Listener.Addr().String()},
// SlaveTasks: true,
@ -433,6 +436,7 @@ func TestMesosSlave(t *testing.T) {

func TestSlaveFilter(t *testing.T) {
m := Mesos{
Log: testutil.Logger{},
SlaveCols: []string{
"resources", "agent", "tasks",
},

@ -4,7 +4,6 @@ import (
"crypto/tls"
"crypto/x509"
"fmt"
"log"
"net"
"net/url"
"strings"
@ -25,6 +24,8 @@ type MongoDB struct {
GatherColStats bool
ColStatsDbs []string
tlsint.ClientConfig

Log telegraf.Logger
}

type Ssl struct {
@ -82,24 +83,24 @@ func (m *MongoDB) Gather(acc telegraf.Accumulator) error {
// Preserve backwards compatibility for hostnames without a
// scheme, broken in go 1.8. Remove in Telegraf 2.0
serv = "mongodb://" + serv
log.Printf("W! [inputs.mongodb] Using %q as connection URL; please update your configuration to use an URL", serv)
m.Log.Warnf("Using %q as connection URL; please update your configuration to use an URL", serv)
m.Servers[i] = serv
}

u, err := url.Parse(serv)
if err != nil {
acc.AddError(fmt.Errorf("Unable to parse address %q: %s", serv, err))
m.Log.Errorf("Unable to parse address %q: %s", serv, err.Error())
continue
}
if u.Host == "" {
acc.AddError(fmt.Errorf("Unable to parse address %q", serv))
m.Log.Errorf("Unable to parse address %q", serv)
continue
}

wg.Add(1)
go func(srv *Server) {
defer wg.Done()
acc.AddError(m.gatherServer(srv, acc))
m.Log.Error(m.gatherServer(srv, acc))
}(m.getMongoServer(u))
}

@ -110,6 +111,7 @@ func (m *MongoDB) Gather(acc telegraf.Accumulator) error {
func (m *MongoDB) getMongoServer(url *url.URL) *Server {
if _, ok := m.mongos[url.Host]; !ok {
m.mongos[url.Host] = &Server{
Log: m.Log,
Url: url,
}
}
@ -126,8 +128,7 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error {
}
dialInfo, err := mgo.ParseURL(dialAddrs[0])
if err != nil {
return fmt.Errorf("Unable to parse URL (%s), %s\n",
dialAddrs[0], err.Error())
return fmt.Errorf("unable to parse URL %q: %s", dialAddrs[0], err.Error())
}
dialInfo.Direct = true
dialInfo.Timeout = 5 * time.Second
@ -169,7 +170,7 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error {

sess, err := mgo.DialWithInfo(dialInfo)
if err != nil {
return fmt.Errorf("Unable to connect to MongoDB, %s\n", err.Error())
return fmt.Errorf("unable to connect to MongoDB: %s", err.Error())
}
server.Session = sess
}

@ -1,7 +1,7 @@
package mongodb

import (
"log"
"fmt"
"net/url"
"strings"
"time"
@ -15,6 +15,8 @@ type Server struct {
Url *url.URL
Session *mgo.Session
lastResult *MongoStatus

Log telegraf.Logger
}

func (s *Server) getDefaultTags() map[string]string {
@ -31,11 +33,11 @@ func IsAuthorization(err error) bool {
return strings.Contains(err.Error(), "not authorized")
}

func authLogLevel(err error) string {
func (s *Server) authLog(err error) {
if IsAuthorization(err) {
return "D!"
s.Log.Debug(err.Error())
} else {
return "E!"
s.Log.Error(err.Error())
}
}

@ -158,30 +160,30 @@ func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error)
}

results := &ColStats{}
for _, db_name := range names {
if stringInSlice(db_name, colStatsDbs) || len(colStatsDbs) == 0 {
for _, dbName := range names {
if stringInSlice(dbName, colStatsDbs) || len(colStatsDbs) == 0 {
var colls []string
colls, err = s.Session.DB(db_name).CollectionNames()
colls, err = s.Session.DB(dbName).CollectionNames()
if err != nil {
log.Printf("E! [inputs.mongodb] Error getting collection names: %v", err)
s.Log.Errorf("Error getting collection names: %s", err.Error())
continue
}
for _, col_name := range colls {
col_stat_line := &ColStatsData{}
err = s.Session.DB(db_name).Run(bson.D{
for _, colName := range colls {
colStatLine := &ColStatsData{}
err = s.Session.DB(dbName).Run(bson.D{
{
Name: "collStats",
Value: col_name,
Value: colName,
},
}, col_stat_line)
}, colStatLine)
if err != nil {
log.Printf("%s [inputs.mongodb] Error getting col stats from %q: %v", authLogLevel(err), col_name, err)
s.authLog(fmt.Errorf("error getting col stats from %q: %v", colName, err))
continue
}
collection := &Collection{
Name: col_name,
DbName: db_name,
ColStatsData: col_stat_line,
Name: colName,
DbName: dbName,
ColStatsData: colStatLine,
}
results.Collections = append(results.Collections, *collection)
}
@ -203,7 +205,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool, gather
// member of a replica set.
replSetStatus, err := s.gatherReplSetStatus()
if err != nil {
log.Printf("D! [inputs.mongodb] Unable to gather replica set status: %v", err)
s.Log.Debugf("Unable to gather replica set status: %s", err.Error())
}

// Gather the oplog if we are a member of a replica set. Non-replica set
@ -218,13 +220,12 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool, gather

clusterStatus, err := s.gatherClusterStatus()
if err != nil {
log.Printf("D! [inputs.mongodb] Unable to gather cluster status: %v", err)
s.Log.Debugf("Unable to gather cluster status: %s", err.Error())
}

shardStats, err := s.gatherShardConnPoolStats()
if err != nil {
log.Printf("%s [inputs.mongodb] Unable to gather shard connection pool stats: %v",
authLogLevel(err), err)
s.authLog(fmt.Errorf("unable to gather shard connection pool stats: %s", err.Error()))
}

var collectionStats *ColStats
@ -246,7 +247,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool, gather
for _, name := range names {
db, err := s.gatherDBStats(name)
if err != nil {
log.Printf("D! [inputs.mongodb] Error getting db stats from %q: %v", name, err)
s.Log.Debugf("Error getting db stats from %q: %s", name, err.Error())
}
dbStats.Dbs = append(dbStats.Dbs, *db)
}

@ -4,7 +4,6 @@ import (
"context"
"errors"
"fmt"
"log"
"strings"
"time"

@ -61,6 +60,8 @@ type MQTTConsumer struct {
ClientID string `toml:"client_id"`
tls.ClientConfig

Log telegraf.Logger

clientFactory ClientFactory
client Client
opts *mqtt.ClientOptions
@ -212,7 +213,7 @@ func (m *MQTTConsumer) connect() error {
return err
}

log.Printf("I! [inputs.mqtt_consumer] Connected %v", m.Servers)
m.Log.Infof("Connected %v", m.Servers)
m.state = Connected
m.sem = make(semaphore, m.MaxUndeliveredMessages)
m.messages = make(map[telegraf.TrackingID]bool)
@ -223,7 +224,7 @@ func (m *MQTTConsumer) connect() error {
SessionPresent() bool
}
if t, ok := token.(sessionPresent); ok && t.SessionPresent() {
log.Printf("D! [inputs.mqtt_consumer] Session found %v", m.Servers)
m.Log.Debugf("Session found %v", m.Servers)
return nil
}

@ -244,7 +245,7 @@ func (m *MQTTConsumer) connect() error {

func (m *MQTTConsumer) onConnectionLost(c mqtt.Client, err error) {
m.acc.AddError(fmt.Errorf("connection lost: %v", err))
log.Printf("D! [inputs.mqtt_consumer] Disconnected %v", m.Servers)
m.Log.Debugf("Disconnected %v", m.Servers)
m.state = Disconnected
return
}
@ -292,9 +293,9 @@ func (m *MQTTConsumer) onMessage(acc telegraf.TrackingAccumulator, msg mqtt.Mess

func (m *MQTTConsumer) Stop() {
if m.state == Connected {
log.Printf("D! [inputs.mqtt_consumer] Disconnecting %v", m.Servers)
m.Log.Debugf("Disconnecting %v", m.Servers)
m.client.Disconnect(200)
log.Printf("D! [inputs.mqtt_consumer] Disconnected %v", m.Servers)
m.Log.Debugf("Disconnected %v", m.Servers)
m.state = Disconnected
}
m.cancel()
@ -303,7 +304,7 @@ func (m *MQTTConsumer) Stop() {
func (m *MQTTConsumer) Gather(acc telegraf.Accumulator) error {
if m.state == Disconnected {
m.state = Connecting
log.Printf("D! [inputs.mqtt_consumer] Connecting %v", m.Servers)
m.Log.Debugf("Connecting %v", m.Servers)
m.connect()
}

@ -346,7 +347,7 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
for _, server := range m.Servers {
// Preserve support for host:port style servers; deprecated in Telegraf 1.4.4
if !strings.Contains(server, "://") {
log.Printf("W! [inputs.mqtt_consumer] Server %q should be updated to use `scheme://host:port` format", server)
m.Log.Warnf("Server %q should be updated to use `scheme://host:port` format", server)
if tlsCfg == nil {
server = "tcp://" + server
} else {

@ -102,6 +102,7 @@ func TestLifecycleSanity(t *testing.T) {
},
}
})
plugin.Log = testutil.Logger{}
plugin.Servers = []string{"tcp://127.0.0.1"}

parser := &FakeParser{}
@ -124,10 +125,12 @@ func TestRandomClientID(t *testing.T) {
var err error

m1 := New(nil)
m1.Log = testutil.Logger{}
err = m1.Init()
require.NoError(t, err)

m2 := New(nil)
m2.Log = testutil.Logger{}
err = m2.Init()
require.NoError(t, err)

@ -137,6 +140,7 @@ func TestRandomClientID(t *testing.T) {
// PersistentSession requires ClientID
func TestPersistentClientIDFail(t *testing.T) {
plugin := New(nil)
plugin.Log = testutil.Logger{}
plugin.PersistentSession = true

err := plugin.Init()
@ -255,6 +259,7 @@ func TestTopicTag(t *testing.T) {
plugin := New(func(o *mqtt.ClientOptions) Client {
return client
})
plugin.Log = testutil.Logger{}
plugin.Topics = []string{"telegraf"}
plugin.TopicTag = tt.topicTag()

@ -295,6 +300,7 @@ func TestAddRouteCalledForEachTopic(t *testing.T) {
plugin := New(func(o *mqtt.ClientOptions) Client {
return client
})
plugin.Log = testutil.Logger{}
plugin.Topics = []string{"a", "b"}

err := plugin.Init()
@ -325,6 +331,7 @@ func TestSubscribeCalledIfNoSession(t *testing.T) {
plugin := New(func(o *mqtt.ClientOptions) Client {
return client
})
plugin.Log = testutil.Logger{}
plugin.Topics = []string{"b"}

err := plugin.Init()
@ -355,6 +362,7 @@ func TestSubscribeNotCalledIfSession(t *testing.T) {
plugin := New(func(o *mqtt.ClientOptions) Client {
return client
})
plugin.Log = testutil.Logger{}
plugin.Topics = []string{"b"}

err := plugin.Init()

@ -12,8 +12,10 @@ instances of telegraf can read from a NATS cluster in parallel.
[[inputs.nats_consumer]]
## urls of NATS servers
servers = ["nats://localhost:4222"]

## subject(s) to consume
subjects = ["telegraf"]

## name a queue group
queue_group = "telegraf_consumers"

@ -3,7 +3,6 @@ package natsconsumer
import (
"context"
"fmt"
"log"
"sync"

"github.com/influxdata/telegraf"
@ -40,6 +39,8 @@ type natsConsumer struct {
Password string `toml:"password"`
tls.ClientConfig

Log telegraf.Logger

// Client pending limits:
PendingMessageLimit int `toml:"pending_message_limit"`
PendingBytesLimit int `toml:"pending_bytes_limit"`
@ -68,6 +69,7 @@ var sampleConfig = `

## subject(s) to consume
subjects = ["telegraf"]

## name a queue group
queue_group = "telegraf_consumers"

@ -198,7 +200,7 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error {
go n.receiver(ctx)
}()

log.Printf("I! Started the NATS consumer service, nats: %v, subjects: %v, queue: %v\n",
n.Log.Infof("Started the NATS consumer service, nats: %v, subjects: %v, queue: %v",
n.conn.ConnectedUrl(), n.Subjects, n.QueueGroup)

return nil
@ -216,21 +218,21 @@ func (n *natsConsumer) receiver(ctx context.Context) {
case <-n.acc.Delivered():
<-sem
case err := <-n.errs:
n.acc.AddError(err)
n.Log.Error(err)
case sem <- empty{}:
select {
case <-ctx.Done():
return
case err := <-n.errs:
<-sem
n.acc.AddError(err)
n.Log.Error(err)
case <-n.acc.Delivered():
<-sem
<-sem
case msg := <-n.in:
metrics, err := n.parser.Parse(msg.Data)
if err != nil {
n.acc.AddError(fmt.Errorf("subject: %s, error: %s", msg.Subject, err.Error()))
n.Log.Errorf("Subject: %s, error: %s", msg.Subject, err.Error())
<-sem
continue
}
@ -244,8 +246,8 @@ func (n *natsConsumer) receiver(ctx context.Context) {
func (n *natsConsumer) clean() {
for _, sub := range n.subs {
if err := sub.Unsubscribe(); err != nil {
n.acc.AddError(fmt.Errorf("Error unsubscribing from subject %s in queue %s: %s\n",
sub.Subject, sub.Queue, err.Error()))
n.Log.Errorf("Error unsubscribing from subject %s in queue %s: %s",
sub.Subject, sub.Queue, err.Error())
}
}

@ -10,8 +10,10 @@ of the supported [input data formats][].
[[inputs.nsq_consumer]]
## Server option still works but is deprecated, we just prepend it to the nsqd array.
# server = "localhost:4150"

## An array representing the NSQD TCP HTTP Endpoints
nsqd = ["localhost:4150"]

## An array representing the NSQLookupd HTTP Endpoints
nsqlookupd = ["localhost:4161"]
topic = "telegraf"

@ -2,7 +2,6 @@ package nsq_consumer

import (
"context"
"log"
"sync"

"github.com/influxdata/telegraf"
@ -18,10 +17,12 @@ const (
type empty struct{}
type semaphore chan empty

type logger struct{}
type logger struct {
log telegraf.Logger
}

func (l *logger) Output(calldepth int, s string) error {
log.Println("D! [inputs.nsq_consumer] " + s)
l.log.Debug(s)
return nil
}

@ -39,6 +40,8 @@ type NSQConsumer struct {
parser parsers.Parser
consumer *nsq.Consumer

Log telegraf.Logger

mu sync.Mutex
messages map[telegraf.TrackingID]*nsq.Message
wg sync.WaitGroup
@ -48,8 +51,10 @@ type NSQConsumer struct {
var sampleConfig = `
## Server option still works but is deprecated, we just prepend it to the nsqd array.
# server = "localhost:4150"

## An array representing the NSQD TCP HTTP Endpoints
nsqd = ["localhost:4150"]

## An array representing the NSQLookupd HTTP Endpoints
nsqlookupd = ["localhost:4161"]
topic = "telegraf"
@ -98,7 +103,7 @@ func (n *NSQConsumer) Start(ac telegraf.Accumulator) error {
n.cancel = cancel

n.connect()
n.consumer.SetLogger(&logger{}, nsq.LogLevelInfo)
n.consumer.SetLogger(&logger{log: n.Log}, nsq.LogLevelInfo)
n.consumer.AddHandler(nsq.HandlerFunc(func(message *nsq.Message) error {
metrics, err := n.parser.Parse(message.Body)
if err != nil {

@ -36,6 +36,7 @@ func TestReadsMetricsFromNSQ(t *testing.T) {
newMockNSQD(script, addr.String())

consumer := &NSQConsumer{
Log: testutil.Logger{},
Server: "127.0.0.1:4155",
Topic: "telegraf",
Channel: "consume",

@ -4,11 +4,9 @@ import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"strings"

// register in driver.
_ "github.com/jackc/pgx/stdlib"

"github.com/influxdata/telegraf"
@ -23,6 +21,8 @@ type Postgresql struct {
AdditionalTags []string
Query query
Debug bool

Log telegraf.Logger
}

type query []struct {
@ -186,7 +186,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
if p.Query[i].Version <= db_version {
rows, err := p.DB.Query(sql_query)
if err != nil {
acc.AddError(err)
p.Log.Error(err.Error())
continue
}

@ -194,7 +194,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {

// grab the column information from the result
if columns, err = rows.Columns(); err != nil {
acc.AddError(err)
p.Log.Error(err.Error())
continue
}

@ -209,7 +209,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
for rows.Next() {
err = p.accRow(meas_name, rows, acc, columns)
if err != nil {
acc.AddError(err)
p.Log.Error(err.Error())
break
}
}
@ -272,7 +272,7 @@ func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumula
fields := make(map[string]interface{})
COLUMN:
for col, val := range columnMap {
log.Printf("D! postgresql_extensible: column: %s = %T: %v\n", col, *val, *val)
p.Log.Debugf("Column: %s = %T: %v\n", col, *val, *val)
_, ignore := ignoredColumns[col]
if ignore || *val == nil {
continue
@ -290,7 +290,7 @@ COLUMN:
case int64, int32, int:
tags[col] = fmt.Sprintf("%d", v)
default:
log.Println("failed to add additional tag", col)
p.Log.Debugf("Failed to add %q as additional tag", col)
}
continue COLUMN
}

@ -13,6 +13,7 @@ import (

func queryRunner(t *testing.T, q query) *testutil.Accumulator {
p := &Postgresql{
Log: testutil.Logger{},
Service: postgresql.Service{
Address: fmt.Sprintf(
"host=%s user=postgres sslmode=disable",
@ -232,6 +233,7 @@ func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) {
}

p := &Postgresql{
Log: testutil.Logger{},
Service: postgresql.Service{
Address: fmt.Sprintf(
"host=%s user=postgres sslmode=disable",
@ -251,7 +253,10 @@ func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) {
}

func TestAccRow(t *testing.T) {
p := Postgresql{}
p := Postgresql{
Log: testutil.Logger{},
}

var acc testutil.Accumulator
columns := []string{"datname", "cat"}

@ -110,8 +110,8 @@ func parseResponse(metrics string) map[string]interface{} {

i, err := strconv.ParseInt(m[1], 10, 64)
if err != nil {
log.Printf("E! powerdns: Error parsing integer for metric [%s]: %s",
metric, err)
log.Printf("E! [inputs.powerdns] error parsing integer for metric %q: %s",
metric, err.Error())
continue
}
values[m[0]] = i

@ -139,8 +139,8 @@ func parseResponse(metrics string) map[string]interface{} {

i, err := strconv.ParseInt(m[1], 10, 64)
if err != nil {
log.Printf("E! [inputs.powerdns_recursor] Error parsing integer for metric [%s] %v",
metric, err)
log.Printf("E! [inputs.powerdns_recursor] error parsing integer for metric %q: %s",
metric, err.Error())
continue
}
values[m[0]] = i

@ -6,7 +6,6 @@ import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
@ -23,6 +22,8 @@ type Processes struct {
execPS func() ([]byte, error)
readProcFile func(filename string) ([]byte, error)

Log telegraf.Logger

forcePS bool
forceProc bool
}
@ -124,8 +125,7 @@ func (p *Processes) gatherFromPS(fields map[string]interface{}) error {
case '?':
fields["unknown"] = fields["unknown"].(int64) + int64(1)
default:
log.Printf("I! processes: Unknown state [ %s ] from ps",
string(status[0]))
p.Log.Infof("Unknown state %q from ps", string(status[0]))
}
fields["total"] = fields["total"].(int64) + int64(1)
}
@ -184,14 +184,13 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error {
}
fields["parked"] = int64(1)
default:
log.Printf("I! processes: Unknown state [ %s ] in file %s",
string(stats[0][0]), filename)
p.Log.Infof("Unknown state %q in file %q", string(stats[0][0]), filename)
}
fields["total"] = fields["total"].(int64) + int64(1)

threads, err := strconv.Atoi(string(stats[17]))
if err != nil {
log.Printf("I! processes: Error parsing thread count: %s", err)
p.Log.Infof("Error parsing thread count: %s", err.Error())
continue
}
fields["total_threads"] = fields["total_threads"].(int64) + int64(threads)

@ -16,6 +16,7 @@ import (

func TestProcesses(t *testing.T) {
processes := &Processes{
Log: testutil.Logger{},
execPS: execPS,
readProcFile: readProcFile,
}
@ -35,6 +36,7 @@ func TestProcesses(t *testing.T) {

func TestFromPS(t *testing.T) {
processes := &Processes{
Log: testutil.Logger{},
execPS: testExecPS,
forcePS: true,
}
@ -56,6 +58,7 @@ func TestFromPS(t *testing.T) {

func TestFromPSError(t *testing.T) {
processes := &Processes{
Log: testutil.Logger{},
execPS: testExecPSError,
forcePS: true,
}
@ -71,6 +74,7 @@ func TestFromProcFiles(t *testing.T) {
}
tester := tester{}
processes := &Processes{
Log: testutil.Logger{},
readProcFile: tester.testProcFile,
forceProc: true,
}
@ -93,6 +97,7 @@ func TestFromProcFilesWithSpaceInCmd(t *testing.T) {
}
tester := tester{}
processes := &Processes{
Log: testutil.Logger{},
readProcFile: tester.testProcFile2,
forceProc: true,
}
@ -120,6 +125,7 @@ func TestParkedProcess(t *testing.T) {
procstat := `88 (watchdog/13) P 2 0 0 0 -1 69238848 0 0 0 0 0 0 0 0 20 0 1 0 20 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 1 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0
`
plugin := &Processes{
Log: testutil.Logger{},
readProcFile: func(string) ([]byte, error) {
return []byte(procstat), nil
},

@@ -68,7 +68,7 @@ func (p *Prometheus) start(ctx context.Context) error {
 		case <-time.After(time.Second):
 			err := p.watch(ctx, client)
 			if err != nil {
-				log.Printf("E! [inputs.prometheus] unable to watch resources: %v", err)
+				p.Log.Errorf("Unable to watch resources: %s", err.Error())
 			}
 		}
 	}

@@ -144,7 +144,7 @@ func registerPod(pod *corev1.Pod, p *Prometheus) {
 		return
 	}

-	log.Printf("D! [inputs.prometheus] will scrape metrics from %s", *targetURL)
+	log.Printf("D! [inputs.prometheus] will scrape metrics from %q", *targetURL)
 	// add annotation as metrics tags
 	tags := pod.GetMetadata().GetAnnotations()
 	if tags == nil {

@@ -158,7 +158,7 @@ func registerPod(pod *corev1.Pod, p *Prometheus) {
 	}
 	URL, err := url.Parse(*targetURL)
 	if err != nil {
-		log.Printf("E! [inputs.prometheus] could not parse URL %s: %v", *targetURL, err)
+		log.Printf("E! [inputs.prometheus] could not parse URL %q: %s", *targetURL, err.Error())
 		return
 	}
 	podURL := p.AddressToURL(URL, URL.Hostname())

@@ -211,13 +211,13 @@ func unregisterPod(pod *corev1.Pod, p *Prometheus) {
 		return
 	}

-	log.Printf("D! [inputs.prometheus] registered a delete request for %s in namespace %s",
+	log.Printf("D! [inputs.prometheus] registered a delete request for %q in namespace %q",
 		pod.GetMetadata().GetName(), pod.GetMetadata().GetNamespace())

 	p.lock.Lock()
 	defer p.lock.Unlock()
 	if _, ok := p.kubernetesPods[*url]; ok {
 		delete(p.kubernetesPods, *url)
-		log.Printf("D! [inputs.prometheus] will stop scraping for %s", *url)
+		log.Printf("D! [inputs.prometheus] will stop scraping for %q", *url)
 	}
 }
@@ -3,6 +3,7 @@ package prometheus
 import (
 	"testing"

+	"github.com/influxdata/telegraf/testutil"
 	"github.com/stretchr/testify/assert"

 	v1 "github.com/ericchiang/k8s/apis/core/v1"

@@ -53,7 +54,7 @@ func TestScrapeURLAnnotationsCustomPathWithSep(t *testing.T) {
 }

 func TestAddPod(t *testing.T) {
-	prom := &Prometheus{}
+	prom := &Prometheus{Log: testutil.Logger{}}

 	p := pod()
 	p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"}

@@ -62,7 +63,7 @@ func TestAddPod(t *testing.T) {
 }

 func TestAddMultipleDuplicatePods(t *testing.T) {
-	prom := &Prometheus{}
+	prom := &Prometheus{Log: testutil.Logger{}}

 	p := pod()
 	p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"}

@@ -73,7 +74,7 @@ func TestAddMultipleDuplicatePods(t *testing.T) {
 }

 func TestAddMultiplePods(t *testing.T) {
-	prom := &Prometheus{}
+	prom := &Prometheus{Log: testutil.Logger{}}

 	p := pod()
 	p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"}

@@ -85,7 +86,7 @@ func TestAddMultiplePods(t *testing.T) {
 }

 func TestDeletePods(t *testing.T) {
-	prom := &Prometheus{}
+	prom := &Prometheus{Log: testutil.Logger{}}

 	p := pod()
 	p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"}
@@ -5,7 +5,6 @@ import (
 	"errors"
 	"fmt"
 	"io/ioutil"
-	"log"
 	"net"
 	"net/http"
 	"net/url"

@@ -42,6 +41,8 @@ type Prometheus struct {

 	tls.ClientConfig

+	Log telegraf.Logger
+
 	client *http.Client

 	// Should we scrape Kubernetes services for prometheus annotations

@@ -136,7 +137,7 @@ func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) {
 	for _, u := range p.URLs {
 		URL, err := url.Parse(u)
 		if err != nil {
-			log.Printf("prometheus: Could not parse %s, skipping it. Error: %s", u, err.Error())
+			p.Log.Errorf("Could not parse %q, skipping it. Error: %s", u, err.Error())
 			continue
 		}
 		allURLs[URL.String()] = URLAndAddress{URL: URL, OriginalURL: URL}

@@ -157,7 +158,7 @@ func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) {

 		resolvedAddresses, err := net.LookupHost(URL.Hostname())
 		if err != nil {
-			log.Printf("prometheus: Could not resolve %s, skipping it. Error: %s", URL.Host, err.Error())
+			p.Log.Errorf("Could not resolve %q, skipping it. Error: %s", URL.Host, err.Error())
 			continue
 		}
 		for _, resolved := range resolvedAddresses {
@@ -37,6 +37,7 @@ func TestPrometheusGeneratesMetrics(t *testing.T) {
 	defer ts.Close()

 	p := &Prometheus{
+		Log:  testutil.Logger{},
 		URLs: []string{ts.URL},
 	}

@@ -60,6 +61,7 @@ func TestPrometheusGeneratesMetricsWithHostNameTag(t *testing.T) {
 	defer ts.Close()

 	p := &Prometheus{
+		Log:                testutil.Logger{},
 		KubernetesServices: []string{ts.URL},
 	}
 	u, _ := url.Parse(ts.URL)

@@ -89,6 +91,7 @@ func TestPrometheusGeneratesMetricsAlthoughFirstDNSFails(t *testing.T) {
 	defer ts.Close()

 	p := &Prometheus{
+		Log:                testutil.Logger{},
 		URLs:               []string{ts.URL},
 		KubernetesServices: []string{"http://random.telegraf.local:88/metrics"},
 	}
@@ -4,7 +4,6 @@ import (
 	"bufio"
 	"fmt"
 	"io"
-	"log"
 	"net/url"
 	"regexp"
 	"strconv"

@@ -23,6 +22,8 @@ type Redis struct {
 	Password string
 	tls.ClientConfig

+	Log telegraf.Logger
+
 	clients     []Client
 	initialized bool
 }

@@ -101,13 +102,13 @@ func (r *Redis) init(acc telegraf.Accumulator) error {

 	for i, serv := range r.Servers {
 		if !strings.HasPrefix(serv, "tcp://") && !strings.HasPrefix(serv, "unix://") {
-			log.Printf("W! [inputs.redis]: server URL found without scheme; please update your configuration file")
+			r.Log.Warn("Server URL found without scheme; please update your configuration file")
 			serv = "tcp://" + serv
 		}

 		u, err := url.Parse(serv)
 		if err != nil {
-			return fmt.Errorf("Unable to parse to address %q: %v", serv, err)
+			return fmt.Errorf("unable to parse to address %q: %s", serv, err.Error())
 		}

 		password := ""
@@ -20,6 +20,7 @@ func TestRedisConnect(t *testing.T) {
 	addr := fmt.Sprintf(testutil.GetLocalHost() + ":6379")

 	r := &Redis{
+		Log:     testutil.Logger{},
 		Servers: []string{addr},
 	}
@@ -3,7 +3,6 @@ package smart
 import (
 	"bufio"
 	"fmt"
-	"log"
 	"os/exec"
 	"path"
 	"regexp"

@@ -120,6 +119,7 @@ type Smart struct {
 	Devices []string
 	UseSudo bool
 	Timeout internal.Duration
+	Log     telegraf.Logger
 }

 var sampleConfig = `

@@ -209,10 +209,10 @@ func (m *Smart) scan() ([]string, error) {
 	for _, line := range strings.Split(string(out), "\n") {
 		dev := strings.Split(line, " ")
 		if len(dev) > 1 && !excludedDev(m.Excludes, strings.TrimSpace(dev[0])) {
-			log.Printf("D! [inputs.smart] adding device: %+#v", dev)
+			m.Log.Debugf("Adding device: %+#v", dev)
 			devices = append(devices, strings.TrimSpace(dev[0]))
 		} else {
-			log.Printf("D! [inputs.smart] skipping device: %+#v", dev)
+			m.Log.Debugf("Skipping device: %+#v", dev)
 		}
 	}
 	return devices, nil
@@ -15,6 +15,7 @@ import (

 func TestGatherAttributes(t *testing.T) {
 	s := NewSmart()
+	s.Log = testutil.Logger{}
 	s.Path = "smartctl"
 	s.Attributes = true

@@ -330,6 +331,7 @@ func TestGatherAttributes(t *testing.T) {

 func TestGatherNoAttributes(t *testing.T) {
 	s := NewSmart()
+	s.Log = testutil.Logger{}
 	s.Path = "smartctl"
 	s.Attributes = false
@@ -90,7 +90,7 @@ func execCmd(arg0 string, args ...string) ([]byte, error) {
 		for _, arg := range args {
 			quoted = append(quoted, fmt.Sprintf("%q", arg))
 		}
-		log.Printf("D! [inputs.snmp] Executing %q %s", arg0, strings.Join(quoted, " "))
+		log.Printf("D! [inputs.snmp] executing %q %s", arg0, strings.Join(quoted, " "))
 	}

 	out, err := execCommand(arg0, args...).Output()
@@ -1,7 +1,6 @@
 package snmp_legacy

 import (
 	"fmt"
 	"io/ioutil"
-	"log"
 	"net"

@@ -24,6 +23,8 @@ type Snmp struct {
 	Subtable          []Subtable
 	SnmptranslateFile string

+	Log telegraf.Logger
+
 	nameToOid   map[string]string
 	initNode    Node
 	subTableMap map[string]Subtable

@@ -297,7 +298,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {

 		data, err := ioutil.ReadFile(s.SnmptranslateFile)
 		if err != nil {
-			log.Printf("E! Reading SNMPtranslate file error: %s", err)
+			s.Log.Errorf("Reading SNMPtranslate file error: %s", err.Error())
 			return err
 		} else {
 			for _, line := range strings.Split(string(data), "\n") {

@@ -395,16 +396,16 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
 		// only if len(s.OidInstanceMapping) == 0
 		if len(host.OidInstanceMapping) >= 0 {
 			if err := host.SNMPMap(acc, s.nameToOid, s.subTableMap); err != nil {
-				acc.AddError(fmt.Errorf("E! SNMP Mapping error for host '%s': %s", host.Address, err))
+				s.Log.Errorf("Mapping error for host %q: %s", host.Address, err.Error())
 				continue
 			}
 		}
 		// Launch Get requests
 		if err := host.SNMPGet(acc, s.initNode); err != nil {
-			acc.AddError(fmt.Errorf("E! SNMP Error for host '%s': %s", host.Address, err))
+			s.Log.Errorf("Error for host %q: %s", host.Address, err.Error())
 		}
 		if err := host.SNMPBulk(acc, s.initNode); err != nil {
-			acc.AddError(fmt.Errorf("E! SNMP Error for host '%s': %s", host.Address, err))
+			s.Log.Errorf("Error for host %q: %s", host.Address, err.Error())
 		}
 	}
 	return nil

@@ -801,7 +802,7 @@ func (h *Host) HandleResponse(
 		acc.AddFields(field_name, fields, tags)
 	case gosnmp.NoSuchObject, gosnmp.NoSuchInstance:
 		// Oid not found
-		log.Printf("E! [snmp input] Oid not found: %s", oid_key)
+		log.Printf("E! [inputs.snmp_legacy] oid %q not found", oid_key)
 	default:
 		// delete other data
 	}
@@ -5,7 +5,6 @@ import (
 	"crypto/tls"
 	"fmt"
 	"io"
-	"log"
 	"net"
 	"os"
 	"strconv"

@@ -43,7 +42,7 @@ func (ssl *streamSocketListener) listen() {
 		c, err := ssl.Accept()
 		if err != nil {
 			if !strings.HasSuffix(err.Error(), ": use of closed network connection") {
-				ssl.AddError(err)
+				ssl.Log.Error(err.Error())
 			}
 			break
 		}

@@ -52,7 +51,7 @@ func (ssl *streamSocketListener) listen() {
 			if srb, ok := c.(setReadBufferer); ok {
 				srb.SetReadBuffer(int(ssl.ReadBufferSize.Size))
 			} else {
-				log.Printf("W! Unable to set read buffer on a %s socket", ssl.sockType)
+				ssl.Log.Warnf("Unable to set read buffer on a %s socket", ssl.sockType)
 			}
 		}

@@ -66,7 +65,7 @@ func (ssl *streamSocketListener) listen() {
 		ssl.connectionsMtx.Unlock()

 		if err := ssl.setKeepAlive(c); err != nil {
-			ssl.AddError(fmt.Errorf("unable to configure keep alive (%s): %s", ssl.ServiceAddress, err))
+			ssl.Log.Errorf("Unable to configure keep alive %q: %s", ssl.ServiceAddress, err.Error())
 		}

 		wg.Add(1)

@@ -122,7 +121,7 @@ func (ssl *streamSocketListener) read(c net.Conn) {
 		}
 		metrics, err := ssl.Parse(scnr.Bytes())
 		if err != nil {
-			ssl.AddError(fmt.Errorf("unable to parse incoming line: %s", err))
+			ssl.Log.Errorf("Unable to parse incoming line: %s", err.Error())
 			// TODO rate limit
 			continue
 		}

@@ -133,9 +132,9 @@ func (ssl *streamSocketListener) read(c net.Conn) {

 	if err := scnr.Err(); err != nil {
 		if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
-			log.Printf("D! Timeout in plugin [input.socket_listener]: %s", err)
+			ssl.Log.Debugf("Timeout in plugin: %s", err.Error())
 		} else if netErr != nil && !strings.HasSuffix(err.Error(), ": use of closed network connection") {
-			ssl.AddError(err)
+			ssl.Log.Error(err.Error())
 		}
 	}
 }

@@ -151,14 +150,14 @@ func (psl *packetSocketListener) listen() {
 		n, _, err := psl.ReadFrom(buf)
 		if err != nil {
 			if !strings.HasSuffix(err.Error(), ": use of closed network connection") {
-				psl.AddError(err)
+				psl.Log.Error(err.Error())
 			}
 			break
 		}

 		metrics, err := psl.Parse(buf[:n])
 		if err != nil {
-			psl.AddError(fmt.Errorf("unable to parse incoming packet: %s", err))
+			psl.Log.Errorf("Unable to parse incoming packet: %s", err.Error())
 			// TODO rate limit
 			continue
 		}

@@ -179,6 +178,8 @@ type SocketListener struct {

 	wg sync.WaitGroup

+	Log telegraf.Logger
+
 	parsers.Parser
 	telegraf.Accumulator
 	io.Closer

@@ -292,7 +293,7 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error {
 			return err
 		}

-		log.Printf("I! [inputs.socket_listener] Listening on %s://%s", protocol, l.Addr())
+		sl.Log.Infof("Listening on %s://%s", protocol, l.Addr())

 		// Set permissions on socket
 		if (spl[0] == "unix" || spl[0] == "unixpacket") && sl.SocketMode != "" {

@@ -339,11 +340,11 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error {
 			if srb, ok := pc.(setReadBufferer); ok {
 				srb.SetReadBuffer(int(sl.ReadBufferSize.Size))
 			} else {
-				log.Printf("W! Unable to set read buffer on a %s socket", protocol)
+				sl.Log.Warnf("Unable to set read buffer on a %s socket", protocol)
 			}
 		}

-		log.Printf("I! [inputs.socket_listener] Listening on %s://%s", protocol, pc.LocalAddr())
+		sl.Log.Infof("Listening on %s://%s", protocol, pc.LocalAddr())

 		psl := &packetSocketListener{
 			PacketConn: pc,
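A recurring shift in the listener hunks above: transient I/O and parse errors that used to be surfaced through the accumulator with `acc.AddError` now go to the injected logger. A minimal, illustrative sketch of that change (the `listener` type is hypothetical):

```go
package sketch

import "github.com/influxdata/telegraf"

// listener stands in for the socket listener types in the diff.
type listener struct {
	Log telegraf.Logger
}

// handleParseError logs a transient error instead of routing it through
// the accumulator, e.g. the former
// acc.AddError(fmt.Errorf("unable to parse incoming line: %s", err)).
func (l *listener) handleParseError(err error) {
	l.Log.Errorf("Unable to parse incoming line: %s", err.Error())
}
```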
@@ -48,6 +48,7 @@ func TestSocketListener_tcp_tls(t *testing.T) {
 	defer testEmptyLog(t)()

 	sl := newSocketListener()
+	sl.Log = testutil.Logger{}
 	sl.ServiceAddress = "tcp://127.0.0.1:0"
 	sl.ServerConfig = *pki.TLSServerConfig()

@@ -72,6 +73,7 @@ func TestSocketListener_unix_tls(t *testing.T) {
 	sock := filepath.Join(tmpdir, "sl.TestSocketListener_unix_tls.sock")

 	sl := newSocketListener()
+	sl.Log = testutil.Logger{}
 	sl.ServiceAddress = "unix://" + sock
 	sl.ServerConfig = *pki.TLSServerConfig()

@@ -94,6 +96,7 @@ func TestSocketListener_tcp(t *testing.T) {
 	defer testEmptyLog(t)()

 	sl := newSocketListener()
+	sl.Log = testutil.Logger{}
 	sl.ServiceAddress = "tcp://127.0.0.1:0"
 	sl.ReadBufferSize = internal.Size{Size: 1024}

@@ -112,6 +115,7 @@ func TestSocketListener_udp(t *testing.T) {
 	defer testEmptyLog(t)()

 	sl := newSocketListener()
+	sl.Log = testutil.Logger{}
 	sl.ServiceAddress = "udp://127.0.0.1:0"
 	sl.ReadBufferSize = internal.Size{Size: 1024}

@@ -136,6 +140,7 @@ func TestSocketListener_unix(t *testing.T) {

 	os.Create(sock)
 	sl := newSocketListener()
+	sl.Log = testutil.Logger{}
 	sl.ServiceAddress = "unix://" + sock
 	sl.ReadBufferSize = internal.Size{Size: 1024}

@@ -160,6 +165,7 @@ func TestSocketListener_unixgram(t *testing.T) {

 	os.Create(sock)
 	sl := newSocketListener()
+	sl.Log = testutil.Logger{}
 	sl.ServiceAddress = "unixgram://" + sock
 	sl.ReadBufferSize = internal.Size{Size: 1024}
@@ -3,7 +3,6 @@ package stackdriver
 import (
 	"context"
 	"fmt"
-	"log"
 	"math"
 	"strconv"
 	"strings"

@@ -128,6 +127,8 @@ type (
 		DistributionAggregationAligners []string              `toml:"distribution_aggregation_aligners"`
 		Filter                          *ListTimeSeriesFilter `toml:"filter"`

+		Log telegraf.Logger
+
 		client              metricClient
 		timeSeriesConfCache *timeSeriesConfCache
 		prevEnd             time.Time

@@ -167,6 +168,7 @@ type (

 	// stackdriverMetricClient is a metric client for stackdriver
 	stackdriverMetricClient struct {
+		log  telegraf.Logger
 		conn *monitoring.MetricClient

 		listMetricDescriptorsCalls selfstat.Stat

@@ -206,7 +208,7 @@ func (c *stackdriverMetricClient) ListMetricDescriptors(
 	mdChan := make(chan *metricpb.MetricDescriptor, 1000)

 	go func() {
-		log.Printf("D! [inputs.stackdriver] ListMetricDescriptors: %s", req.Filter)
+		c.log.Debugf("List metric descriptor request filter: %s", req.Filter)
 		defer close(mdChan)

 		// Iterate over metric descriptors and send them to buffered channel

@@ -216,7 +218,7 @@ func (c *stackdriverMetricClient) ListMetricDescriptors(
 			mdDesc, mdErr := mdResp.Next()
 			if mdErr != nil {
 				if mdErr != iterator.Done {
-					log.Printf("E! [inputs.stackdriver] Received error response: %s: %v", req, mdErr)
+					c.log.Errorf("Failed iterating metric desciptor responses: %q: %v", req.String(), mdErr)
 				}
 				break
 			}

@@ -235,7 +237,7 @@ func (c *stackdriverMetricClient) ListTimeSeries(
 	tsChan := make(chan *monitoringpb.TimeSeries, 1000)

 	go func() {
-		log.Printf("D! [inputs.stackdriver] ListTimeSeries: %s", req.Filter)
+		c.log.Debugf("List time series request filter: %s", req.Filter)
 		defer close(tsChan)

 		// Iterate over timeseries and send them to buffered channel

@@ -245,7 +247,7 @@ func (c *stackdriverMetricClient) ListTimeSeries(
 			tsDesc, tsErr := tsResp.Next()
 			if tsErr != nil {
 				if tsErr != iterator.Done {
-					log.Printf("E! [inputs.stackdriver] Received error response: %s: %v", req, tsErr)
+					c.log.Errorf("Failed iterating time series responses: %q: %v", req.String(), tsErr)
 				}
 				break
 			}

@@ -458,6 +460,7 @@ func (s *Stackdriver) initializeStackdriverClient(ctx context.Context) error {
 			"stackdriver", "list_timeseries_calls", tags)

 		s.client = &stackdriverMetricClient{
+			log:                        s.Log,
 			conn:                       client,
 			listMetricDescriptorsCalls: listMetricDescriptorsCalls,
 			listTimeSeriesCalls:        listTimeSeriesCalls,
@@ -640,6 +640,7 @@ func TestGather(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			var acc testutil.Accumulator
 			s := &Stackdriver{
+				Log:                          testutil.Logger{},
 				Project:                      "test",
 				RateLimit:                    10,
 				GatherRawDistributionBuckets: true,

@@ -775,6 +776,7 @@ func TestGatherAlign(t *testing.T) {
 	}

 	s := &Stackdriver{
+		Log:                          testutil.Logger{},
 		Project:                      "test",
 		RateLimit:                    10,
 		GatherRawDistributionBuckets: false,
@@ -5,7 +5,6 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
-	"log"
 	"net"
 	"sort"
 	"strconv"

@@ -34,13 +33,6 @@ const (
 	MaxTCPConnections = 250
 )

-var dropwarn = "E! [inputs.statsd] Error: statsd message queue full. " +
-	"We have dropped %d messages so far. " +
-	"You may want to increase allowed_pending_messages in the config\n"
-
-var malformedwarn = "E! [inputs.statsd] Statsd over TCP has received %d malformed packets" +
-	" thus far."
-
 // Statsd allows the importing of statsd and dogstatsd data.
 type Statsd struct {
 	// Protocol used on listener - udp or tcp

@@ -133,6 +125,8 @@ type Statsd struct {
 	PacketsRecv selfstat.Stat
 	BytesRecv   selfstat.Stat

+	Log telegraf.Logger
+
 	// A pool of byte slices to handle parsing
 	bufPool sync.Pool
 }

@@ -312,7 +306,7 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error {
 func (s *Statsd) Start(ac telegraf.Accumulator) error {
 	if s.ParseDataDogTags {
 		s.DataDogExtensions = true
-		log.Printf("W! [inputs.statsd] The parse_data_dog_tags option is deprecated, use datadog_extensions instead.")
+		s.Log.Warn("'parse_data_dog_tags' config option is deprecated, please use 'datadog_extensions' instead")
 	}

 	s.acc = ac

@@ -350,8 +344,7 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error {
 	}

 	if s.ConvertNames {
-		log.Printf("W! [inputs.statsd] statsd: convert_names config option is deprecated," +
-			" please use metric_separator instead")
+		s.Log.Warn("'convert_names' config option is deprecated, please use 'metric_separator' instead")
 	}

 	if s.MetricSeparator == "" {

@@ -369,7 +362,7 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error {
 			return err
 		}

-		log.Println("I! [inputs.statsd] Statsd UDP listener listening on: ", conn.LocalAddr().String())
+		s.Log.Infof("UDP listening on %q", conn.LocalAddr().String())
 		s.UDPlistener = conn

 		s.wg.Add(1)

@@ -387,7 +380,7 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error {
 			return err
 		}

-		log.Println("I! [inputs.statsd] TCP Statsd listening on: ", listener.Addr().String())
+		s.Log.Infof("TCP listening on %q", listener.Addr().String())
 		s.TCPlistener = listener

 		s.wg.Add(1)

@@ -403,7 +396,7 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error {
 		defer s.wg.Done()
 		s.parser()
 	}()
-	log.Printf("I! [inputs.statsd] Started the statsd service on %s\n", s.ServiceAddress)
+	s.Log.Infof("Started the statsd service on %q", s.ServiceAddress)
 	return nil
 }

@@ -463,7 +456,7 @@ func (s *Statsd) udpListen(conn *net.UDPConn) error {
 			n, addr, err := conn.ReadFromUDP(buf)
 			if err != nil {
 				if !strings.Contains(err.Error(), "closed network") {
-					log.Printf("E! [inputs.statsd] Error READ: %s\n", err.Error())
+					s.Log.Errorf("Error reading: %s", err.Error())
 					continue
 				}
 				return err

@@ -479,7 +472,9 @@ func (s *Statsd) udpListen(conn *net.UDPConn) error {
 			default:
 				s.drops++
 				if s.drops == 1 || s.AllowedPendingMessages == 0 || s.drops%s.AllowedPendingMessages == 0 {
-					log.Printf(dropwarn, s.drops)
+					s.Log.Errorf("Statsd message queue full. "+
+						"We have dropped %d messages so far. "+
+						"You may want to increase allowed_pending_messages in the config", s.drops)
 				}
 			}
 		}

@@ -540,8 +535,8 @@ func (s *Statsd) parseStatsdLine(line string) error {
 	// Validate splitting the line on ":"
 	bits := strings.Split(line, ":")
 	if len(bits) < 2 {
-		log.Printf("E! [inputs.statsd] Error: splitting ':', Unable to parse metric: %s\n", line)
-		return errors.New("Error Parsing statsd line")
+		s.Log.Errorf("Splitting ':', unable to parse metric: %s", line)
+		return errors.New("error Parsing statsd line")
 	}

 	// Extract bucket name from individual metric bits

@@ -556,22 +551,22 @@ func (s *Statsd) parseStatsdLine(line string) error {
 		// Validate splitting the bit on "|"
 		pipesplit := strings.Split(bit, "|")
 		if len(pipesplit) < 2 {
-			log.Printf("E! [inputs.statsd] Error: splitting '|', Unable to parse metric: %s\n", line)
-			return errors.New("Error Parsing statsd line")
+			s.Log.Errorf("Splitting '|', unable to parse metric: %s", line)
+			return errors.New("error parsing statsd line")
 		} else if len(pipesplit) > 2 {
 			sr := pipesplit[2]
-			errmsg := "E! [inputs.statsd] parsing sample rate, %s, it must be in format like: " +
-				"@0.1, @0.5, etc. Ignoring sample rate for line: %s\n"

 			if strings.Contains(sr, "@") && len(sr) > 1 {
 				samplerate, err := strconv.ParseFloat(sr[1:], 64)
 				if err != nil {
-					log.Printf(errmsg, err.Error(), line)
+					s.Log.Errorf("Parsing sample rate: %s", err.Error())
 				} else {
 					// sample rate successfully parsed
 					m.samplerate = samplerate
 				}
 			} else {
-				log.Printf(errmsg, "", line)
+				s.Log.Debugf("Sample rate must be in format like: "+
+					"@0.1, @0.5, etc. Ignoring sample rate for line: %s", line)
 			}
 		}

@@ -580,15 +575,15 @@ func (s *Statsd) parseStatsdLine(line string) error {
 		case "g", "c", "s", "ms", "h":
 			m.mtype = pipesplit[1]
 		default:
-			log.Printf("E! [inputs.statsd] Error: Statsd Metric type %s unsupported", pipesplit[1])
-			return errors.New("Error Parsing statsd line")
+			s.Log.Errorf("Metric type %q unsupported", pipesplit[1])
+			return errors.New("error parsing statsd line")
 		}

 		// Parse the value
 		if strings.HasPrefix(pipesplit[0], "-") || strings.HasPrefix(pipesplit[0], "+") {
 			if m.mtype != "g" && m.mtype != "c" {
-				log.Printf("E! [inputs.statsd] Error: +- values are only supported for gauges & counters: %s\n", line)
-				return errors.New("Error Parsing statsd line")
+				s.Log.Errorf("+- values are only supported for gauges & counters, unable to parse metric: %s", line)
+				return errors.New("error parsing statsd line")
 			}
 			m.additive = true
 		}

@@ -597,8 +592,8 @@ func (s *Statsd) parseStatsdLine(line string) error {
 		case "g", "ms", "h":
 			v, err := strconv.ParseFloat(pipesplit[0], 64)
 			if err != nil {
-				log.Printf("E! [inputs.statsd] Error: parsing value to float64: %s\n", line)
-				return errors.New("Error Parsing statsd line")
+				s.Log.Errorf("Parsing value to float64, unable to parse metric: %s", line)
+				return errors.New("error parsing statsd line")
 			}
 			m.floatvalue = v
 		case "c":

@@ -607,8 +602,8 @@ func (s *Statsd) parseStatsdLine(line string) error {
 			if err != nil {
 				v2, err2 := strconv.ParseFloat(pipesplit[0], 64)
 				if err2 != nil {
-					log.Printf("E! [inputs.statsd] Error: parsing value to int64: %s\n", line)
-					return errors.New("Error Parsing statsd line")
+					s.Log.Errorf("Parsing value to int64, unable to parse metric: %s", line)
+					return errors.New("error parsing statsd line")
 				}
 				v = int64(v2)
 			}

@@ -852,7 +847,9 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) {
 			default:
 				s.drops++
 				if s.drops == 1 || s.drops%s.AllowedPendingMessages == 0 {
-					log.Printf(dropwarn, s.drops)
+					s.Log.Errorf("Statsd message queue full. "+
+						"We have dropped %d messages so far. "+
+						"You may want to increase allowed_pending_messages in the config", s.drops)
 				}
 			}
 		}

@@ -862,9 +859,8 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) {
 // refuser refuses a TCP connection
 func (s *Statsd) refuser(conn *net.TCPConn) {
 	conn.Close()
-	log.Printf("I! [inputs.statsd] Refused TCP Connection from %s", conn.RemoteAddr())
-	log.Printf("I! [inputs.statsd] WARNING: Maximum TCP Connections reached, you may want to" +
-		" adjust max_tcp_connections")
+	s.Log.Infof("Refused TCP Connection from %s", conn.RemoteAddr())
+	s.Log.Warn("Maximum TCP Connections reached, you may want to adjust max_tcp_connections")
 }

 // forget a TCP connection

@@ -883,7 +879,7 @@ func (s *Statsd) remember(id string, conn *net.TCPConn) {

 func (s *Statsd) Stop() {
 	s.Lock()
-	log.Println("I! [inputs.statsd] Stopping the statsd service")
+	s.Log.Infof("Stopping the statsd service")
 	close(s.done)
 	if s.isUDP() {
 		s.UDPlistener.Close()

@@ -909,7 +905,7 @@ func (s *Statsd) Stop() {

 	s.Lock()
 	close(s.in)
-	log.Println("I! Stopped Statsd listener service on ", s.ServiceAddress)
+	s.Log.Infof("Stopped listener service on %q", s.ServiceAddress)
 	s.Unlock()
 }
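The statsd hunks keep a long-standing guard around the queue-full warning: log the first drop, then only once per `AllowedPendingMessages` drops, so a flooded queue does not also flood the log. An illustrative standalone sketch of that guard (names are not from the commit):

```go
package sketch

import "github.com/influxdata/telegraf"

// warnOnDrop mirrors the rate-limiting condition in the statsd diff:
// warn on the first drop, then every `allowed` drops thereafter.
func warnOnDrop(log telegraf.Logger, drops, allowed int) {
	if drops == 1 || allowed == 0 || drops%allowed == 0 {
		log.Errorf("message queue full; dropped %d messages so far", drops)
	}
}
```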
@@ -18,7 +18,7 @@ const (
 )

 func NewTestStatsd() *Statsd {
-	s := Statsd{}
+	s := Statsd{Log: testutil.Logger{}}

 	// Make data structures
 	s.done = make(chan struct{})

@@ -36,6 +36,7 @@ func NewTestStatsd() *Statsd {
 // Test that MaxTCPConections is respected
 func TestConcurrentConns(t *testing.T) {
 	listener := Statsd{
+		Log:                    testutil.Logger{},
 		Protocol:               "tcp",
 		ServiceAddress:         "localhost:8125",
 		AllowedPendingMessages: 10000,

@@ -66,6 +67,7 @@ func TestConcurrentConns(t *testing.T) {
 // Test that MaxTCPConections is respected when max==1
 func TestConcurrentConns1(t *testing.T) {
 	listener := Statsd{
+		Log:                    testutil.Logger{},
 		Protocol:               "tcp",
 		ServiceAddress:         "localhost:8125",
 		AllowedPendingMessages: 10000,

@@ -94,6 +96,7 @@ func TestConcurrentConns1(t *testing.T) {
 // Test that MaxTCPConections is respected
 func TestCloseConcurrentConns(t *testing.T) {
 	listener := Statsd{
+		Log:                    testutil.Logger{},
 		Protocol:               "tcp",
 		ServiceAddress:         "localhost:8125",
 		AllowedPendingMessages: 10000,

@@ -115,6 +118,7 @@ func TestCloseConcurrentConns(t *testing.T) {
 // benchmark how long it takes to accept & process 100,000 metrics:
 func BenchmarkUDP(b *testing.B) {
 	listener := Statsd{
+		Log:                    testutil.Logger{},
 		Protocol:               "udp",
 		ServiceAddress:         "localhost:8125",
 		AllowedPendingMessages: 250000,

@@ -145,6 +149,7 @@ func BenchmarkUDP(b *testing.B) {
 // benchmark how long it takes to accept & process 100,000 metrics:
 func BenchmarkTCP(b *testing.B) {
 	listener := Statsd{
+		Log:                    testutil.Logger{},
 		Protocol:               "tcp",
 		ServiceAddress:         "localhost:8125",
 		AllowedPendingMessages: 250000,

@@ -1625,6 +1630,7 @@ func testValidateGauge(

 func TestTCP(t *testing.T) {
 	statsd := Statsd{
+		Log:                    testutil.Logger{},
 		Protocol:               "tcp",
 		ServiceAddress:         "localhost:0",
 		AllowedPendingMessages: 10000,
@@ -16,18 +16,15 @@ the created binary data file with the `sadf` utility.
   ## On Debian and Arch Linux the default path is /usr/lib/sa/sadc whereas
   ## on RHEL and CentOS the default path is /usr/lib64/sa/sadc
   sadc_path = "/usr/lib/sa/sadc" # required
-  #
-  #
+
   ## Path to the sadf command, if it is not in PATH
   # sadf_path = "/usr/bin/sadf"
-  #
-  #
+
   ## Activities is a list of activities, that are passed as argument to the
   ## sadc collector utility (e.g: DISK, SNMP etc...)
   ## The more activities that are added, the more data is collected.
   # activities = ["DISK"]
-  #
-  #
+
   ## Group metrics to measurements.
   ##
   ## If group is false each metric will be prefixed with a description

@@ -35,8 +32,7 @@ the created binary data file with the `sadf` utility.
   ##
   ## If Group is true, corresponding metrics are grouped to a single measurement.
   # group = true
-  #
-  #
+
   ## Options for the sadf command. The values on the left represent the sadf options and
   ## the values on the right their description (wich are used for grouping and prefixing metrics).
   ##

@@ -58,8 +54,7 @@ the created binary data file with the `sadf` utility.
   -w = "task"
   # -H = "hugepages" # only available for newer linux distributions
   # "-I ALL" = "interrupts" # requires INT activity
-  #
-  #
+
   ## Device tags can be used to add additional tags for devices. For example the configuration below
   ## adds a tag vg with value rootvg for all metrics with sda devices.
   # [[inputs.sysstat.device_tags.sda]]
@@ -7,7 +7,6 @@ import (
 	"encoding/csv"
 	"fmt"
 	"io"
-	"log"
 	"os"
 	"os/exec"
 	"path"

@@ -67,6 +66,8 @@ type Sysstat struct {
 	DeviceTags map[string][]map[string]string `toml:"device_tags"`
 	tmpFile    string
 	interval   int
+
+	Log telegraf.Logger
 }

 func (*Sysstat) Description() string {

@@ -81,18 +82,15 @@ var sampleConfig = `
   ## Arch:        /usr/lib/sa/sadc
   ## RHEL/CentOS: /usr/lib64/sa/sadc
   sadc_path = "/usr/lib/sa/sadc" # required
-  #
-  #
+
   ## Path to the sadf command, if it is not in PATH
   # sadf_path = "/usr/bin/sadf"
-  #
-  #
+
   ## Activities is a list of activities, that are passed as argument to the
   ## sadc collector utility (e.g: DISK, SNMP etc...)
   ## The more activities that are added, the more data is collected.
   # activities = ["DISK"]
-  #
-  #
+
   ## Group metrics to measurements.
   ##
   ## If group is false each metric will be prefixed with a description

@@ -100,8 +98,7 @@ var sampleConfig = `
   ##
   ## If Group is true, corresponding metrics are grouped to a single measurement.
   # group = true
-  #
-  #
+
   ## Options for the sadf command. The values on the left represent the sadf
   ## options and the values on the right their description (which are used for
   ## grouping and prefixing metrics).

@@ -125,8 +122,7 @@ var sampleConfig = `
   -w = "task"
   # -H = "hugepages" # only available for newer linux distributions
   # "-I ALL" = "interrupts" # requires INT activity
-  #
-  #
+
   ## Device tags can be used to add additional tags for devices.
   ## For example the configuration below adds a tag vg with value rootvg for
   ## all metrics with sda devices.

@@ -196,7 +192,7 @@ func (s *Sysstat) collect() error {
 	out, err := internal.CombinedOutputTimeout(cmd, time.Second*time.Duration(collectInterval+parseInterval))
 	if err != nil {
 		if err := os.Remove(s.tmpFile); err != nil {
-			log.Printf("E! failed to remove tmp file after %s command: %s", strings.Join(cmd.Args, " "), err)
+			s.Log.Errorf("Failed to remove tmp file after %q command: %s", strings.Join(cmd.Args, " "), err.Error())
 		}
 		return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
 	}
@@ -13,6 +13,7 @@ import (
 )

 var s = Sysstat{
+	Log:      testutil.Logger{},
 	interval: 10,
 	Sadc:     "/usr/lib/sa/sadc",
 	Sadf:     "/usr/bin/sadf",
@@ -4,7 +4,6 @@ import (
 	"bufio"
 	"bytes"
 	"fmt"
-	"log"
 	"os"
 	"strings"
 	"time"

@@ -16,20 +15,22 @@ import (
 	"github.com/shirou/gopsutil/load"
 )

-type SystemStats struct{}
+type SystemStats struct {
+	Log telegraf.Logger
+}

-func (_ *SystemStats) Description() string {
+func (*SystemStats) Description() string {
 	return "Read metrics about system load & uptime"
 }

-func (_ *SystemStats) SampleConfig() string {
+func (*SystemStats) SampleConfig() string {
 	return `
   ## Uncomment to remove deprecated metrics.
   # fielddrop = ["uptime_format"]
`
 }

-func (_ *SystemStats) Gather(acc telegraf.Accumulator) error {
+func (s *SystemStats) Gather(acc telegraf.Accumulator) error {
 	loadavg, err := load.Avg()
 	if err != nil && !strings.Contains(err.Error(), "not implemented") {
 		return err

@@ -51,9 +52,9 @@ func (_ *SystemStats) Gather(acc telegraf.Accumulator) error {
 	if err == nil {
 		fields["n_users"] = len(users)
 	} else if os.IsNotExist(err) {
-		log.Printf("D! [inputs.system] Error reading users: %v", err)
+		s.Log.Debugf("Reading users: %s", err.Error())
 	} else if os.IsPermission(err) {
-		log.Printf("D! [inputs.system] %v", err)
+		s.Log.Debug(err.Error())
 	}

 	now := time.Now()
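A small but telling detail in the system.go hunk: the blank method receiver `(_ *SystemStats)` becomes a named one solely so the method can reach the injected logger. A minimal illustration with a hypothetical type:

```go
package sketch

import "github.com/influxdata/telegraf"

// Stats is illustrative; only the receiver-naming point mirrors the diff.
type Stats struct {
	Log telegraf.Logger
}

// Gather needs the named receiver s to call s.Log; a blank receiver (_)
// would leave the logger unreachable.
func (s *Stats) Gather(acc telegraf.Accumulator) error {
	s.Log.Debug("gathering")
	return nil
}
```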
@@ -3,8 +3,6 @@
 package tail

 import (
-	"fmt"
-	"log"
 	"strings"
 	"sync"

@@ -31,6 +29,8 @@ type Tail struct {
 	Pipe        bool
 	WatchMethod string

+	Log telegraf.Logger
+
 	tailers    map[string]*tail.Tail
 	offsets    map[string]int64
 	parserFunc parsers.ParserFunc

@@ -124,7 +124,7 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error {
 	for _, filepath := range t.Files {
 		g, err := globpath.Compile(filepath)
 		if err != nil {
-			t.acc.AddError(fmt.Errorf("glob %s failed to compile, %s", filepath, err))
+			t.Log.Errorf("Glob %q failed to compile: %s", filepath, err.Error())
 		}
 		for _, file := range g.Match() {
 			if _, ok := t.tailers[file]; ok {

@@ -135,7 +135,7 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error {
 			var seek *tail.SeekInfo
 			if !t.Pipe && !fromBeginning {
 				if offset, ok := t.offsets[file]; ok {
-					log.Printf("D! [inputs.tail] using offset %d for file: %v", offset, file)
+					t.Log.Debugf("Using offset %d for %q", offset, file)
 					seek = &tail.SeekInfo{
 						Whence: 0,
 						Offset: offset,

@@ -163,11 +163,11 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error {
 				continue
 			}

-			log.Printf("D! [inputs.tail] tail added for file: %v", file)
+			t.Log.Debugf("Tail added for %q", file)

 			parser, err := t.parserFunc()
 			if err != nil {
-				t.acc.AddError(fmt.Errorf("error creating parser: %v", err))
+				t.Log.Errorf("Creating parser: %s", err.Error())
 			}

 			// create a goroutine for each "tailer"

@@ -213,7 +213,7 @@ func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) {
 	var firstLine = true
 	for line := range tailer.Lines {
 		if line.Err != nil {
-			t.acc.AddError(fmt.Errorf("error tailing file %s, Error: %s", tailer.Filename, line.Err))
+			t.Log.Errorf("Tailing %q: %s", tailer.Filename, line.Err.Error())
 			continue
 		}
 		// Fix up files with Windows line endings.

@@ -221,8 +221,8 @@ func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) {

 		metrics, err := parseLine(parser, text, firstLine)
 		if err != nil {
-			t.acc.AddError(fmt.Errorf("malformed log line in %s: [%s], Error: %s",
-				tailer.Filename, line.Text, err))
+			t.Log.Errorf("Malformed log line in %q: [%q]: %s",
+				tailer.Filename, line.Text, err.Error())
 			continue
 		}
 		firstLine = false

@@ -233,10 +233,10 @@ func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) {
 		}
 	}

-	log.Printf("D! [inputs.tail] tail removed for file: %v", tailer.Filename)
+	t.Log.Debugf("Tail removed for %q", tailer.Filename)

 	if err := tailer.Err(); err != nil {
-		t.acc.AddError(fmt.Errorf("error tailing file %s, Error: %s", tailer.Filename, err))
+		t.Log.Errorf("Tailing %q: %s", tailer.Filename, err.Error())
 	}
 }

@@ -249,14 +249,14 @@ func (t *Tail) Stop() {
 			// store offset for resume
 			offset, err := tailer.Tell()
 			if err == nil {
-				log.Printf("D! [inputs.tail] recording offset %d for file: %v", offset, tailer.Filename)
+				t.Log.Debugf("Recording offset %d for %q", offset, tailer.Filename)
 			} else {
-				t.acc.AddError(fmt.Errorf("error recording offset for file %s", tailer.Filename))
+				t.Log.Errorf("Recording offset for %q: %s", tailer.Filename, err.Error())
 			}
 		}
 		err := tailer.Stop()
 		if err != nil {
-			t.acc.AddError(fmt.Errorf("error stopping tail on file %s", tailer.Filename))
+			t.Log.Errorf("Stopping tail on %q: %s", tailer.Filename, err.Error())
 		}
 	}
@@ -1,7 +1,9 @@
 package tail

 import (
+	"bytes"
 	"io/ioutil"
+	"log"
 	"os"
 	"runtime"
 	"testing"

@@ -28,6 +30,7 @@ func TestTailFromBeginning(t *testing.T) {
 	require.NoError(t, err)

 	tt := NewTail()
+	tt.Log = testutil.Logger{}
 	tt.FromBeginning = true
 	tt.Files = []string{tmpfile.Name()}
 	tt.SetParserFunc(parsers.NewInfluxParser)

@@ -61,6 +64,7 @@ func TestTailFromEnd(t *testing.T) {
 	require.NoError(t, err)

 	tt := NewTail()
+	tt.Log = testutil.Logger{}
 	tt.Files = []string{tmpfile.Name()}
 	tt.SetParserFunc(parsers.NewInfluxParser)
 	defer tt.Stop()

@@ -97,6 +101,7 @@ func TestTailBadLine(t *testing.T) {
 	defer os.Remove(tmpfile.Name())

 	tt := NewTail()
+	tt.Log = testutil.Logger{}
 	tt.FromBeginning = true
 	tt.Files = []string{tmpfile.Name()}
 	tt.SetParserFunc(parsers.NewInfluxParser)

@@ -105,13 +110,17 @@ func TestTailBadLine(t *testing.T) {

 	acc := testutil.Accumulator{}
 	require.NoError(t, tt.Start(&acc))

+	buf := &bytes.Buffer{}
+	log.SetOutput(buf)
+
 	require.NoError(t, acc.GatherError(tt.Gather))

 	_, err = tmpfile.WriteString("cpu mytag= foo usage_idle= 100\n")
 	require.NoError(t, err)

-	acc.WaitError(1)
-	assert.Contains(t, acc.Errors[0].Error(), "malformed log line")
+	time.Sleep(500 * time.Millisecond)
+
+	assert.Contains(t, buf.String(), "Malformed log line")
 }

 func TestTailDosLineendings(t *testing.T) {

@@ -122,6 +131,7 @@ func TestTailDosLineendings(t *testing.T) {
 	require.NoError(t, err)

 	tt := NewTail()
+	tt.Log = testutil.Logger{}
 	tt.FromBeginning = true
 	tt.Files = []string{tmpfile.Name()}
 	tt.SetParserFunc(parsers.NewInfluxParser)

@@ -160,6 +170,7 @@ cpu,42
 	require.NoError(t, err)

 	plugin := NewTail()
+	plugin.Log = testutil.Logger{}
 	plugin.FromBeginning = true
 	plugin.Files = []string{tmpfile.Name()}
 	plugin.SetParserFunc(func() (parsers.Parser, error) {

@@ -217,6 +228,7 @@ func TestMultipleMetricsOnFirstLine(t *testing.T) {
 	require.NoError(t, err)

 	plugin := NewTail()
+	plugin.Log = testutil.Logger{}
 	plugin.FromBeginning = true
 	plugin.Files = []string{tmpfile.Name()}
 	plugin.SetParserFunc(func() (parsers.Parser, error) {
@@ -48,13 +48,15 @@ type TcpListener struct {
 	TotalConnections selfstat.Stat
 	PacketsRecv      selfstat.Stat
 	BytesRecv        selfstat.Stat
+
+	Log telegraf.Logger
 }

-var dropwarn = "E! Error: tcp_listener message queue full. " +
+var dropwarn = "tcp_listener message queue full. " +
 	"We have dropped %d messages so far. " +
-	"You may want to increase allowed_pending_messages in the config\n"
+	"You may want to increase allowed_pending_messages in the config"

-var malformedwarn = "E! tcp_listener has received %d malformed packets" +
+var malformedwarn = "tcp_listener has received %d malformed packets" +
 	" thus far."

 const sampleConfig = `

@@ -114,16 +116,15 @@ func (t *TcpListener) Start(acc telegraf.Accumulator) error {
 	address, _ := net.ResolveTCPAddr("tcp", t.ServiceAddress)
 	t.listener, err = net.ListenTCP("tcp", address)
 	if err != nil {
-		log.Fatalf("ERROR: ListenUDP - %s", err)
+		t.Log.Errorf("Failed to listen: %s", err.Error())
 		return err
 	}
-	log.Println("I! TCP server listening on: ", t.listener.Addr().String())

 	t.wg.Add(2)
 	go t.tcpListen()
 	go t.tcpParser()

-	log.Printf("I! Started TCP listener service on %s\n", t.ServiceAddress)
+	t.Log.Infof("Started TCP listener service on %q", t.ServiceAddress)
 	return nil
 }

@@ -150,7 +151,7 @@ func (t *TcpListener) Stop() {

 	t.wg.Wait()
 	close(t.in)
-	log.Println("I! Stopped TCP listener service on ", t.ServiceAddress)
+	t.Log.Infof("Stopped TCP listener service on %q", t.ServiceAddress)
 }

 // tcpListen listens for incoming TCP connections.

@@ -191,9 +192,8 @@ func (t *TcpListener) refuser(conn *net.TCPConn) {
 		" reached, closing.\nYou may want to increase max_tcp_connections in"+
 		" the Telegraf tcp listener configuration.\n", t.MaxTCPConnections)
 	conn.Close()
-	log.Printf("I! Refused TCP Connection from %s", conn.RemoteAddr())
-	log.Printf("I! WARNING: Maximum TCP Connections reached, you may want to" +
-		" adjust max_tcp_connections")
+	t.Log.Infof("Refused TCP Connection from %s", conn.RemoteAddr())
+	t.Log.Warn("Maximum TCP Connections reached, you may want to adjust max_tcp_connections")
 }

 // handler handles a single TCP Connection

@@ -235,7 +235,7 @@ func (t *TcpListener) handler(conn *net.TCPConn, id string) {
 			default:
 				t.drops++
 				if t.drops == 1 || t.drops%t.AllowedPendingMessages == 0 {
-					log.Printf(dropwarn, t.drops)
+					t.Log.Errorf(dropwarn, t.drops)
 				}
 			}
 		}

@@ -268,7 +268,7 @@ func (t *TcpListener) tcpParser() error {
 			} else {
 				t.malformed++
 				if t.malformed == 1 || t.malformed%1000 == 0 {
-					log.Printf(malformedwarn, t.malformed)
+					t.Log.Errorf(malformedwarn, t.malformed)
 				}
 			}
 		}
@@ -33,6 +33,7 @@ cpu_load_short,host=server06 value=12.0 1422568543702900257
 func newTestTcpListener() (*TcpListener, chan []byte) {
 	in := make(chan []byte, 1500)
 	listener := &TcpListener{
+		Log:                    testutil.Logger{},
 		ServiceAddress:         "localhost:8194",
 		AllowedPendingMessages: 10000,
 		MaxTCPConnections:      250,

@@ -45,6 +46,7 @@ func newTestTcpListener() (*TcpListener, chan []byte) {
 // benchmark how long it takes to accept & process 100,000 metrics:
 func BenchmarkTCP(b *testing.B) {
 	listener := TcpListener{
+		Log:                    testutil.Logger{},
 		ServiceAddress:         "localhost:8198",
 		AllowedPendingMessages: 100000,
 		MaxTCPConnections:      250,

@@ -76,6 +78,7 @@ func BenchmarkTCP(b *testing.B) {

 func TestHighTrafficTCP(t *testing.T) {
 	listener := TcpListener{
+		Log:                    testutil.Logger{},
 		ServiceAddress:         "localhost:8199",
 		AllowedPendingMessages: 100000,
 		MaxTCPConnections:      250,

@@ -103,6 +106,7 @@ func TestHighTrafficTCP(t *testing.T) {

 func TestConnectTCP(t *testing.T) {
 	listener := TcpListener{
+		Log:                    testutil.Logger{},
 		ServiceAddress:         "localhost:8194",
 		AllowedPendingMessages: 10000,
 		MaxTCPConnections:      250,

@@ -140,6 +144,7 @@ func TestConnectTCP(t *testing.T) {
 // Test that MaxTCPConections is respected
 func TestConcurrentConns(t *testing.T) {
 	listener := TcpListener{
+		Log:                    testutil.Logger{},
 		ServiceAddress:         "localhost:8195",
 		AllowedPendingMessages: 10000,
 		MaxTCPConnections:      2,

@@ -175,6 +180,7 @@ func TestConcurrentConns(t *testing.T) {
 // Test that MaxTCPConections is respected when max==1
 func TestConcurrentConns1(t *testing.T) {
 	listener := TcpListener{
+		Log:                    testutil.Logger{},
 		ServiceAddress:         "localhost:8196",
 		AllowedPendingMessages: 10000,
 		MaxTCPConnections:      1,

@@ -208,6 +214,7 @@ func TestConcurrentConns1(t *testing.T) {
 // Test that MaxTCPConections is respected
 func TestCloseConcurrentConns(t *testing.T) {
 	listener := TcpListener{
+		Log:                    testutil.Logger{},
 		ServiceAddress:         "localhost:8195",
 		AllowedPendingMessages: 10000,
 		MaxTCPConnections:      2,
@@ -53,17 +53,19 @@ type UdpListener struct {

 	PacketsRecv selfstat.Stat
 	BytesRecv   selfstat.Stat
+
+	Log telegraf.Logger
 }

 // UDP_MAX_PACKET_SIZE is packet limit, see
 // https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure
 const UDP_MAX_PACKET_SIZE int = 64 * 1024

-var dropwarn = "E! Error: udp_listener message queue full. " +
+var dropwarn = "udp_listener message queue full. " +
 	"We have dropped %d messages so far. " +
-	"You may want to increase allowed_pending_messages in the config\n"
+	"You may want to increase allowed_pending_messages in the config"

-var malformedwarn = "E! udp_listener has received %d malformed packets" +
+var malformedwarn = "udp_listener has received %d malformed packets" +
 	" thus far."

 const sampleConfig = `

@@ -113,7 +115,7 @@ func (u *UdpListener) Start(acc telegraf.Accumulator) error {
 	u.wg.Add(1)
 	go u.udpParser()

-	log.Printf("I! Started UDP listener service on %s (ReadBuffer: %d)\n", u.ServiceAddress, u.UDPBufferSize)
+	u.Log.Infof("Started service on %q (ReadBuffer: %d)", u.ServiceAddress, u.UDPBufferSize)
 	return nil
 }

@@ -124,7 +126,7 @@ func (u *UdpListener) Stop() {
 	u.wg.Wait()
 	u.listener.Close()
 	close(u.in)
-	log.Println("I! Stopped UDP listener service on ", u.ServiceAddress)
+	u.Log.Infof("Stopped service on %q", u.ServiceAddress)
 }

 func (u *UdpListener) udpListen() error {

@@ -134,15 +136,15 @@ func (u *UdpListener) udpListen() error {
 		u.listener, err = net.ListenUDP("udp", address)

 		if err != nil {
-			return fmt.Errorf("E! Error: ListenUDP - %s", err)
+			return err
 		}

-		log.Println("I! UDP server listening on: ", u.listener.LocalAddr().String())
+		u.Log.Infof("Server listening on %q", u.listener.LocalAddr().String())

 		if u.UDPBufferSize > 0 {
 			err = u.listener.SetReadBuffer(u.UDPBufferSize) // if we want to move away from OS default
 			if err != nil {
-				return fmt.Errorf("E! Failed to set UDP read buffer to %d: %s", u.UDPBufferSize, err)
+				return fmt.Errorf("failed to set UDP read buffer to %d: %s", u.UDPBufferSize, err)
 			}
 		}

@@ -166,7 +168,7 @@ func (u *UdpListener) udpListenLoop() {
 		if err != nil {
 			if err, ok := err.(net.Error); ok && err.Timeout() {
 			} else {
-				log.Printf("E! Error: %s\n", err.Error())
+				u.Log.Error(err.Error())
 			}
 			continue
 		}

@@ -180,7 +182,7 @@ func (u *UdpListener) udpListenLoop() {
 			default:
 				u.drops++
 				if u.drops == 1 || u.drops%u.AllowedPendingMessages == 0 {
-					log.Printf(dropwarn, u.drops)
+					u.Log.Errorf(dropwarn, u.drops)
 				}
 			}
 		}

@@ -208,7 +210,7 @@ func (u *UdpListener) udpParser() error {
 			} else {
 				u.malformed++
 				if u.malformed == 1 || u.malformed%1000 == 0 {
-					log.Printf(malformedwarn, u.malformed)
+					u.Log.Errorf(malformedwarn, u.malformed)
 				}
 			}
 		}

@@ -31,6 +31,7 @@ cpu_load_short,host=server06 value=12.0 1422568543702900257
 func newTestUdpListener() (*UdpListener, chan []byte) {
 	in := make(chan []byte, 1500)
 	listener := &UdpListener{
+		Log:                    testutil.Logger{},
 		ServiceAddress:         ":8125",
 		AllowedPendingMessages: 10000,
 		in:                     in,

@@ -78,6 +79,7 @@ func newTestUdpListener() (*UdpListener, chan []byte) {

 func TestConnectUDP(t *testing.T) {
 	listener := UdpListener{
+		Log:                    testutil.Logger{},
 		ServiceAddress:         ":8127",
 		AllowedPendingMessages: 10000,
 	}
@ -4,13 +4,13 @@ import (
|
|||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/vmware/govmomi"
|
||||
"github.com/vmware/govmomi/object"
|
||||
"github.com/vmware/govmomi/performance"
|
||||
|
@ -45,6 +45,7 @@ type Client struct {
|
|||
Valid bool
|
||||
Timeout time.Duration
|
||||
closeGate sync.Once
|
||||
log telegraf.Logger
|
||||
}
|
||||
|
||||
// NewClientFactory creates a new ClientFactory and prepares it for use.
|
||||
|
@ -76,7 +77,7 @@ func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) {
|
|||
ctx1, cancel1 := context.WithTimeout(ctx, cf.parent.Timeout.Duration)
|
||||
defer cancel1()
|
||||
if _, err := methods.GetCurrentTime(ctx1, cf.client.Client); err != nil {
|
||||
log.Printf("I! [inputs.vsphere]: Client session seems to have time out. Reauthenticating!")
|
||||
cf.parent.Log.Info("Client session seems to have time out. Reauthenticating!")
|
||||
ctx2, cancel2 := context.WithTimeout(ctx, cf.parent.Timeout.Duration)
|
||||
defer cancel2()
|
||||
if err := cf.client.Client.SessionManager.Login(ctx2, url.UserPassword(cf.parent.Username, cf.parent.Password)); err != nil {
|
||||
|
@ -88,7 +89,7 @@ func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) {
|
|||
cf.client = nil
|
||||
continue
|
||||
}
|
||||
return nil, fmt.Errorf("Renewing authentication failed: %v", err)
|
||||
return nil, fmt.Errorf("renewing authentication failed: %s", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -113,7 +114,7 @@ func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) {
|
|||
u.User = url.UserPassword(vs.Username, vs.Password)
|
||||
}
|
||||
|
||||
log.Printf("D! [inputs.vsphere]: Creating client: %s", u.Host)
|
||||
vs.Log.Debugf("Creating client: %s", u.Host)
|
||||
soapClient := soap.NewClient(u, tlsCfg.InsecureSkipVerify)
|
||||
|
||||
// Add certificate if we have it. Use it to log us in.
|
||||
|
@ -170,6 +171,7 @@ func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) {
|
|||
p := performance.NewManager(c.Client)
|
||||
|
||||
client := &Client{
|
||||
log: vs.Log,
|
||||
Client: c,
|
||||
Views: m,
|
||||
Root: v,
|
||||
|
@ -184,9 +186,9 @@ func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Printf("D! [inputs.vsphere] vCenter says max_query_metrics should be %d", n)
|
||||
vs.Log.Debugf("vCenter says max_query_metrics should be %d", n)
|
||||
if n < vs.MaxQueryMetrics {
|
||||
log.Printf("W! [inputs.vsphere] Configured max_query_metrics is %d, but server limits it to %d. Reducing.", vs.MaxQueryMetrics, n)
|
||||
vs.Log.Warnf("Configured max_query_metrics is %d, but server limits it to %d. Reducing.", vs.MaxQueryMetrics, n)
|
||||
vs.MaxQueryMetrics = n
|
||||
}
|
||||
return client, nil
|
||||
|
@ -202,7 +204,6 @@ func (cf *ClientFactory) Close() {
|
|||
}
|
||||
|
||||
func (c *Client) close() {
|
||||
|
||||
// Use a Once to prevent us from panics stemming from trying
|
||||
// to close it multiple times.
|
||||
c.closeGate.Do(func() {
|
||||
|
@ -210,7 +211,7 @@ func (c *Client) close() {
|
|||
defer cancel()
|
||||
if c.Client != nil {
|
||||
if err := c.Client.Logout(ctx); err != nil {
|
||||
log.Printf("E! [inputs.vsphere]: Error during logout: %s", err)
|
||||
c.log.Errorf("Logout: %s", err.Error())
|
||||
}
|
||||
}
|
||||
})
|
||||
|
@@ -239,7 +240,7 @@ func (c *Client) GetMaxQueryMetrics(ctx context.Context) (int, error) {
 	if s, ok := res[0].GetOptionValue().Value.(string); ok {
 		v, err := strconv.Atoi(s)
 		if err == nil {
-			log.Printf("D! [inputs.vsphere] vCenter maxQueryMetrics is defined: %d", v)
+			c.log.Debugf("vCenter maxQueryMetrics is defined: %d", v)
 			if v == -1 {
 				// Whatever the server says, we never ask for more metrics than this.
 				return absoluteMaxMetrics, nil
@@ -250,17 +251,17 @@
 			// Fall through version-based inference if value isn't usable
 		}
 	} else {
-		log.Println("D! [inputs.vsphere] Option query for maxQueryMetrics failed. Using default")
+		c.log.Debug("Option query for maxQueryMetrics failed. Using default")
 	}

 	// No usable maxQueryMetrics setting. Infer based on version
 	ver := c.Client.Client.ServiceContent.About.Version
 	parts := strings.Split(ver, ".")
 	if len(parts) < 2 {
-		log.Printf("W! [inputs.vsphere] vCenter returned an invalid version string: %s. Using default query size=64", ver)
+		c.log.Warnf("vCenter returned an invalid version string: %s. Using default query size=64", ver)
 		return 64, nil
 	}
-	log.Printf("D! [inputs.vsphere] vCenter version is: %s", ver)
+	c.log.Debugf("vCenter version is: %s", ver)
 	major, err := strconv.Atoi(parts[0])
 	if err != nil {
 		return 0, err
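When the server option is missing or unusable, GetMaxQueryMetrics falls back to inferring a query size from the vCenter version string. A sketch of that inference: 64 is the documented fallback visible in the hunk above, while the 256 value for 6.5 and newer is an assumption made for illustration.

package main

import (
	"strconv"
	"strings"
)

// inferMaxQueryMetrics picks a query size from a version string like "6.5.0".
// 64 matches the fallback shown above; 256 for 6.5+ is an assumed cutoff.
func inferMaxQueryMetrics(ver string) int {
	parts := strings.Split(ver, ".")
	if len(parts) < 2 {
		return 64 // invalid version string: use the conservative default
	}
	major, err := strconv.Atoi(parts[0])
	if err != nil {
		return 64
	}
	minor, err := strconv.Atoi(parts[1])
	if err != nil {
		return 64
	}
	if major < 6 || (major == 6 && minor < 5) {
		return 64
	}
	return 256
}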
plugins/inputs/vsphere/endpoint.go

@@ -250,10 +250,10 @@ func (e *Endpoint) startDiscovery(ctx context.Context) {
 		case <-e.discoveryTicker.C:
 			err := e.discover(ctx)
 			if err != nil && err != context.Canceled {
-				log.Printf("E! [inputs.vsphere]: Error in discovery for %s: %v", e.URL.Host, err)
+				e.Parent.Log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error())
 			}
 		case <-ctx.Done():
-			log.Printf("D! [inputs.vsphere]: Exiting discovery goroutine for %s", e.URL.Host)
+			e.Parent.Log.Debugf("Exiting discovery goroutine for %s", e.URL.Host)
 			e.discoveryTicker.Stop()
 			return
 		}
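startDiscovery runs object discovery off a ticker until its context is cancelled. The skeleton of that loop, reduced to its essentials (the interval parameter and error handling comment are placeholders):

package main

import (
	"context"
	"time"
)

// runDiscovery calls discover on every tick and exits when ctx is cancelled,
// stopping the ticker on the way out, as startDiscovery does above.
func runDiscovery(ctx context.Context, interval time.Duration, discover func(context.Context) error) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if err := discover(ctx); err != nil && err != context.Canceled {
				// a real implementation reports this via the plugin logger
				continue
			}
		case <-ctx.Done():
			return
		}
	}
}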
@@ -264,7 +264,7 @@ func (e *Endpoint) startDiscovery(ctx context.Context) {
 func (e *Endpoint) initalDiscovery(ctx context.Context) {
 	err := e.discover(ctx)
 	if err != nil && err != context.Canceled {
-		log.Printf("E! [inputs.vsphere]: Error in discovery for %s: %v", e.URL.Host, err)
+		e.Parent.Log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error())
 	}
 	e.startDiscovery(ctx)
 }
@@ -279,7 +279,7 @@ func (e *Endpoint) init(ctx context.Context) error {
 	if e.customAttrEnabled {
 		fields, err := client.GetCustomFields(ctx)
 		if err != nil {
-			log.Println("W! [inputs.vsphere] Could not load custom field metadata")
+			e.Parent.Log.Warn("Could not load custom field metadata")
 		} else {
 			e.customFields = fields
 		}
@@ -291,7 +291,7 @@ func (e *Endpoint) init(ctx context.Context) error {
 	// goroutine without waiting for it. This will probably cause us to report an empty
 	// dataset on the first collection, but it solves the issue of the first collection timing out.
 	if e.Parent.ForceDiscoverOnInit {
-		log.Printf("D! [inputs.vsphere]: Running initial discovery and waiting for it to finish")
+		e.Parent.Log.Debug("Running initial discovery and waiting for it to finish")
 		e.initalDiscovery(ctx)
 	} else {
 		// Otherwise, just run it in the background. We'll probably have an incomplete first metric
@@ -354,7 +354,7 @@ func (e *Endpoint) getDatacenterName(ctx context.Context, client *Client, cache
 		defer cancel1()
 		err := o.Properties(ctx1, here, []string{"parent", "name"}, &result)
 		if err != nil {
-			log.Printf("W! [inputs.vsphere]: Error while resolving parent. Assuming no parent exists. Error: %s", err)
+			e.Parent.Log.Warnf("Error while resolving parent. Assuming no parent exists. Error: %s", err.Error())
 			break
 		}
 		if result.Reference().Type == "Datacenter" {
@@ -363,7 +363,7 @@ func (e *Endpoint) getDatacenterName(ctx context.Context, client *Client, cache
 			break
 		}
 		if result.Parent == nil {
-			log.Printf("D! [inputs.vsphere]: No parent found for %s (ascending from %s)", here.Reference(), r.Reference())
+			e.Parent.Log.Debugf("No parent found for %s (ascending from %s)", here.Reference(), r.Reference())
 			break
 		}
 		here = result.Parent.Reference()
@@ -393,7 +393,7 @@ func (e *Endpoint) discover(ctx context.Context) error {
 		return err
 	}

-	log.Printf("D! [inputs.vsphere]: Discover new objects for %s", e.URL.Host)
+	e.Parent.Log.Debugf("Discover new objects for %s", e.URL.Host)
 	dcNameCache := make(map[string]string)

 	numRes := int64(0)
@@ -401,7 +401,7 @@ func (e *Endpoint) discover(ctx context.Context) error {
 	// Populate resource objects, and endpoint instance info.
 	newObjects := make(map[string]objectMap)
 	for k, res := range e.resourceKinds {
-		log.Printf("D! [inputs.vsphere] Discovering resources for %s", res.name)
+		e.Parent.Log.Debugf("Discovering resources for %s", res.name)
 		// Need to do this for all resource types even if they are not enabled
 		if res.enabled || k != "vm" {
 			rf := ResourceFilter{
@@ -457,7 +457,7 @@ func (e *Endpoint) discover(ctx context.Context) error {
 	if e.customAttrEnabled {
 		fields, err = client.GetCustomFields(ctx)
 		if err != nil {
-			log.Println("W! [inputs.vsphere] Could not load custom field metadata")
+			e.Parent.Log.Warn("Could not load custom field metadata")
 			fields = nil
 		}
 	}
@@ -481,10 +481,10 @@
 }

 func (e *Endpoint) simpleMetadataSelect(ctx context.Context, client *Client, res *resourceKind) {
-	log.Printf("D! [inputs.vsphere] Using fast metric metadata selection for %s", res.name)
+	e.Parent.Log.Debugf("Using fast metric metadata selection for %s", res.name)
 	m, err := client.CounterInfoByName(ctx)
 	if err != nil {
-		log.Printf("E! [inputs.vsphere]: Error while getting metric metadata. Discovery will be incomplete. Error: %s", err)
+		e.Parent.Log.Errorf("Getting metric metadata. Discovery will be incomplete. Error: %s", err.Error())
 		return
 	}
 	res.metrics = make(performance.MetricList, 0, len(res.include))
@@ -500,7 +500,7 @@ func (e *Endpoint) simpleMetadataSelect(ctx context.Context, client *Client, res
 			}
 			res.metrics = append(res.metrics, cnt)
 		} else {
-			log.Printf("W! [inputs.vsphere] Metric name %s is unknown. Will not be collected", s)
+			e.Parent.Log.Warnf("Metric name %s is unknown. Will not be collected", s)
 		}
 	}
 }
@@ -533,7 +533,7 @@ func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind,
 		te.Run(ctx, func() {
 			metrics, err := e.getMetadata(ctx, obj, res.sampling)
 			if err != nil {
-				log.Printf("E! [inputs.vsphere]: Error while getting metric metadata. Discovery will be incomplete. Error: %s", err)
+				e.Parent.Log.Errorf("Getting metric metadata. Discovery will be incomplete. Error: %s", err.Error())
 			}
 			mMap := make(map[string]types.PerfMetricId)
 			for _, m := range metrics {
@@ -546,7 +546,7 @@ func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind,
 					mMap[strconv.Itoa(int(m.CounterId))+"|"+m.Instance] = m
 				}
 			}
-			log.Printf("D! [inputs.vsphere] Found %d metrics for %s", len(mMap), obj.name)
+			e.Parent.Log.Debugf("Found %d metrics for %s", len(mMap), obj.name)
 			instInfoMux.Lock()
 			defer instInfoMux.Unlock()
 			if len(mMap) > len(res.metrics) {
@@ -605,7 +605,7 @@ func getClusters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (obje
 			defer cancel3()
 			err = o.Properties(ctx3, *r.Parent, []string{"parent"}, &folder)
 			if err != nil {
-				log.Printf("W! [inputs.vsphere] Error while getting folder parent: %e", err)
+				e.Parent.Log.Warnf("Error while getting folder parent: %s", err.Error())
 				p = nil
 			} else {
 				pp := folder.Parent.Reference()
@@ -702,7 +702,7 @@ func getVMs(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap
 			}
 			key, ok := e.customFields[val.Key]
 			if !ok {
-				log.Printf("W! [inputs.vsphere] Metadata for custom field %d not found. Skipping", val.Key)
+				e.Parent.Log.Warnf("Metadata for custom field %d not found. Skipping", val.Key)
 				continue
 			}
 			if e.customAttrFilter.Match(key) {
@@ -847,7 +847,7 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim
 		// Make sure endtime is always after start time. We may occasionally see samples from the future
 		// returned from vCenter. This is presumably due to time drift between vCenter and EXSi nodes.
 		if pq.StartTime.After(*pq.EndTime) {
-			log.Printf("D! [inputs.vsphere] Future sample. Res: %s, StartTime: %s, EndTime: %s, Now: %s", pq.Entity, *pq.StartTime, *pq.EndTime, now)
+			e.Parent.Log.Debugf("Future sample. Res: %s, StartTime: %s, EndTime: %s, Now: %s", pq.Entity, *pq.StartTime, *pq.EndTime, now)
 			end := start.Add(time.Second)
 			pq.EndTime = &end
 		}
@@ -861,7 +861,7 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim
 		// 2) We are at the last resource and have no more data to process.
 		// 3) The query contains more than 100,000 individual metrics
 		if mr > 0 || nRes >= e.Parent.MaxQueryObjects || len(pqs) > 100000 {
-			log.Printf("D! [inputs.vsphere]: Queueing query: %d objects, %d metrics (%d remaining) of type %s for %s. Processed objects: %d. Total objects %d",
+			e.Parent.Log.Debugf("Queueing query: %d objects, %d metrics (%d remaining) of type %s for %s. Processed objects: %d. Total objects %d",
 				len(pqs), metrics, mr, res.name, e.URL.Host, total+1, len(res.objects))

 			// Don't send work items if the context has been cancelled.
@@ -882,7 +882,7 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim
 	// Handle final partially filled chunk
 	if len(pqs) > 0 {
 		// Run collection job
-		log.Printf("D! [inputs.vsphere]: Queuing query: %d objects, %d metrics (0 remaining) of type %s for %s. Total objects %d (final chunk)",
+		e.Parent.Log.Debugf("Queuing query: %d objects, %d metrics (0 remaining) of type %s for %s. Total objects %d (final chunk)",
 			len(pqs), metrics, res.name, e.URL.Host, len(res.objects))
 		submitChunkJob(ctx, te, job, pqs)
 	}
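The two hunks above touch the chunking logic: query specs accumulate until either an object-count limit or the 100,000-metric ceiling is reached, and a trailing partial chunk is flushed at the end. A reduced sketch of that flow (the type and field names are illustrative stand-ins, not taken from the plugin):

package main

// querySpec stands in for types.PerfQuerySpec; metricCount is an assumption
// about how per-object metric counts would be tracked.
type querySpec struct {
	metricCount int
}

// chunkQueries groups specs until the object limit or total metric limit is
// hit, then flushes, ending with the partially filled final chunk.
func chunkQueries(specs []querySpec, maxObjects, maxMetrics int, submit func([]querySpec)) {
	chunk := make([]querySpec, 0, maxObjects)
	metrics := 0
	for _, s := range specs {
		chunk = append(chunk, s)
		metrics += s.metricCount
		if len(chunk) >= maxObjects || metrics > maxMetrics {
			submit(chunk)
			chunk = make([]querySpec, 0, maxObjects)
			metrics = 0
		}
	}
	if len(chunk) > 0 {
		submit(chunk) // final, partially filled chunk
	}
}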
@@ -914,18 +914,18 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc
 		if estInterval < s {
 			estInterval = s
 		}
-		log.Printf("D! [inputs.vsphere] Raw interval %s, padded: %s, estimated: %s", rawInterval, paddedInterval, estInterval)
+		e.Parent.Log.Debugf("Raw interval %s, padded: %s, estimated: %s", rawInterval, paddedInterval, estInterval)
 	}
-	log.Printf("D! [inputs.vsphere] Interval estimated to %s", estInterval)
+	e.Parent.Log.Debugf("Interval estimated to %s", estInterval)
 	res.lastColl = localNow

 	latest := res.latestSample
 	if !latest.IsZero() {
 		elapsed := now.Sub(latest).Seconds() + 5.0 // Allow 5 second jitter.
-		log.Printf("D! [inputs.vsphere]: Latest: %s, elapsed: %f, resource: %s", latest, elapsed, resourceType)
+		e.Parent.Log.Debugf("Latest: %s, elapsed: %f, resource: %s", latest, elapsed, resourceType)
 		if !res.realTime && elapsed < float64(res.sampling) {
 			// No new data would be available. We're outta here!
-			log.Printf("D! [inputs.vsphere]: Sampling period for %s of %d has not elapsed on %s",
+			e.Parent.Log.Debugf("Sampling period for %s of %d has not elapsed on %s",
 				resourceType, res.sampling, e.URL.Host)
 			return nil
 		}
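The sampling-period gate above skips a historical resource when less than one sampling interval has passed since its latest sample; the extra 5 seconds absorbs clock jitter between hosts. The same check as a standalone predicate (the function name is assumed):

package main

import "time"

// shouldCollect reports whether a new collection round can yield data.
// Real-time resources always collect; historical ones wait out the sampling
// period, with the 5 seconds of jitter allowance seen in collectResource.
func shouldCollect(now, latest time.Time, realTime bool, samplingSec int32) bool {
	if realTime || latest.IsZero() {
		return true
	}
	elapsed := now.Sub(latest).Seconds() + 5.0
	return elapsed >= float64(samplingSec)
}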
@@ -936,7 +936,7 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc
 	internalTags := map[string]string{"resourcetype": resourceType}
 	sw := NewStopwatchWithTags("gather_duration", e.URL.Host, internalTags)

-	log.Printf("D! [inputs.vsphere]: Collecting metrics for %d objects of type %s for %s",
+	e.Parent.Log.Debugf("Collecting metrics for %d objects of type %s for %s",
 		len(res.objects), resourceType, e.URL.Host)

 	count := int64(0)
@@ -948,9 +948,9 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc
 	e.chunkify(ctx, res, now, latest, acc,
 		func(chunk []types.PerfQuerySpec) {
 			n, localLatest, err := e.collectChunk(ctx, chunk, res, acc, now, estInterval)
-			log.Printf("D! [inputs.vsphere] CollectChunk for %s returned %d metrics", resourceType, n)
+			e.Parent.Log.Debugf("CollectChunk for %s returned %d metrics", resourceType, n)
 			if err != nil {
-				acc.AddError(errors.New("While collecting " + res.name + ": " + err.Error()))
+				acc.AddError(errors.New("while collecting " + res.name + ": " + err.Error()))
 			}
 			atomic.AddInt64(&count, int64(n))
 			tsMux.Lock()
@@ -960,7 +960,7 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc
 		}
 	})

-	log.Printf("D! [inputs.vsphere] Latest sample for %s set to %s", resourceType, latestSample)
+	e.Parent.Log.Debugf("Latest sample for %s set to %s", resourceType, latestSample)
 	if !latestSample.IsZero() {
 		res.latestSample = latestSample
 	}
@@ -1004,12 +1004,11 @@ func alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Dur
 			lastBucket = roundedTs
 		}
 	}
-	//log.Printf("D! [inputs.vsphere] Aligned samples: %d collapsed into %d", len(info), len(rInfo))
 	return rInfo, rValues
 }

 func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, res *resourceKind, acc telegraf.Accumulator, now time.Time, interval time.Duration) (int, time.Time, error) {
-	log.Printf("D! [inputs.vsphere] Query for %s has %d QuerySpecs", res.name, len(pqs))
+	e.Parent.Log.Debugf("Query for %s has %d QuerySpecs", res.name, len(pqs))
 	latestSample := time.Time{}
 	count := 0
 	resourceType := res.name
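alignSamples, whose commented-out debug line is deleted above, collapses raw samples onto interval boundaries before values are emitted. A sketch of the idea, under the assumption that samples landing in the same bucket are averaged (the plugin's exact collapse rule is not visible in this diff):

package main

import "time"

// alignToInterval rounds each timestamp to the interval and averages values
// that fall into the same bucket. A simplification of alignSamples above.
func alignToInterval(ts []time.Time, vals []int64, interval time.Duration) ([]time.Time, []float64) {
	var outTs []time.Time
	var outVals []float64
	var lastBucket time.Time
	n := 0
	for i, t := range ts {
		rounded := t.Round(interval)
		if len(outTs) == 0 || !rounded.Equal(lastBucket) {
			outTs = append(outTs, rounded)
			outVals = append(outVals, float64(vals[i]))
			lastBucket = rounded
			n = 1
			continue
		}
		// Same bucket: fold the value into a running average.
		outVals[len(outVals)-1] = (outVals[len(outVals)-1]*float64(n) + float64(vals[i])) / float64(n+1)
		n++
	}
	return outTs, outVals
}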
@@ -1030,14 +1029,14 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec,
 		return count, latestSample, err
 	}

-	log.Printf("D! [inputs.vsphere] Query for %s returned metrics for %d objects", resourceType, len(ems))
+	e.Parent.Log.Debugf("Query for %s returned metrics for %d objects", resourceType, len(ems))

 	// Iterate through results
 	for _, em := range ems {
 		moid := em.Entity.Reference().Value
 		instInfo, found := res.objects[moid]
 		if !found {
-			log.Printf("E! [inputs.vsphere]: MOID %s not found in cache. Skipping! (This should not happen!)", moid)
+			e.Parent.Log.Errorf("MOID %s not found in cache. Skipping! (This should not happen!)", moid)
 			continue
 		}
 		buckets := make(map[string]metricEntry)
@@ -1052,7 +1051,7 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec,
 			// Populate tags
 			objectRef, ok := res.objects[moid]
 			if !ok {
-				log.Printf("E! [inputs.vsphere]: MOID %s not found in cache. Skipping", moid)
+				e.Parent.Log.Errorf("MOID %s not found in cache. Skipping", moid)
 				continue
 			}
 			e.populateTags(&objectRef, resourceType, res, t, &v)
@@ -1064,7 +1063,7 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec,
 			// According to the docs, SampleInfo and Value should have the same length, but we've seen corrupted
 			// data coming back with missing values. Take care of that gracefully!
 			if idx >= len(alignedValues) {
-				log.Printf("D! [inputs.vsphere] len(SampleInfo)>len(Value) %d > %d", len(alignedInfo), len(alignedValues))
+				e.Parent.Log.Debugf("Len(SampleInfo)>len(Value) %d > %d", len(alignedInfo), len(alignedValues))
 				break
 			}
 			ts := sample.Timestamp
@@ -1085,7 +1084,7 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec,
 			// Percentage values must be scaled down by 100.
 			info, ok := metricInfo[name]
 			if !ok {
-				log.Printf("E! [inputs.vsphere]: Could not determine unit for %s. Skipping", name)
+				e.Parent.Log.Errorf("Could not determine unit for %s. Skipping", name)
 			}
 			v := alignedValues[idx]
 			if info.UnitInfo.GetElementDescription().Key == "percent" {
@@ -1103,7 +1102,7 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec,
 				e.hwMarks.Put(moid, ts)
 			}
 			if nValues == 0 {
-				log.Printf("D! [inputs.vsphere]: Missing value for: %s, %s", name, objectRef.name)
+				e.Parent.Log.Debugf("Missing value for: %s, %s", name, objectRef.name)
 				continue
 			}
 		}
plugins/inputs/vsphere/finder.go

@@ -2,7 +2,6 @@ package vsphere

 import (
 	"context"
-	"log"
 	"reflect"
 	"strings"

@@ -54,7 +53,7 @@ func (f *Finder) Find(ctx context.Context, resType, path string, dst interface{}
 		return err
 	}
 	objectContentToTypedArray(objs, dst)
-	log.Printf("D! [inputs.vsphere] Find(%s, %s) returned %d objects", resType, path, len(objs))
+	f.client.log.Debugf("Find(%s, %s) returned %d objects", resType, path, len(objs))
 	return nil
 }

plugins/inputs/vsphere/tscache.go

@@ -34,7 +34,7 @@ func (t *TSCache) Purge() {
 			n++
 		}
 	}
-	log.Printf("D! [inputs.vsphere] Purged timestamp cache. %d deleted with %d remaining", n, len(t.table))
+	log.Printf("D! [inputs.vsphere] purged timestamp cache. %d deleted with %d remaining", n, len(t.table))
 }

 // IsNew returns true if the supplied timestamp for the supplied key is more recent than the
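TSCache, touched above, is a high-water-mark table of the latest timestamp seen per key: Purge trims stale entries and IsNew tells the collector whether a sample is worth emitting. A compact sketch (the field layout and lookback handling are assumptions drawn from the method names):

package main

import "time"

// tsCache is a minimal stand-in for the plugin's TSCache.
type tsCache struct {
	window time.Duration        // how far back entries are kept
	table  map[string]time.Time // latest timestamp seen per key
}

// Purge deletes entries older than the lookback window.
func (c *tsCache) Purge() {
	cutoff := time.Now().Add(-c.window)
	for k, v := range c.table {
		if v.Before(cutoff) {
			delete(c.table, k)
		}
	}
}

// IsNew reports whether ts is more recent than the stored timestamp for key.
func (c *tsCache) IsNew(key string, ts time.Time) bool {
	last, ok := c.table[key]
	return !ok || ts.After(last)
}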
plugins/inputs/vsphere/vsphere.go

@@ -2,7 +2,6 @@ package vsphere

 import (
 	"context"
-	"log"
 	"sync"
 	"time"

@@ -58,6 +57,8 @@ type VSphere struct {

 	// Mix in the TLS/SSL goodness from core
 	tls.ClientConfig
+
+	Log telegraf.Logger
 }

 var sampleConfig = `
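The exported Log field added to the VSphere struct is the hook for per-plugin logging: Telegraf assigns a logger, carrying the plugin name and any configured alias, to such a field before the plugin starts, which is what lets the code below drop the global log package. A minimal consuming plugin might look like this (the plugin itself is hypothetical):

package main

import "github.com/influxdata/telegraf"

// ExamplePlugin shows the injection point only; it is not a real plugin.
type ExamplePlugin struct {
	Log telegraf.Logger // populated by Telegraf before Start is called
}

func (p *ExamplePlugin) Start(acc telegraf.Accumulator) error {
	p.Log.Info("Starting plugin")
	return nil
}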
@@ -243,7 +244,7 @@ func (v *VSphere) Description() string {
 // Start is called from telegraf core when a plugin is started and allows it to
 // perform initialization tasks.
 func (v *VSphere) Start(acc telegraf.Accumulator) error {
-	log.Println("D! [inputs.vsphere]: Starting plugin")
+	v.Log.Info("Starting plugin")
 	ctx, cancel := context.WithCancel(context.Background())
 	v.cancel = cancel

@@ -266,7 +267,7 @@
 // Stop is called from telegraf core when a plugin is stopped and allows it to
 // perform shutdown tasks.
 func (v *VSphere) Stop() {
-	log.Println("D! [inputs.vsphere]: Stopping plugin")
+	v.Log.Info("Stopping plugin")
 	v.cancel()

 	// Wait for all endpoints to finish. No need to wait for
@@ -275,7 +276,7 @@
 	// wait for any discovery to complete by trying to grab the
 	// "busy" mutex.
 	for _, ep := range v.endpoints {
-		log.Printf("D! [inputs.vsphere]: Waiting for endpoint %s to finish", ep.URL.Host)
+		v.Log.Debugf("Waiting for endpoint %q to finish", ep.URL.Host)
 		func() {
 			ep.busy.Lock() // Wait until discovery is finished
 			defer ep.busy.Unlock()
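Stop waits for each endpoint's in-flight discovery by taking and immediately releasing the endpoint's busy mutex; once the lock is acquired, no discovery can still be running. The trick in isolation:

package main

import "sync"

// waitForIdle blocks until whoever holds busy releases it, then lets go
// immediately, like the anonymous func in Stop above.
func waitForIdle(busy *sync.Mutex) {
	busy.Lock() // blocks while the worker is busy
	busy.Unlock()
}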
Some files were not shown because too many files have changed in this diff.