Major Logging Overhaul
In this commit:

- centralize logging output handler.
- set global Info/Debug/Error log levels based on config file or flags.
- remove per-plugin debug arg handling.
- add an I!, D!, or E! prefix to every log message.
- add a configuration option to specify where to send logs.

closes #1786
parent 78ced6bc30 · commit c7834209d2
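The level prefixes are what the new logger package keys on: SetupLogging wraps the standard logger's output in a wlog writer, and the leading D!/I!/E! token on each message decides whether the line survives the configured level (debug lowers the level to DEBUG, quiet raises it to ERROR, logfile picks the destination). A minimal sketch of the intended call pattern, assuming wlog filters on that leading token the way logger/logger.go (added below) wires it up; the logfile path is only an example:

    package main

    import (
        "log"

        "github.com/influxdata/telegraf/logger"
    )

    func main() {
        // debug and quiet normally come from the [agent] config table or the
        // -debug/-quiet flags; logfile is the new [agent] "logfile" option
        // ("" means stdout). The path below is only illustrative.
        logger.SetupLogging(false, false, "/tmp/telegraf.log")

        log.Printf("I! an informational message") // written at the default level
        log.Printf("D! a debug-only message")     // dropped unless debug = true
        log.Printf("E! an error message")         // written even when quiet = true
    }

With quiet = true only the E! line would be written; with debug = true all three appear.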
@@ -29,6 +29,7 @@ github.com/hpcloud/tail b2940955ab8b26e19d43a43c4da0475dd81bdb56
 github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
 github.com/influxdata/influxdb e094138084855d444195b252314dfee9eae34cab
 github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
+github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
 github.com/kardianos/osext 29ae4ffbc9a6fe9fb2bc5029050ce6996ea1d3bc
 github.com/kardianos/service 5e335590050d6d00f3aa270217d288dda1c94d0a
 github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720

@@ -132,7 +132,7 @@ func (ac *accumulator) makeMetric(
     // NaNs are invalid values in influxdb, skip measurement
     if math.IsNaN(val) || math.IsInf(val, 0) {
         if ac.debug {
-            log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
+            log.Printf("I! Measurement [%s] field [%s] has a NaN or Inf "+
                 "field, skipping",
                 measurement, k)
         }

@@ -163,7 +163,7 @@ func (ac *accumulator) makeMetric(
     m, err = telegraf.NewMetric(measurement, tags, fields, timestamp)
     }
     if err != nil {
-        log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
+        log.Printf("E! Error adding point [%s]: %s\n", measurement, err.Error())
         return nil
     }

@@ -182,7 +182,7 @@ func (ac *accumulator) AddError(err error) {
     }
     atomic.AddUint64(&ac.errCount, 1)
     //TODO suppress/throttle consecutive duplicate errors?
-    log.Printf("ERROR in input [%s]: %s", ac.inputConfig.Name, err)
+    log.Printf("E! Error in input [%s]: %s", ac.inputConfig.Name, err)
 }

 func (ac *accumulator) Debug() bool {

@@ -49,18 +49,16 @@ func (a *Agent) Connect() error {
     switch ot := o.Output.(type) {
     case telegraf.ServiceOutput:
         if err := ot.Start(); err != nil {
-            log.Printf("Service for output %s failed to start, exiting\n%s\n",
+            log.Printf("E! Service for output %s failed to start, exiting\n%s\n",
                 o.Name, err.Error())
             return err
         }
     }

-    if a.Config.Agent.Debug {
-        log.Printf("Attempting connection to output: %s\n", o.Name)
-    }
+    log.Printf("D! Attempting connection to output: %s\n", o.Name)
     err := o.Output.Connect()
     if err != nil {
-        log.Printf("Failed to connect to output %s, retrying in 15s, "+
+        log.Printf("E! Failed to connect to output %s, retrying in 15s, "+
             "error was '%s' \n", o.Name, err)
         time.Sleep(15 * time.Second)
         err = o.Output.Connect()

@@ -68,9 +66,7 @@ func (a *Agent) Connect() error {
             return err
         }
     }
-    if a.Config.Agent.Debug {
-        log.Printf("Successfully connected to output: %s\n", o.Name)
-    }
+    log.Printf("D! Successfully connected to output: %s\n", o.Name)
     }
     return nil
 }

@@ -92,9 +88,9 @@ func panicRecover(input *models.RunningInput) {
     if err := recover(); err != nil {
         trace := make([]byte, 2048)
         runtime.Stack(trace, true)
-        log.Printf("FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
+        log.Printf("E! FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
             input.Name, err, trace)
-        log.Println("PLEASE REPORT THIS PANIC ON GITHUB with " +
+        log.Println("E! PLEASE REPORT THIS PANIC ON GITHUB with " +
             "stack trace, configuration, and OS information: " +
             "https://github.com/influxdata/telegraf/issues/new")
     }

@@ -117,7 +113,6 @@ func (a *Agent) gatherer(
     var outerr error

     acc := NewAccumulator(input.Config, metricC)
-    acc.SetDebug(a.Config.Agent.Debug)
     acc.SetPrecision(a.Config.Agent.Precision.Duration,
         a.Config.Agent.Interval.Duration)
     acc.setDefaultTags(a.Config.Tags)

@@ -131,10 +126,8 @@ func (a *Agent) gatherer(
     if outerr != nil {
         return outerr
     }
-    if a.Config.Agent.Debug {
-        log.Printf("Input [%s] gathered metrics, (%s interval) in %s\n",
-            input.Name, interval, elapsed)
-    }
+    log.Printf("D! Input [%s] gathered metrics, (%s interval) in %s\n",
+        input.Name, interval, elapsed)

     select {
     case <-shutdown:

@@ -167,11 +160,11 @@ func gatherWithTimeout(
     select {
     case err := <-done:
         if err != nil {
-            log.Printf("ERROR in input [%s]: %s", input.Name, err)
+            log.Printf("E! ERROR in input [%s]: %s", input.Name, err)
         }
         return
     case <-ticker.C:
-        log.Printf("ERROR: input [%s] took longer to collect than "+
+        log.Printf("E! ERROR: input [%s] took longer to collect than "+
             "collection interval (%s)",
             input.Name, timeout)
         continue

@@ -244,7 +237,7 @@ func (a *Agent) flush() {
     defer wg.Done()
     err := output.Write()
     if err != nil {
-        log.Printf("Error writing to output [%s]: %s\n",
+        log.Printf("E! Error writing to output [%s]: %s\n",
             output.Name, err.Error())
     }
     }(o)

@@ -264,7 +257,7 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
     for {
         select {
         case <-shutdown:
-            log.Println("Hang on, flushing any cached metrics before shutdown")
+            log.Println("I! Hang on, flushing any cached metrics before shutdown")
             a.flush()
             return nil
         case <-ticker.C:

@@ -302,9 +295,9 @@ func copyMetric(m telegraf.Metric) telegraf.Metric {
 func (a *Agent) Run(shutdown chan struct{}) error {
     var wg sync.WaitGroup

-    log.Printf("Agent Config: Interval:%s, Debug:%#v, Quiet:%#v, Hostname:%#v, "+
+    log.Printf("I! Agent Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+
         "Flush Interval:%s \n",
-        a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, a.Config.Agent.Quiet,
+        a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet,
         a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)

     // channel shared between all input threads for accumulating metrics

@@ -315,13 +308,12 @@ func (a *Agent) Run(shutdown chan struct{}) error {
     switch p := input.Input.(type) {
     case telegraf.ServiceInput:
         acc := NewAccumulator(input.Config, metricC)
-        acc.SetDebug(a.Config.Agent.Debug)
         // Service input plugins should set their own precision of their
         // metrics.
         acc.DisablePrecision()
         acc.setDefaultTags(a.Config.Tags)
         if err := p.Start(acc); err != nil {
-            log.Printf("Service for input %s failed to start, exiting\n%s\n",
+            log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
                 input.Name, err.Error())
             return err
         }

@@ -339,7 +331,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
     go func() {
         defer wg.Done()
         if err := a.flusher(shutdown, metricC); err != nil {
-            log.Printf("Flusher routine failed, exiting: %s\n", err.Error())
+            log.Printf("E! Flusher routine failed, exiting: %s\n", err.Error())
             close(shutdown)
         }
     }()

@@ -354,7 +346,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
     go func(in *models.RunningInput, interv time.Duration) {
         defer wg.Done()
         if err := a.gatherer(shutdown, in, interv, metricC); err != nil {
-            log.Printf(err.Error())
+            log.Printf("E! " + err.Error())
         }
     }(input, interval)
     }

@@ -12,15 +12,17 @@ import (

     "github.com/influxdata/telegraf/agent"
     "github.com/influxdata/telegraf/internal/config"
+    "github.com/influxdata/telegraf/logger"
     "github.com/influxdata/telegraf/plugins/inputs"
     _ "github.com/influxdata/telegraf/plugins/inputs/all"
     "github.com/influxdata/telegraf/plugins/outputs"
     _ "github.com/influxdata/telegraf/plugins/outputs/all"

     "github.com/kardianos/service"
 )

 var fDebug = flag.Bool("debug", false,
-    "show metrics as they're generated to stdout")
+    "turn on debug logging")
 var fQuiet = flag.Bool("quiet", false,
     "run in quiet mode")
 var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")

@@ -109,12 +111,9 @@ Examples:
 telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
 `

-var logger service.Logger

 var stop chan struct{}

 var srvc service.Service
-var svcConfig *service.Config

 type program struct{}

@@ -212,13 +211,12 @@ func reloadLoop(stop chan struct{}, s service.Service) {
     log.Fatal(err)
     }

-    if *fDebug {
-        ag.Config.Agent.Debug = true
-    }
-
-    if *fQuiet {
-        ag.Config.Agent.Quiet = true
-    }
+    // Setup logging
+    logger.SetupLogging(
+        ag.Config.Agent.Debug || *fDebug,
+        ag.Config.Agent.Quiet || *fQuiet,
+        ag.Config.Agent.Logfile,
+    )

     if *fTest {
         err = ag.Test()

@@ -243,7 +241,7 @@ func reloadLoop(stop chan struct{}, s service.Service) {
     close(shutdown)
     }
     if sig == syscall.SIGHUP {
-        log.Printf("Reloading Telegraf config\n")
+        log.Printf("I! Reloading Telegraf config\n")
         <-reload
         reload <- true
         close(shutdown)

@@ -253,10 +251,10 @@ func reloadLoop(stop chan struct{}, s service.Service) {
     }
     }()

-    log.Printf("Starting Telegraf (version %s)\n", version)
-    log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
-    log.Printf("Loaded inputs: %s", strings.Join(c.InputNames(), " "))
-    log.Printf("Tags enabled: %s", c.ListTags())
+    log.Printf("I! Starting Telegraf (version %s)\n", version)
+    log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
+    log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
+    log.Printf("I! Tags enabled: %s", c.ListTags())

     if *fPidfile != "" {
         f, err := os.Create(*fPidfile)

@@ -293,8 +291,9 @@ func (p *program) Stop(s service.Service) error {
 }

 func main() {
+    flag.Parse()
     if runtime.GOOS == "windows" {
-        svcConfig = &service.Config{
+        svcConfig := &service.Config{
             Name: "telegraf",
             DisplayName: "Telegraf Data Collector Service",
             Description: "Collects data using a series of plugins and publishes it to" +

@@ -307,13 +306,8 @@ func main() {
     if err != nil {
         log.Fatal(err)
     }
-    logger, err = s.Logger(nil)
-    if err != nil {
-        log.Fatal(err)
-    }
-    // Handle the -service flag here to prevent any issues with tooling that may not have an interactive
-    // session, e.g. installing from Ansible
-    flag.Parse()
+    // Handle the -service flag here to prevent any issues with tooling that
+    // may not have an interactive session, e.g. installing from Ansible.
     if *fService != "" {
         if *fConfig != "" {
             (*svcConfig).Arguments = []string{"-config", *fConfig}

@@ -325,7 +319,7 @@ func main() {
     } else {
         err = s.Run()
         if err != nil {
-            logger.Error(err)
+            log.Println("E! " + err.Error())
         }
     }
     } else {

@@ -30,12 +30,15 @@
 ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
 round_interval = true

-## Telegraf will send metrics to outputs in batches of at
-## most metric_batch_size metrics.
+## Telegraf will send metrics to outputs in batches of at most
+## metric_batch_size metrics.
+## This controls the size of writes that Telegraf sends to output plugins.
 metric_batch_size = 1000

 ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
 ## output, and will flush this buffer on a successful write. Oldest metrics
 ## are dropped first when this buffer fills.
+## This buffer only fills when writes fail to output plugin(s).
 metric_buffer_limit = 10000

 ## Collection jitter is used to jitter the collection by a random amount.

@@ -57,10 +60,15 @@
 ## Precision will NOT be used for service inputs, such as logparser and statsd.
 ## Valid values are "ns", "us" (or "µs"), "ms", "s".
 precision = ""
-## Run telegraf in debug mode
+
+## Logging configuration:
+## Run telegraf with debug log messages.
 debug = false
-## Run telegraf in quiet mode
+## Run telegraf in quiet mode (error log messages only).
 quiet = false
+## Specify the log file name. The empty string means to log to stdout.
+logfile = ""

 ## Override default hostname, if empty use os.Hostname()
 hostname = ""
 ## If set to true, do no set the "host" tag in the telegraf agent.

@@ -1064,8 +1072,6 @@
 # # "tasks",
 # # "messages",
 # # ]
-# ## Include mesos tasks statistics, default is false
-# # slave_tasks = true


 # # Read metrics from one or many MongoDB servers

@@ -1442,25 +1448,29 @@
 # # Retrieves SNMP values from remote agents
 # [[inputs.snmp]]
 # agents = [ "127.0.0.1:161" ]
+# ## Timeout for each SNMP query.
 # timeout = "5s"
+# ## Number of retries to attempt within timeout.
+# retries = 3
+# ## SNMP version, values can be 1, 2, or 3
 # version = 2
 #
-# # SNMPv1 & SNMPv2 parameters
+# ## SNMP community string.
 # community = "public"
 #
-# # SNMPv2 & SNMPv3 parameters
-# max_repetitions = 50
+# ## The GETBULK max-repetitions parameter
+# max_repetitions = 10
 #
-# # SNMPv3 parameters
+# ## SNMPv3 auth parameters
 # #sec_name = "myuser"
 # #auth_protocol = "md5" # Values: "MD5", "SHA", ""
-# #auth_password = "password123"
+# #auth_password = "pass"
 # #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
 # #context_name = ""
 # #priv_protocol = "" # Values: "DES", "AES", ""
 # #priv_password = ""
 #
-# # measurement name
+# ## measurement name
 # name = "system"
 # [[inputs.snmp.field]]
 # name = "hostname"

@@ -1475,7 +1485,7 @@
 # oid = "HOST-RESOURCES-MIB::hrMemorySize"
 #
 # [[inputs.snmp.table]]
-# # measurement name
+# ## measurement name
 # name = "remote_servers"
 # inherit_tags = [ "hostname" ]
 # [[inputs.snmp.table.field]]

@@ -1490,7 +1500,7 @@
 # oid = ".1.0.0.0.1.2"
 #
 # [[inputs.snmp.table]]
-# # auto populate table's fields using the MIB
+# ## auto populate table's fields using the MIB
 # oid = "HOST-RESOURCES-MIB::hrNetworkTable"

@@ -42,10 +42,14 @@
 ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
 flush_jitter = "0s"

+## Logging configuration:
 ## Run telegraf in debug mode
 debug = false
 ## Run telegraf in quiet mode
 quiet = false
+## Specify the log file name. The empty string means to log to stdout.
+logfile = "/Program Files/Telegraf/telegraf.log"

 ## Override default hostname, if empty use os.Hostname()
 hostname = ""

@@ -85,7 +89,7 @@
 # Windows Performance Counters plugin.
 # These are the recommended method of monitoring system metrics on windows,
 # as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI,
-# which utilizes a lot of system resources.
+# which utilize more system resources.
 #
 # See more configuration examples at:
 # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters

@@ -95,70 +99,104 @@
 # Processor usage, alternative to native, reports on a per core.
 ObjectName = "Processor"
 Instances = ["*"]
-Counters = ["% Idle Time", "% Interrupt Time", "% Privileged Time", "% User Time", "% Processor Time"]
+Counters = [
+  "% Idle Time",
+  "% Interrupt Time",
+  "% Privileged Time",
+  "% User Time",
+  "% Processor Time",
+]
 Measurement = "win_cpu"
-#IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
+# Set to true to include _Total instance when querying for all (*).
+#IncludeTotal=false

 [[inputs.win_perf_counters.object]]
 # Disk times and queues
 ObjectName = "LogicalDisk"
 Instances = ["*"]
-Counters = ["% Idle Time", "% Disk Time","% Disk Read Time", "% Disk Write Time", "% User Time", "Current Disk Queue Length"]
+Counters = [
+  "% Idle Time",
+  "% Disk Time","% Disk Read Time",
+  "% Disk Write Time",
+  "% User Time",
+  "Current Disk Queue Length",
+]
 Measurement = "win_disk"
-#IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
+# Set to true to include _Total instance when querying for all (*).
+#IncludeTotal=false

 [[inputs.win_perf_counters.object]]
 ObjectName = "System"
-Counters = ["Context Switches/sec","System Calls/sec"]
+Counters = [
+  "Context Switches/sec",
+  "System Calls/sec",
+]
 Instances = ["------"]
 Measurement = "win_system"
-#IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
+# Set to true to include _Total instance when querying for all (*).
+#IncludeTotal=false

 [[inputs.win_perf_counters.object]]
-# Example query where the Instance portion must be removed to get data back, such as from the Memory object.
+# Example query where the Instance portion must be removed to get data back,
+# such as from the Memory object.
 ObjectName = "Memory"
-Counters = ["Available Bytes","Cache Faults/sec","Demand Zero Faults/sec","Page Faults/sec","Pages/sec","Transition Faults/sec","Pool Nonpaged Bytes","Pool Paged Bytes"]
-Instances = ["------"] # Use 6 x - to remove the Instance bit from the query.
+Counters = [
+  "Available Bytes",
+  "Cache Faults/sec",
+  "Demand Zero Faults/sec",
+  "Page Faults/sec",
+  "Pages/sec",
+  "Transition Faults/sec",
+  "Pool Nonpaged Bytes",
+  "Pool Paged Bytes",
+]
+# Use 6 x - to remove the Instance bit from the query.
+Instances = ["------"]
 Measurement = "win_mem"
-#IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
+# Set to true to include _Total instance when querying for all (*).
+#IncludeTotal=false


 # Windows system plugins using WMI (disabled by default, using
 # win_perf_counters over WMI is recommended)

-# Read metrics about cpu usage
-#[[inputs.cpu]]
-## Whether to report per-cpu stats or not
-#percpu = true
-## Whether to report total system cpu stats or not
-#totalcpu = true
-## Comment this line if you want the raw CPU time metrics
-#fielddrop = ["time_*"]
+# # Read metrics about cpu usage
+# [[inputs.cpu]]
+# ## Whether to report per-cpu stats or not
+# percpu = true
+# ## Whether to report total system cpu stats or not
+# totalcpu = true
+# ## Comment this line if you want the raw CPU time metrics
+# fielddrop = ["time_*"]

-# Read metrics about disk usage by mount point
-#[[inputs.disk]]
-## By default, telegraf gather stats for all mountpoints.
-## Setting mountpoints will restrict the stats to the specified mountpoints.
-## mount_points=["/"]
-
-## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
-## present on /run, /var/run, /dev/shm or /dev).
-#ignore_fs = ["tmpfs", "devtmpfs"]
+# # Read metrics about disk usage by mount point
+# [[inputs.disk]]
+# ## By default, telegraf gather stats for all mountpoints.
+# ## Setting mountpoints will restrict the stats to the specified mountpoints.
+# ## mount_points=["/"]
+#
+# ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
+# ## present on /run, /var/run, /dev/shm or /dev).
+# # ignore_fs = ["tmpfs", "devtmpfs"]

-# Read metrics about disk IO by device
-#[[inputs.diskio]]
-## By default, telegraf will gather stats for all devices including
-## disk partitions.
-## Setting devices will restrict the stats to the specified devices.
-## devices = ["sda", "sdb"]
-## Uncomment the following line if you do not need disk serial numbers.
-## skip_serial_number = true
+# # Read metrics about disk IO by device
+# [[inputs.diskio]]
+# ## By default, telegraf will gather stats for all devices including
+# ## disk partitions.
+# ## Setting devices will restrict the stats to the specified devices.
+# ## devices = ["sda", "sdb"]
+# ## Uncomment the following line if you do not need disk serial numbers.
+# ## skip_serial_number = true

-# Read metrics about memory usage
-#[[inputs.mem]]
-# no configuration
+# # Read metrics about memory usage
+# [[inputs.mem]]
+# # no configuration

-# Read metrics about swap memory usage
-#[[inputs.swap]]
-# no configuration
+# # Read metrics about swap memory usage
+# [[inputs.swap]]
+# # no configuration

@@ -125,6 +125,9 @@ type AgentConfig struct {
     // Debug is the option for running in debug mode
     Debug bool

+    // Logfile specifies the file to send logs to
+    Logfile string
+
     // Quiet is the option for running in quiet mode
     Quiet bool
     Hostname string

@@ -195,12 +198,15 @@ var header = `# Telegraf Configuration
 ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
 round_interval = true

-## Telegraf will send metrics to outputs in batches of at
-## most metric_batch_size metrics.
+## Telegraf will send metrics to outputs in batches of at most
+## metric_batch_size metrics.
+## This controls the size of writes that Telegraf sends to output plugins.
 metric_batch_size = 1000

 ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
 ## output, and will flush this buffer on a successful write. Oldest metrics
 ## are dropped first when this buffer fills.
+## This buffer only fills when writes fail to output plugin(s).
 metric_buffer_limit = 10000

 ## Collection jitter is used to jitter the collection by a random amount.

@@ -222,10 +228,15 @@ var header = `# Telegraf Configuration
 ## Precision will NOT be used for service inputs, such as logparser and statsd.
 ## Valid values are "ns", "us" (or "µs"), "ms", "s".
 precision = ""
-## Run telegraf in debug mode
+
+## Logging configuration:
+## Run telegraf with debug log messages.
 debug = false
-## Run telegraf in quiet mode
+## Run telegraf in quiet mode (error log messages only).
 quiet = false
+## Specify the log file name. The empty string means to log to stdout.
+logfile = ""

 ## Override default hostname, if empty use os.Hostname()
 hostname = ""
 ## If set to true, do no set the "host" tag in the telegraf agent.

@@ -435,7 +446,7 @@ func getDefaultConfigPath() (string, error) {
     }
     for _, path := range []string{envfile, homefile, etcfile} {
         if _, err := os.Stat(path); err == nil {
-            log.Printf("Using config file: %s", path)
+            log.Printf("I! Using config file: %s", path)
             return path, nil
         }
     }

@@ -466,7 +477,7 @@ func (c *Config) LoadConfig(path string) error {
     return fmt.Errorf("%s: invalid configuration", path)
     }
     if err = config.UnmarshalTable(subTable, c.Tags); err != nil {
-        log.Printf("Could not parse [global_tags] config\n")
+        log.Printf("E! Could not parse [global_tags] config\n")
         return fmt.Errorf("Error parsing %s, %s", path, err)
     }
     }

@@ -479,7 +490,7 @@ func (c *Config) LoadConfig(path string) error {
     return fmt.Errorf("%s: invalid configuration", path)
     }
     if err = config.UnmarshalTable(subTable, c.Agent); err != nil {
-        log.Printf("Could not parse [agent] config\n")
+        log.Printf("E! Could not parse [agent] config\n")
         return fmt.Errorf("Error parsing %s, %s", path, err)
     }
     }

@@ -832,7 +843,7 @@ func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
     if node, ok := tbl.Fields["tags"]; ok {
         if subtbl, ok := node.(*ast.Table); ok {
             if err := config.UnmarshalTable(subtbl, cp.Tags); err != nil {
-                log.Printf("Could not parse tags for input %s\n", name)
+                log.Printf("E! Could not parse tags for input %s\n", name)
             }
         }
     }

@@ -198,7 +198,7 @@ func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
     return err
     case <-timer.C:
         if err := c.Process.Kill(); err != nil {
-            log.Printf("FATAL error killing process: %s", err)
+            log.Printf("E! FATAL error killing process: %s", err)
             return err
         }
         // wait for the command to return after killing it

@@ -85,7 +85,7 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
 // Write writes all cached points to this output.
 func (ro *RunningOutput) Write() error {
     if !ro.Quiet {
-        log.Printf("Output [%s] buffer fullness: %d / %d metrics. "+
+        log.Printf("I! Output [%s] buffer fullness: %d / %d metrics. "+
             "Total gathered metrics: %d. Total dropped metrics: %d.",
             ro.Name,
             ro.failMetrics.Len()+ro.metrics.Len(),

@@ -142,7 +142,7 @@ func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
     elapsed := time.Since(start)
     if err == nil {
         if !ro.Quiet {
-            log.Printf("Output [%s] wrote batch of %d metrics in %s\n",
+            log.Printf("I! Output [%s] wrote batch of %d metrics in %s\n",
                 ro.Name, len(metrics), elapsed)
         }
     }

@@ -0,0 +1,58 @@
+package logger
+
+import (
+    "io"
+    "log"
+    "os"
+
+    "github.com/influxdata/wlog"
+)
+
+// newTelegrafWriter returns a logging-wrapped writer.
+func newTelegrafWriter(w io.Writer) io.Writer {
+    return &telegrafLog{
+        writer: wlog.NewWriter(w),
+    }
+}
+
+type telegrafLog struct {
+    writer io.Writer
+}
+
+func (t *telegrafLog) Write(p []byte) (n int, err error) {
+    return t.writer.Write(p)
+}
+
+// SetupLogging configures the logging output.
+// debug will set the log level to DEBUG
+// quiet will set the log level to ERROR
+// logfile will direct the logging output to a file. Empty string is
+// interpreted as stdout. If there is an error opening the file the
+// logger will fallback to stdout.
+func SetupLogging(debug, quiet bool, logfile string) {
+    if debug {
+        wlog.SetLevel(wlog.DEBUG)
+    }
+    if quiet {
+        wlog.SetLevel(wlog.ERROR)
+    }
+
+    var oFile *os.File
+    if logfile != "" {
+        if _, err := os.Stat(logfile); os.IsNotExist(err) {
+            if oFile, err = os.Create(logfile); err != nil {
+                log.Printf("E! Unable to create %s (%s), using stdout", logfile, err)
+                oFile = os.Stdout
+            }
+        } else {
+            if oFile, err = os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY, os.ModeAppend); err != nil {
+                log.Printf("E! Unable to append to %s (%s), using stdout", logfile, err)
+                oFile = os.Stdout
+            }
+        }
+    } else {
+        oFile = os.Stdout
+    }
+
+    log.SetOutput(newTelegrafWriter(oFile))
+}

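Because the writer above is installed globally with log.SetOutput, individual plugins no longer need their own debug switches; they simply tag each message and the agent-wide level decides what gets written. A rough, self-contained sketch of that plugin-side pattern follows; the poll function and messages are invented for illustration, and it assumes SetupLogging (or an equivalent wlog-backed writer) has already been called as in the sketch near the top:

    package main

    import (
        "errors"
        "log"
    )

    // poll stands in for whatever work a plugin's Gather would do; it is
    // purely illustrative and always fails so the E! path is exercised.
    func poll() error { return errors.New("connection refused") }

    func gather() error {
        log.Printf("D! polling the example endpoint") // dropped unless the DEBUG level is set
        if err := poll(); err != nil {
            log.Printf("E! poll failed: %s", err) // still written at the ERROR (quiet) level
            return err
        }
        log.Printf("I! gathered example metrics")
        return nil
    }

    func main() {
        // Without a wlog-backed writer installed, the prefixes are just text.
        gather()
    }
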
@@ -88,7 +88,7 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
     if err == nil {
         fields[strings.Replace(k, "-", "_", -1)] = val
     } else {
-        log.Printf("skipping aerospike field %v with int64 overflow", k)
+        log.Printf("I! skipping aerospike field %v with int64 overflow", k)
     }
     }
     acc.AddFields("aerospike_node", fields, tags, time.Now())

@@ -121,7 +121,7 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
     if err == nil {
         nFields[strings.Replace(parts[0], "-", "_", -1)] = val
     } else {
-        log.Printf("skipping aerospike field %v with int64 overflow", parts[0])
+        log.Printf("I! skipping aerospike field %v with int64 overflow", parts[0])
     }
     }
     acc.AddFields("aerospike_namespace", nFields, nTags, time.Now())

@@ -274,7 +274,7 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
     m = newCassandraMetric(serverTokens["host"], metric, acc)
     } else {
         // unsupported metric type
-        log.Printf("Unsupported Cassandra metric [%s], skipping",
+        log.Printf("I! Unsupported Cassandra metric [%s], skipping",
             metric)
         continue
     }

@@ -100,12 +100,12 @@ func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error {
     for _, s := range sockets {
         dump, err := perfDump(c.CephBinary, s)
         if err != nil {
-            log.Printf("error reading from socket '%s': %v", s.socket, err)
+            log.Printf("E! error reading from socket '%s': %v", s.socket, err)
             continue
         }
         data, err := parseDump(dump)
         if err != nil {
-            log.Printf("error parsing dump from socket '%s': %v", s.socket, err)
+            log.Printf("E! error parsing dump from socket '%s': %v", s.socket, err)
             continue
         }
         for tag, metrics := range *data {

@@ -293,7 +293,7 @@ func flatten(data interface{}) []*metric {
     }
     }
     default:
-        log.Printf("Ignoring unexpected type '%T' for value %v", val, val)
+        log.Printf("I! Ignoring unexpected type '%T' for value %v", val, val)
     }

     return metrics

@@ -93,13 +93,14 @@ func (c *Conntrack) Gather(acc telegraf.Accumulator) error {

     contents, err := ioutil.ReadFile(fName)
     if err != nil {
-        log.Printf("failed to read file '%s': %v", fName, err)
+        log.Printf("E! failed to read file '%s': %v", fName, err)
+        continue
     }

     v := strings.TrimSpace(string(contents))
     fields[metricKey], err = strconv.ParseFloat(v, 64)
     if err != nil {
-        log.Printf("failed to parse metric, expected number but "+
+        log.Printf("E! failed to parse metric, expected number but "+
             " found '%s': %v", v, err)
     }
     }

@@ -126,7 +126,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
     defer wg.Done()
     err := d.gatherContainer(c, acc)
     if err != nil {
-        log.Printf("Error gathering container %s stats: %s\n",
+        log.Printf("E! Error gathering container %s stats: %s\n",
             c.Names, err.Error())
     }
     }(container)

@@ -74,7 +74,7 @@ func (t *HttpListener) Start(acc telegraf.Accumulator) error {

     go t.httpListen()

-    log.Printf("Started HTTP listener service on %s\n", t.ServiceAddress)
+    log.Printf("I! Started HTTP listener service on %s\n", t.ServiceAddress)

     return nil
 }

@@ -89,7 +89,7 @@ func (t *HttpListener) Stop() {

     t.wg.Wait()

-    log.Println("Stopped HTTP listener service on ", t.ServiceAddress)
+    log.Println("I! Stopped HTTP listener service on ", t.ServiceAddress)
 }

 // httpListen listens for HTTP requests.

@@ -90,7 +90,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error {
     case "newest":
         config.Offsets.Initial = sarama.OffsetNewest
     default:
-        log.Printf("WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n",
+        log.Printf("I! WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n",
             k.Offset)
         config.Offsets.Initial = sarama.OffsetOldest
     }

@@ -115,7 +115,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error {

     // Start the kafka message reader
     go k.receiver()
-    log.Printf("Started the kafka consumer service, peers: %v, topics: %v\n",
+    log.Printf("I! Started the kafka consumer service, peers: %v, topics: %v\n",
         k.ZookeeperPeers, k.Topics)
     return nil
 }

@@ -129,12 +129,12 @@ func (k *Kafka) receiver() {
     return
     case err := <-k.errs:
         if err != nil {
-            log.Printf("Kafka Consumer Error: %s\n", err)
+            log.Printf("E! Kafka Consumer Error: %s\n", err)
         }
     case msg := <-k.in:
         metrics, err := k.parser.Parse(msg.Value)
         if err != nil {
-            log.Printf("KAFKA PARSE ERROR\nmessage: %s\nerror: %s",
+            log.Printf("E! Kafka Message Parse Error\nmessage: %s\nerror: %s",
                 string(msg.Value), err.Error())
         }

@@ -158,7 +158,7 @@ func (k *Kafka) Stop() {
     defer k.Unlock()
     close(k.done)
     if err := k.Consumer.Close(); err != nil {
-        log.Printf("Error closing kafka consumer: %s\n", err.Error())
+        log.Printf("E! Error closing kafka consumer: %s\n", err.Error())
     }
 }

@@ -202,21 +202,21 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
     case INT:
         iv, err := strconv.ParseInt(v, 10, 64)
         if err != nil {
-            log.Printf("ERROR parsing %s to int: %s", v, err)
+            log.Printf("E! Error parsing %s to int: %s", v, err)
         } else {
             fields[k] = iv
         }
     case FLOAT:
         fv, err := strconv.ParseFloat(v, 64)
         if err != nil {
-            log.Printf("ERROR parsing %s to float: %s", v, err)
+            log.Printf("E! Error parsing %s to float: %s", v, err)
         } else {
            fields[k] = fv
         }
     case DURATION:
         d, err := time.ParseDuration(v)
         if err != nil {
-            log.Printf("ERROR parsing %s to duration: %s", v, err)
+            log.Printf("E! Error parsing %s to duration: %s", v, err)
         } else {
             fields[k] = int64(d)
         }

@@ -227,14 +227,14 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
     case EPOCH:
         iv, err := strconv.ParseInt(v, 10, 64)
         if err != nil {
-            log.Printf("ERROR parsing %s to int: %s", v, err)
+            log.Printf("E! Error parsing %s to int: %s", v, err)
         } else {
             timestamp = time.Unix(iv, 0)
         }
     case EPOCH_NANO:
         iv, err := strconv.ParseInt(v, 10, 64)
         if err != nil {
-            log.Printf("ERROR parsing %s to int: %s", v, err)
+            log.Printf("E! Error parsing %s to int: %s", v, err)
         } else {
             timestamp = time.Unix(0, iv)
         }

@@ -265,7 +265,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
     // if we still haven't found a timestamp layout, log it and we will
     // just use time.Now()
     if !foundTs {
-        log.Printf("ERROR parsing timestamp [%s], could not find any "+
+        log.Printf("E! Error parsing timestamp [%s], could not find any "+
             "suitable time layouts.", v)
     }
     case DROP:

@@ -275,7 +275,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
     if err == nil {
         timestamp = ts
     } else {
-        log.Printf("ERROR parsing %s to time layout [%s]: %s", v, t, err)
+        log.Printf("E! Error parsing %s to time layout [%s]: %s", v, t, err)
     }
     }
     }

@@ -134,7 +134,7 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error {
     for _, filepath := range l.Files {
         g, err := globpath.Compile(filepath)
         if err != nil {
-            log.Printf("ERROR Glob %s failed to compile, %s", filepath, err)
+            log.Printf("E! Error Glob %s failed to compile, %s", filepath, err)
             continue
         }
         files := g.Match()

@@ -167,7 +167,7 @@ func (l *LogParserPlugin) receiver(tailer *tail.Tail) {
     var line *tail.Line
     for line = range tailer.Lines {
         if line.Err != nil {
-            log.Printf("ERROR tailing file %s, Error: %s\n",
+            log.Printf("E! Error tailing file %s, Error: %s\n",
                 tailer.Filename, line.Err)
             continue
         }

@@ -216,7 +216,7 @@ func (l *LogParserPlugin) Stop() {
     for _, t := range l.tailers {
         err := t.Stop()
         if err != nil {
-            log.Printf("ERROR stopping tail on file %s\n", t.Filename)
+            log.Printf("E! Error stopping tail on file %s\n", t.Filename)
         }
         t.Cleanup()
     }

@@ -134,7 +134,7 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) {
     req.URL.RawQuery = params.String()
     req.Header.Set("User-Agent", "Telegraf-MailChimp-Plugin")
     if api.Debug {
-        log.Printf("Request URL: %s", req.URL.String())
+        log.Printf("D! Request URL: %s", req.URL.String())
     }

     resp, err := client.Do(req)

@@ -148,7 +148,7 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) {
     return nil, err
     }
     if api.Debug {
-        log.Printf("Response Body:%s", string(body))
+        log.Printf("D! Response Body:%s", string(body))
     }

     if err = chimpErrorCheck(body); err != nil {

@@ -88,7 +88,7 @@ func (m *Mesos) SetDefaults() {
     }

     if m.Timeout == 0 {
-        log.Println("[mesos] Missing timeout value, setting default value (100ms)")
+        log.Println("I! [mesos] Missing timeout value, setting default value (100ms)")
         m.Timeout = 100
     }
 }

@@ -383,7 +383,7 @@ func getMetrics(role Role, group string) []string {
     ret, ok := m[group]

     if !ok {
-        log.Printf("[mesos] Unkown %s metrics group: %s\n", role, group)
+        log.Printf("I! [mesos] Unkown %s metrics group: %s\n", role, group)
         return []string{}
     }

@@ -47,7 +47,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
     },
     }, result_repl)
     if err != nil {
-        log.Println("Not gathering replica set status, member not in replica set (" + err.Error() + ")")
+        log.Println("E! Not gathering replica set status, member not in replica set (" + err.Error() + ")")
     }

     jumbo_chunks, _ := s.Session.DB("config").C("chunks").Find(bson.M{"jumbo": true}).Count()

@@ -62,7 +62,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
     names := []string{}
     names, err = s.Session.DatabaseNames()
     if err != nil {
-        log.Println("Error getting database names (" + err.Error() + ")")
+        log.Println("E! Error getting database names (" + err.Error() + ")")
     }
     for _, db_name := range names {
         db_stat_line := &DbStatsData{}

@@ -73,7 +73,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
     },
     }, db_stat_line)
     if err != nil {
-        log.Println("Error getting db stats from " + db_name + "(" + err.Error() + ")")
+        log.Println("E! Error getting db stats from " + db_name + "(" + err.Error() + ")")
     }
     db := &Db{
         Name: db_name,

@@ -133,7 +133,7 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error {
     return nil
 }
 func (m *MQTTConsumer) onConnect(c mqtt.Client) {
-    log.Printf("MQTT Client Connected")
+    log.Printf("I! MQTT Client Connected")
     if !m.PersistentSession || !m.started {
         topics := make(map[string]byte)
         for _, topic := range m.Topics {

@@ -142,7 +142,7 @@ func (m *MQTTConsumer) onConnect(c mqtt.Client) {
     subscribeToken := c.SubscribeMultiple(topics, m.recvMessage)
     subscribeToken.Wait()
     if subscribeToken.Error() != nil {
-        log.Printf("MQTT SUBSCRIBE ERROR\ntopics: %s\nerror: %s",
+        log.Printf("E! MQTT Subscribe Error\ntopics: %s\nerror: %s",
            strings.Join(m.Topics[:], ","), subscribeToken.Error())
     }
     m.started = true

@@ -151,7 +151,7 @@ func (m *MQTTConsumer) onConnect(c mqtt.Client) {
 }

 func (m *MQTTConsumer) onConnectionLost(c mqtt.Client, err error) {
-    log.Printf("MQTT Connection lost\nerror: %s\nMQTT Client will try to reconnect", err.Error())
+    log.Printf("E! MQTT Connection lost\nerror: %s\nMQTT Client will try to reconnect", err.Error())
     return
 }

@@ -166,7 +166,7 @@ func (m *MQTTConsumer) receiver() {
     topic := msg.Topic()
     metrics, err := m.parser.Parse(msg.Payload())
     if err != nil {
-        log.Printf("MQTT PARSE ERROR\nmessage: %s\nerror: %s",
+        log.Printf("E! MQTT Parse Error\nmessage: %s\nerror: %s",
             string(msg.Payload()), err.Error())
     }

@@ -119,7 +119,7 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error {

 	// Start the message reader
 	go n.receiver()
-	log.Printf("Started the NATS consumer service, nats: %v, subjects: %v, queue: %v\n",
+	log.Printf("I! Started the NATS consumer service, nats: %v, subjects: %v, queue: %v\n",
 		n.Conn.ConnectedUrl(), n.Subjects, n.QueueGroup)

 	return nil

@@ -134,11 +134,11 @@ func (n *natsConsumer) receiver() {
 		case <-n.done:
 			return
 		case err := <-n.errs:
-			log.Printf("error reading from %s\n", err.Error())
+			log.Printf("E! error reading from %s\n", err.Error())
 		case msg := <-n.in:
 			metrics, err := n.parser.Parse(msg.Data)
 			if err != nil {
-				log.Printf("subject: %s, error: %s", msg.Subject, err.Error())
+				log.Printf("E! subject: %s, error: %s", msg.Subject, err.Error())
 			}

 			for _, metric := range metrics {

@@ -157,7 +157,7 @@ func (n *natsConsumer) clean() {

 	for _, sub := range n.Subs {
 		if err := sub.Unsubscribe(); err != nil {
-			log.Printf("Error unsubscribing from subject %s in queue %s: %s\n",
+			log.Printf("E! Error unsubscribing from subject %s in queue %s: %s\n",
 				sub.Subject, sub.Queue, err.Error())
 		}
 	}
@@ -62,7 +62,7 @@ func (n *NSQConsumer) Start(acc telegraf.Accumulator) error {
 	n.consumer.AddConcurrentHandlers(nsq.HandlerFunc(func(message *nsq.Message) error {
 		metrics, err := n.parser.Parse(message.Body)
 		if err != nil {
-			log.Printf("NSQConsumer Parse Error\nmessage:%s\nerror:%s", string(message.Body), err.Error())
+			log.Printf("E! NSQConsumer Parse Error\nmessage:%s\nerror:%s", string(message.Body), err.Error())
 			return nil
 		}
 		for _, metric := range metrics {
@@ -132,7 +132,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
 			case strings.HasSuffix(when, "h"):
 				m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "h"))
 				if err != nil {
-					log.Printf("ERROR ntpq: parsing int: %s", fields[index])
+					log.Printf("E! Error ntpq: parsing int: %s", fields[index])
 					continue
 				}
 				// seconds in an hour

@@ -141,7 +141,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
 			case strings.HasSuffix(when, "d"):
 				m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "d"))
 				if err != nil {
-					log.Printf("ERROR ntpq: parsing int: %s", fields[index])
+					log.Printf("E! Error ntpq: parsing int: %s", fields[index])
 					continue
 				}
 				// seconds in a day

@@ -150,7 +150,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
 			case strings.HasSuffix(when, "m"):
 				m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "m"))
 				if err != nil {
-					log.Printf("ERROR ntpq: parsing int: %s", fields[index])
+					log.Printf("E! Error ntpq: parsing int: %s", fields[index])
 					continue
 				}
 				// seconds in a day

@@ -161,7 +161,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {

 			m, err := strconv.Atoi(fields[index])
 			if err != nil {
-				log.Printf("ERROR ntpq: parsing int: %s", fields[index])
+				log.Printf("E! Error ntpq: parsing int: %s", fields[index])
 				continue
 			}
 			mFields[key] = int64(m)

@@ -178,7 +178,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {

 			m, err := strconv.ParseFloat(fields[index], 64)
 			if err != nil {
-				log.Printf("ERROR ntpq: parsing float: %s", fields[index])
+				log.Printf("E! Error ntpq: parsing float: %s", fields[index])
 				continue
 			}
 			mFields[key] = m
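The ntpq hunks above all touch the same suffix handling: a "when" value may end in "h", "d", or "m", the suffix is trimmed, the remainder is parsed as an integer, and the result is scaled to seconds. A minimal standalone sketch of that conversion follows; the function name and the main driver are illustrative and not taken from this diff.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// whenToSeconds is a hypothetical helper mirroring the suffix handling in the
// ntpq hunks: strip "h"/"d"/"m", parse the rest as an integer, scale to seconds.
func whenToSeconds(when string) (int64, error) {
	switch {
	case strings.HasSuffix(when, "h"):
		v, err := strconv.Atoi(strings.TrimSuffix(when, "h"))
		if err != nil {
			return 0, err
		}
		return int64(v) * 3600, nil // seconds in an hour
	case strings.HasSuffix(when, "d"):
		v, err := strconv.Atoi(strings.TrimSuffix(when, "d"))
		if err != nil {
			return 0, err
		}
		return int64(v) * 86400, nil // seconds in a day
	case strings.HasSuffix(when, "m"):
		v, err := strconv.Atoi(strings.TrimSuffix(when, "m"))
		if err != nil {
			return 0, err
		}
		return int64(v) * 60, nil // seconds in a minute
	default:
		v, err := strconv.Atoi(when)
		if err != nil {
			return 0, err
		}
		return int64(v), nil // bare number, already seconds
	}
}

func main() {
	for _, w := range []string{"25", "3m", "2h", "1d"} {
		s, err := whenToSeconds(w)
		fmt.Println(w, s, err)
	}
}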
@@ -269,9 +269,7 @@ func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumula
 	fields := make(map[string]interface{})
COLUMN:
 	for col, val := range columnMap {
-		if acc.Debug() {
-			log.Printf("postgresql_extensible: column: %s = %T: %s\n", col, *val, *val)
-		}
+		log.Printf("D! postgresql_extensible: column: %s = %T: %s\n", col, *val, *val)
 		_, ignore := ignoredColumns[col]
 		if ignore || *val == nil {
 			continue
@@ -110,7 +110,7 @@ func parseResponse(metrics string) map[string]interface{} {

 		i, err := strconv.ParseInt(m[1], 10, 64)
 		if err != nil {
-			log.Printf("powerdns: Error parsing integer for metric [%s]: %s",
+			log.Printf("E! powerdns: Error parsing integer for metric [%s]: %s",
 				metric, err)
 			continue
 		}
@@ -66,7 +66,7 @@ func (_ *Procstat) Description() string {
 func (p *Procstat) Gather(acc telegraf.Accumulator) error {
 	err := p.createProcesses()
 	if err != nil {
-		log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s",
+		log.Printf("E! Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s",
 			p.Exe, p.PidFile, p.Pattern, p.User, err.Error())
 	} else {
 		for pid, proc := range p.pidmap {
@@ -296,7 +296,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {

 		data, err := ioutil.ReadFile(s.SnmptranslateFile)
 		if err != nil {
-			log.Printf("Reading SNMPtranslate file error: %s", err)
+			log.Printf("E! Reading SNMPtranslate file error: %s", err)
 			return err
 		} else {
 			for _, line := range strings.Split(string(data), "\n") {

@@ -394,16 +394,16 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
 		// only if len(s.OidInstanceMapping) == 0
 		if len(host.OidInstanceMapping) >= 0 {
 			if err := host.SNMPMap(acc, s.nameToOid, s.subTableMap); err != nil {
-				log.Printf("SNMP Mapping error for host '%s': %s", host.Address, err)
+				log.Printf("E! SNMP Mapping error for host '%s': %s", host.Address, err)
 				continue
 			}
 		}
 		// Launch Get requests
 		if err := host.SNMPGet(acc, s.initNode); err != nil {
-			log.Printf("SNMP Error for host '%s': %s", host.Address, err)
+			log.Printf("E! SNMP Error for host '%s': %s", host.Address, err)
 		}
 		if err := host.SNMPBulk(acc, s.initNode); err != nil {
-			log.Printf("SNMP Error for host '%s': %s", host.Address, err)
+			log.Printf("E! SNMP Error for host '%s': %s", host.Address, err)
 		}
 	}
 	return nil

@@ -800,7 +800,7 @@ func (h *Host) HandleResponse(
 			acc.AddFields(field_name, fields, tags)
 		case gosnmp.NoSuchObject, gosnmp.NoSuchInstance:
 			// Oid not found
-			log.Printf("[snmp input] Oid not found: %s", oid_key)
+			log.Printf("E! [snmp input] Oid not found: %s", oid_key)
 		default:
 			// delete other data
 		}
@@ -28,7 +28,7 @@ const (
 	defaultAllowPendingMessage = 10000
 )

-var dropwarn = "ERROR: statsd message queue full. " +
+var dropwarn = "E! Error: statsd message queue full. " +
 	"We have dropped %d messages so far. " +
 	"You may want to increase allowed_pending_messages in the config\n"

@@ -251,7 +251,7 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error {
 	}

 	if s.ConvertNames {
-		log.Printf("WARNING statsd: convert_names config option is deprecated," +
+		log.Printf("I! WARNING statsd: convert_names config option is deprecated," +
 			" please use metric_separator instead")
 	}

@@ -264,7 +264,7 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error {
 	go s.udpListen()
 	// Start the line parser
 	go s.parser()
-	log.Printf("Started the statsd service on %s\n", s.ServiceAddress)
+	log.Printf("I! Started the statsd service on %s\n", s.ServiceAddress)
 	prevInstance = s
 	return nil
 }

@@ -278,7 +278,7 @@ func (s *Statsd) udpListen() error {
 	if err != nil {
 		log.Fatalf("ERROR: ListenUDP - %s", err)
 	}
-	log.Println("Statsd listener listening on: ", s.listener.LocalAddr().String())
+	log.Println("I! Statsd listener listening on: ", s.listener.LocalAddr().String())

 	buf := make([]byte, UDP_MAX_PACKET_SIZE)
 	for {

@@ -288,7 +288,7 @@ func (s *Statsd) udpListen() error {
 		default:
 			n, _, err := s.listener.ReadFromUDP(buf)
 			if err != nil && !strings.Contains(err.Error(), "closed network") {
-				log.Printf("ERROR READ: %s\n", err.Error())
+				log.Printf("E! Error READ: %s\n", err.Error())
 				continue
 			}
 			bufCopy := make([]byte, n)
@@ -374,7 +374,7 @@ func (s *Statsd) parseStatsdLine(line string) error {
 	// Validate splitting the line on ":"
 	bits := strings.Split(line, ":")
 	if len(bits) < 2 {
-		log.Printf("Error: splitting ':', Unable to parse metric: %s\n", line)
+		log.Printf("E! Error: splitting ':', Unable to parse metric: %s\n", line)
 		return errors.New("Error Parsing statsd line")
 	}

@@ -390,11 +390,11 @@ func (s *Statsd) parseStatsdLine(line string) error {
 		// Validate splitting the bit on "|"
 		pipesplit := strings.Split(bit, "|")
 		if len(pipesplit) < 2 {
-			log.Printf("Error: splitting '|', Unable to parse metric: %s\n", line)
+			log.Printf("E! Error: splitting '|', Unable to parse metric: %s\n", line)
 			return errors.New("Error Parsing statsd line")
 		} else if len(pipesplit) > 2 {
 			sr := pipesplit[2]
-			errmsg := "Error: parsing sample rate, %s, it must be in format like: " +
+			errmsg := "E! Error: parsing sample rate, %s, it must be in format like: " +
 				"@0.1, @0.5, etc. Ignoring sample rate for line: %s\n"
 			if strings.Contains(sr, "@") && len(sr) > 1 {
 				samplerate, err := strconv.ParseFloat(sr[1:], 64)

@@ -414,14 +414,14 @@ func (s *Statsd) parseStatsdLine(line string) error {
 		case "g", "c", "s", "ms", "h":
 			m.mtype = pipesplit[1]
 		default:
-			log.Printf("Error: Statsd Metric type %s unsupported", pipesplit[1])
+			log.Printf("E! Error: Statsd Metric type %s unsupported", pipesplit[1])
 			return errors.New("Error Parsing statsd line")
 		}

 		// Parse the value
 		if strings.HasPrefix(pipesplit[0], "-") || strings.HasPrefix(pipesplit[0], "+") {
 			if m.mtype != "g" {
-				log.Printf("Error: +- values are only supported for gauges: %s\n", line)
+				log.Printf("E! Error: +- values are only supported for gauges: %s\n", line)
 				return errors.New("Error Parsing statsd line")
 			}
 			m.additive = true
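The parseStatsdLine hunks above all sit on the same wire format: a line is split on ":" into a name and one or more bits, each bit is split on "|" into value and type (one of "g", "c", "s", "ms", "h"), and an optional third field of the form "@0.1" carries a sample rate. A stripped-down, hypothetical sketch of that splitting, handling only a single bit per line (the struct and function names are illustrative, not the plugin's own):

package main

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
)

// parsedStat is an illustrative container; the real plugin tracks more state.
type parsedStat struct {
	name  string
	value string
	mtype string
	rate  float64
}

// parseLine is a hypothetical, simplified version of the splitting logic the
// statsd hunks reference: "name:value|type" with an optional "|@rate" suffix.
func parseLine(line string) (parsedStat, error) {
	bits := strings.Split(line, ":")
	if len(bits) < 2 {
		return parsedStat{}, errors.New("splitting ':', unable to parse metric: " + line)
	}
	st := parsedStat{name: bits[0], rate: 1.0}

	pipesplit := strings.Split(bits[1], "|")
	if len(pipesplit) < 2 {
		return parsedStat{}, errors.New("splitting '|', unable to parse metric: " + line)
	}
	// Optional sample rate, e.g. "@0.1".
	if len(pipesplit) > 2 && strings.HasPrefix(pipesplit[2], "@") && len(pipesplit[2]) > 1 {
		if r, err := strconv.ParseFloat(pipesplit[2][1:], 64); err == nil {
			st.rate = r
		}
	}
	// Metric type must be one of the supported statsd types.
	switch pipesplit[1] {
	case "g", "c", "s", "ms", "h":
		st.mtype = pipesplit[1]
	default:
		return parsedStat{}, errors.New("unsupported statsd metric type: " + pipesplit[1])
	}
	st.value = pipesplit[0]
	return st, nil
}

func main() {
	fmt.Println(parseLine("deploys.test.myservice:1|c|@0.1"))
}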
@@ -431,7 +431,7 @@ func (s *Statsd) parseStatsdLine(line string) error {
 		case "g", "ms", "h":
 			v, err := strconv.ParseFloat(pipesplit[0], 64)
 			if err != nil {
-				log.Printf("Error: parsing value to float64: %s\n", line)
+				log.Printf("E! Error: parsing value to float64: %s\n", line)
 				return errors.New("Error Parsing statsd line")
 			}
 			m.floatvalue = v

@@ -441,7 +441,7 @@ func (s *Statsd) parseStatsdLine(line string) error {
 			if err != nil {
 				v2, err2 := strconv.ParseFloat(pipesplit[0], 64)
 				if err2 != nil {
-					log.Printf("Error: parsing value to int64: %s\n", line)
+					log.Printf("E! Error: parsing value to int64: %s\n", line)
 					return errors.New("Error Parsing statsd line")
 				}
 				v = int64(v2)

@@ -641,7 +641,7 @@ func (s *Statsd) aggregate(m metric) {
 func (s *Statsd) Stop() {
 	s.Lock()
 	defer s.Unlock()
-	log.Println("Stopping the statsd service")
+	log.Println("I! Stopping the statsd service")
 	close(s.done)
 	s.listener.Close()
 	s.wg.Wait()
@@ -203,7 +203,7 @@ func (s *Sysstat) collect() error {
 	out, err := internal.CombinedOutputTimeout(cmd, time.Second*time.Duration(collectInterval+parseInterval))
 	if err != nil {
 		if err := os.Remove(s.tmpFile); err != nil {
-			log.Printf("failed to remove tmp file after %s command: %s", strings.Join(cmd.Args, " "), err)
+			log.Printf("E! failed to remove tmp file after %s command: %s", strings.Join(cmd.Args, " "), err)
 		}
 		return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
 	}
@@ -118,7 +118,7 @@ func (p *Processes) gatherFromPS(fields map[string]interface{}) error {
 		case '?':
 			fields["unknown"] = fields["unknown"].(int64) + int64(1)
 		default:
-			log.Printf("processes: Unknown state [ %s ] from ps",
+			log.Printf("I! processes: Unknown state [ %s ] from ps",
 				string(status[0]))
 		}
 		fields["total"] = fields["total"].(int64) + int64(1)

@@ -169,14 +169,14 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error {
 		case 'W':
 			fields["paging"] = fields["paging"].(int64) + int64(1)
 		default:
-			log.Printf("processes: Unknown state [ %s ] in file %s",
+			log.Printf("I! processes: Unknown state [ %s ] in file %s",
 				string(stats[0][0]), filename)
 		}
 		fields["total"] = fields["total"].(int64) + int64(1)

 		threads, err := strconv.Atoi(string(stats[17]))
 		if err != nil {
-			log.Printf("processes: Error parsing thread count: %s", err)
+			log.Printf("I! processes: Error parsing thread count: %s", err)
 			continue
 		}
 		fields["total_threads"] = fields["total_threads"].(int64) + int64(threads)
@@ -81,7 +81,7 @@ func (t *Tail) Start(acc telegraf.Accumulator) error {
 	for _, filepath := range t.Files {
 		g, err := globpath.Compile(filepath)
 		if err != nil {
-			log.Printf("ERROR Glob %s failed to compile, %s", filepath, err)
+			log.Printf("E! Error Glob %s failed to compile, %s", filepath, err)
 		}
 		for file, _ := range g.Match() {
 			tailer, err := tail.TailFile(file,

@@ -118,7 +118,7 @@ func (t *Tail) receiver(tailer *tail.Tail) {
 	var line *tail.Line
 	for line = range tailer.Lines {
 		if line.Err != nil {
-			log.Printf("ERROR tailing file %s, Error: %s\n",
+			log.Printf("E! Error tailing file %s, Error: %s\n",
 				tailer.Filename, err)
 			continue
 		}

@@ -126,7 +126,7 @@ func (t *Tail) receiver(tailer *tail.Tail) {
 		if err == nil {
 			t.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
 		} else {
-			log.Printf("Malformed log line in %s: [%s], Error: %s\n",
+			log.Printf("E! Malformed log line in %s: [%s], Error: %s\n",
 				tailer.Filename, line.Text, err)
 		}
 	}

@@ -139,7 +139,7 @@ func (t *Tail) Stop() {
 	for _, t := range t.tailers {
 		err := t.Stop()
 		if err != nil {
-			log.Printf("ERROR stopping tail on file %s\n", t.Filename)
+			log.Printf("E! Error stopping tail on file %s\n", t.Filename)
 		}
 		t.Cleanup()
 	}
@@ -43,11 +43,11 @@ type TcpListener struct {
 	acc telegraf.Accumulator
 }

-var dropwarn = "ERROR: tcp_listener message queue full. " +
+var dropwarn = "E! Error: tcp_listener message queue full. " +
 	"We have dropped %d messages so far. " +
 	"You may want to increase allowed_pending_messages in the config\n"

-var malformedwarn = "WARNING: tcp_listener has received %d malformed packets" +
+var malformedwarn = "E! tcp_listener has received %d malformed packets" +
 	" thus far."

 const sampleConfig = `

@@ -108,13 +108,13 @@ func (t *TcpListener) Start(acc telegraf.Accumulator) error {
 		log.Fatalf("ERROR: ListenUDP - %s", err)
 		return err
 	}
-	log.Println("TCP server listening on: ", t.listener.Addr().String())
+	log.Println("I! TCP server listening on: ", t.listener.Addr().String())

 	t.wg.Add(2)
 	go t.tcpListen()
 	go t.tcpParser()

-	log.Printf("Started TCP listener service on %s\n", t.ServiceAddress)
+	log.Printf("I! Started TCP listener service on %s\n", t.ServiceAddress)
 	return nil
 }

@@ -141,7 +141,7 @@ func (t *TcpListener) Stop() {

 	t.wg.Wait()
 	close(t.in)
-	log.Println("Stopped TCP listener service on ", t.ServiceAddress)
+	log.Println("I! Stopped TCP listener service on ", t.ServiceAddress)
 }

 // tcpListen listens for incoming TCP connections.

@@ -182,8 +182,8 @@ func (t *TcpListener) refuser(conn *net.TCPConn) {
 		" reached, closing.\nYou may want to increase max_tcp_connections in"+
 		" the Telegraf tcp listener configuration.\n", t.MaxTCPConnections)
 	conn.Close()
-	log.Printf("Refused TCP Connection from %s", conn.RemoteAddr())
-	log.Printf("WARNING: Maximum TCP Connections reached, you may want to" +
+	log.Printf("I! Refused TCP Connection from %s", conn.RemoteAddr())
+	log.Printf("I! WARNING: Maximum TCP Connections reached, you may want to" +
 		" adjust max_tcp_connections")
 }

@@ -42,11 +42,11 @@ type UdpListener struct {
 // https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure
 const UDP_MAX_PACKET_SIZE int = 64 * 1024

-var dropwarn = "ERROR: udp_listener message queue full. " +
+var dropwarn = "E! Error: udp_listener message queue full. " +
 	"We have dropped %d messages so far. " +
 	"You may want to increase allowed_pending_messages in the config\n"

-var malformedwarn = "WARNING: udp_listener has received %d malformed packets" +
+var malformedwarn = "E! udp_listener has received %d malformed packets" +
 	" thus far."

 const sampleConfig = `

@@ -94,7 +94,7 @@ func (u *UdpListener) Start(acc telegraf.Accumulator) error {
 	go u.udpListen()
 	go u.udpParser()

-	log.Printf("Started UDP listener service on %s\n", u.ServiceAddress)
+	log.Printf("I! Started UDP listener service on %s\n", u.ServiceAddress)
 	return nil
 }

@@ -105,7 +105,7 @@ func (u *UdpListener) Stop() {
 	u.wg.Wait()
 	u.listener.Close()
 	close(u.in)
-	log.Println("Stopped UDP listener service on ", u.ServiceAddress)
+	log.Println("I! Stopped UDP listener service on ", u.ServiceAddress)
 }

 func (u *UdpListener) udpListen() error {

@@ -116,7 +116,7 @@ func (u *UdpListener) udpListen() error {
 	if err != nil {
 		log.Fatalf("ERROR: ListenUDP - %s", err)
 	}
-	log.Println("UDP server listening on: ", u.listener.LocalAddr().String())
+	log.Println("I! UDP server listening on: ", u.listener.LocalAddr().String())

 	buf := make([]byte, UDP_MAX_PACKET_SIZE)
 	for {

@@ -129,7 +129,7 @@ func (u *UdpListener) udpListen() error {
 			if err != nil {
 				if err, ok := err.(net.Error); ok && err.Timeout() {
 				} else {
-					log.Printf("ERROR: %s\n", err.Error())
+					log.Printf("E! Error: %s\n", err.Error())
 				}
 				continue
 			}
@@ -19,7 +19,7 @@ type FilestackWebhook struct {
 func (fs *FilestackWebhook) Register(router *mux.Router, acc telegraf.Accumulator) {
 	router.HandleFunc(fs.Path, fs.eventHandler).Methods("POST")

-	log.Printf("Started the webhooks_filestack on %s\n", fs.Path)
+	log.Printf("I! Started the webhooks_filestack on %s\n", fs.Path)
 	fs.acc = acc
 }

@@ -17,7 +17,7 @@ type GithubWebhook struct {

 func (gh *GithubWebhook) Register(router *mux.Router, acc telegraf.Accumulator) {
 	router.HandleFunc(gh.Path, gh.eventHandler).Methods("POST")
-	log.Printf("Started the webhooks_github on %s\n", gh.Path)
+	log.Printf("I! Started the webhooks_github on %s\n", gh.Path)
 	gh.acc = acc
 }

@@ -58,7 +58,7 @@ func (e *newEventError) Error() string {
 }

 func NewEvent(data []byte, name string) (Event, error) {
-	log.Printf("New %v event received", name)
+	log.Printf("D! New %v event received", name)
 	switch name {
 	case "commit_comment":
 		return generateEvent(data, &CommitCommentEvent{})
@@ -21,7 +21,7 @@ func (md *MandrillWebhook) Register(router *mux.Router, acc telegraf.Accumulator
 	router.HandleFunc(md.Path, md.returnOK).Methods("HEAD")
 	router.HandleFunc(md.Path, md.eventHandler).Methods("POST")

-	log.Printf("Started the webhooks_mandrill on %s\n", md.Path)
+	log.Printf("I! Started the webhooks_mandrill on %s\n", md.Path)
 	md.acc = acc
 }

@@ -19,7 +19,7 @@ type RollbarWebhook struct {

 func (rb *RollbarWebhook) Register(router *mux.Router, acc telegraf.Accumulator) {
 	router.HandleFunc(rb.Path, rb.eventHandler).Methods("POST")
-	log.Printf("Started the webhooks_rollbar on %s\n", rb.Path)
+	log.Printf("I! Started the webhooks_rollbar on %s\n", rb.Path)
 	rb.acc = acc
 }

@@ -73,7 +73,7 @@ func (wb *Webhooks) Listen(acc telegraf.Accumulator) {

 	err := http.ListenAndServe(fmt.Sprintf("%s", wb.ServiceAddress), r)
 	if err != nil {
-		log.Printf("Error starting server: %v", err)
+		log.Printf("E! Error starting server: %v", err)
 	}
 }

@@ -100,10 +100,10 @@ func (wb *Webhooks) AvailableWebhooks() []Webhook {

 func (wb *Webhooks) Start(acc telegraf.Accumulator) error {
 	go wb.Listen(acc)
-	log.Printf("Started the webhooks service on %s\n", wb.ServiceAddress)
+	log.Printf("I! Started the webhooks service on %s\n", wb.ServiceAddress)
 	return nil
 }

 func (rb *Webhooks) Stop() {
-	log.Println("Stopping the Webhooks service")
+	log.Println("I! Stopping the Webhooks service")
 }
@@ -73,7 +73,7 @@ func (a *Amon) Write(metrics []telegraf.Metric) error {
 				metricCounter++
 			}
 		} else {
-			log.Printf("unable to build Metric for %s, skipping\n", m.Name())
+			log.Printf("I! unable to build Metric for %s, skipping\n", m.Name())
 		}
 	}

@@ -153,10 +153,10 @@ func (q *AMQP) Connect() error {
 	}
 	q.channel = channel
 	go func() {
-		log.Printf("Closing: %s", <-connection.NotifyClose(make(chan *amqp.Error)))
-		log.Printf("Trying to reconnect")
+		log.Printf("I! Closing: %s", <-connection.NotifyClose(make(chan *amqp.Error)))
+		log.Printf("I! Trying to reconnect")
 		for err := q.Connect(); err != nil; err = q.Connect() {
-			log.Println(err)
+			log.Println("E! ", err.Error())
 			time.Sleep(10 * time.Second)
 		}

@@ -80,7 +80,7 @@ func (c *CloudWatch) Connect() error {
 	_, err := svc.ListMetrics(params) // Try a read-only call to test connection.

 	if err != nil {
-		log.Printf("cloudwatch: Error in ListMetrics API call : %+v \n", err.Error())
+		log.Printf("E! cloudwatch: Error in ListMetrics API call : %+v \n", err.Error())
 	}

 	c.svc = svc

@@ -131,7 +131,7 @@ func (c *CloudWatch) WriteToCloudWatch(datums []*cloudwatch.MetricDatum) error {
 	_, err := c.svc.PutMetricData(params)

 	if err != nil {
-		log.Printf("CloudWatch: Unable to write to CloudWatch : %+v \n", err.Error())
+		log.Printf("E! CloudWatch: Unable to write to CloudWatch : %+v \n", err.Error())
 	}

 	return err
@@ -92,7 +92,7 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error {
 				metricCounter++
 			}
 		} else {
-			log.Printf("unable to build Metric for %s, skipping\n", m.Name())
+			log.Printf("I! unable to build Metric for %s, skipping\n", m.Name())
 		}
 	}

@@ -85,7 +85,7 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error {
 	for _, metric := range metrics {
 		gMetrics, err := s.Serialize(metric)
 		if err != nil {
-			log.Printf("Error serializing some metrics to graphite: %s", err.Error())
+			log.Printf("E! Error serializing some metrics to graphite: %s", err.Error())
 		}
 		bp = append(bp, gMetrics...)
 	}

@@ -102,7 +102,7 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error {
 		}
 		if _, e := g.conns[n].Write([]byte(graphitePoints)); e != nil {
 			// Error
-			log.Println("ERROR: " + e.Error())
+			log.Println("E! Graphite Error: " + e.Error())
 			// Let's try the next one
 		} else {
 			// Success
@@ -130,7 +130,7 @@ func (i *InfluxDB) Connect() error {

 		err = createDatabase(c, i.Database)
 		if err != nil {
-			log.Println("Database creation failed: " + err.Error())
+			log.Println("E! Database creation failed: " + err.Error())
 			continue
 		}

@@ -201,11 +201,11 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
 	for _, n := range p {
 		if e := i.conns[n].Write(bp); e != nil {
 			// Log write failure
-			log.Printf("ERROR: %s", e)
+			log.Printf("E! InfluxDB Output Error: %s", e)
 			// If the database was not found, try to recreate it
 			if strings.Contains(e.Error(), "database not found") {
 				if errc := createDatabase(i.conns[n], i.Database); errc != nil {
-					log.Printf("ERROR: Database %s not found and failed to recreate\n",
+					log.Printf("E! Error: Database %s not found and failed to recreate\n",
 						i.Database)
 				}
 			}
@@ -119,7 +119,7 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error {

 		stats, err := s.Serialize(toSerialize)
 		if err != nil {
-			log.Printf("Error serializing a metric to Instrumental: %s", err)
+			log.Printf("E! Error serializing a metric to Instrumental: %s", err)
 		}

 		switch metricType {

@@ -144,7 +144,7 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error {
 			if !ValueIncludesBadChar.MatchString(value) {
 				points = append(points, fmt.Sprintf("%s %s %s %s", metricType, clean_metric, value, time))
 			} else if i.Debug {
-				log.Printf("Unable to send bad stat: %s", stat)
+				log.Printf("E! Instrumental unable to send bad stat: %s", stat)
 			}
 		}
 	}

@@ -152,9 +152,7 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error {
 	allPoints := strings.Join(points, "\n") + "\n"
 	_, err = fmt.Fprintf(i.conn, allPoints)

-	if i.Debug {
-		log.Println(allPoints)
-	}
+	log.Println("D! Instrumental: " + allPoints)

 	if err != nil {
 		if err == io.EOF {
@@ -83,7 +83,7 @@ func (k *KinesisOutput) Connect() error {
 	// We attempt first to create a session to Kinesis using an IAMS role, if that fails it will fall through to using
 	// environment variables, and then Shared Credentials.
 	if k.Debug {
-		log.Printf("kinesis: Establishing a connection to Kinesis in %+v", k.Region)
+		log.Printf("E! kinesis: Establishing a connection to Kinesis in %+v", k.Region)
 	}

 	credentialConfig := &internalaws.CredentialConfig{

@@ -105,17 +105,17 @@ func (k *KinesisOutput) Connect() error {
 	resp, err := svc.ListStreams(KinesisParams)

 	if err != nil {
-		log.Printf("kinesis: Error in ListSteams API call : %+v \n", err)
+		log.Printf("E! kinesis: Error in ListSteams API call : %+v \n", err)
 	}

 	if checkstream(resp.StreamNames, k.StreamName) {
 		if k.Debug {
-			log.Printf("kinesis: Stream Exists")
+			log.Printf("E! kinesis: Stream Exists")
 		}
 		k.svc = svc
 		return nil
 	} else {
-		log.Printf("kinesis : You have configured a StreamName %+v which does not exist. exiting.", k.StreamName)
+		log.Printf("E! kinesis : You have configured a StreamName %+v which does not exist. exiting.", k.StreamName)
 		os.Exit(1)
 	}
 	return err

@@ -147,14 +147,14 @@ func writekinesis(k *KinesisOutput, r []*kinesis.PutRecordsRequestEntry) time.Du
 	if k.Debug {
 		resp, err := k.svc.PutRecords(payload)
 		if err != nil {
-			log.Printf("kinesis: Unable to write to Kinesis : %+v \n", err.Error())
+			log.Printf("E! kinesis: Unable to write to Kinesis : %+v \n", err.Error())
 		}
-		log.Printf("%+v \n", resp)
+		log.Printf("E! %+v \n", resp)

 	} else {
 		_, err := k.svc.PutRecords(payload)
 		if err != nil {
-			log.Printf("kinesis: Unable to write to Kinesis : %+v \n", err.Error())
+			log.Printf("E! kinesis: Unable to write to Kinesis : %+v \n", err.Error())
 		}
 	}
 	return time.Since(start)

@@ -182,7 +182,7 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error {
 		if sz == 500 {
 			// Max Messages Per PutRecordRequest is 500
 			elapsed := writekinesis(k, r)
-			log.Printf("Wrote a %+v point batch to Kinesis in %+v.\n", sz, elapsed)
+			log.Printf("E! Wrote a %+v point batch to Kinesis in %+v.\n", sz, elapsed)
 			atomic.StoreUint32(&sz, 0)
 			r = nil
 		}
@@ -103,15 +103,13 @@ func (l *Librato) Write(metrics []telegraf.Metric) error {
 		if gauges, err := l.buildGauges(m); err == nil {
 			for _, gauge := range gauges {
 				tempGauges = append(tempGauges, gauge)
-				if l.Debug {
-					log.Printf("[DEBUG] Got a gauge: %v\n", gauge)
-				}
+				log.Printf("D! Got a gauge: %v\n", gauge)
 			}
 		} else {
-			log.Printf("unable to build Gauge for %s, skipping\n", m.Name())
-			if l.Debug {
-				log.Printf("[DEBUG] Couldn't build gauge: %v\n", err)
-			}
+			log.Printf("I! unable to build Gauge for %s, skipping\n", m.Name())
+			log.Printf("D! Couldn't build gauge: %v\n", err)
 		}
 	}

@@ -132,9 +130,7 @@ func (l *Librato) Write(metrics []telegraf.Metric) error {
 		return fmt.Errorf("unable to marshal Metrics, %s\n", err.Error())
 	}

-	if l.Debug {
-		log.Printf("[DEBUG] Librato request: %v\n", string(metricsBytes))
-	}
+	log.Printf("D! Librato request: %v\n", string(metricsBytes))

 	req, err := http.NewRequest(
 		"POST",

@@ -150,9 +146,7 @@ func (l *Librato) Write(metrics []telegraf.Metric) error {

 	resp, err := l.client.Do(req)
 	if err != nil {
-		if l.Debug {
-			log.Printf("[DEBUG] Error POSTing metrics: %v\n", err.Error())
-		}
+		log.Printf("D! Error POSTing metrics: %v\n", err.Error())
 		return fmt.Errorf("error POSTing metrics, %s\n", err.Error())
 	}
 	defer resp.Body.Close()

@@ -160,7 +154,7 @@ func (l *Librato) Write(metrics []telegraf.Metric) error {
 	if resp.StatusCode != 200 || l.Debug {
 		htmlData, err := ioutil.ReadAll(resp.Body)
 		if err != nil {
-			log.Printf("[DEBUG] Couldn't get response! (%v)\n", err)
+			log.Printf("D! Couldn't get response! (%v)\n", err)
 		}
 		if resp.StatusCode != 200 {
 			return fmt.Errorf(

@@ -168,9 +162,7 @@ func (l *Librato) Write(metrics []telegraf.Metric) error {
 				resp.StatusCode,
 				string(htmlData))
 		}
-		if l.Debug {
-			log.Printf("[DEBUG] Librato response: %v\n", string(htmlData))
-		}
+		log.Printf("D! Librato response: %v\n", string(htmlData))
 	}
 }

@@ -226,9 +218,8 @@ func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) {
 		}
 		gauges = append(gauges, gauge)
 	}
-	if l.Debug {
-		fmt.Printf("[DEBUG] Built gauges: %v\n", gauges)
-	}
+	log.Printf("D! Built gauges: %v\n", gauges)
 	return gauges, nil
 }

@@ -164,9 +164,11 @@ func (o *openTSDBHttp) flush() error {

 	if resp.StatusCode/100 != 2 {
 		if resp.StatusCode/100 == 4 {
-			log.Printf("WARNING: Received %d status code. Dropping metrics to avoid overflowing buffer.", resp.StatusCode)
+			log.Printf("E! Received %d status code. Dropping metrics to avoid overflowing buffer.",
+				resp.StatusCode)
 		} else {
-			return fmt.Errorf("Error when sending metrics.Received status %d", resp.StatusCode)
+			return fmt.Errorf("Error when sending metrics. Received status %d",
+				resp.StatusCode)
 		}
 	}

@@ -167,7 +167,7 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error {
 			continue
 		}
 		if err != nil {
-			log.Printf("ERROR creating prometheus metric, "+
+			log.Printf("E! Error creating prometheus metric, "+
 				"key: %s, labels: %v,\nerr: %s\n",
 				mname, l, err.Error())
 		}
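Every message touched in the hunks above now starts with an "E! ", "I! ", or "D! " prefix. One way such prefixes can be acted on is a level-aware io.Writer installed via log.SetOutput; the sketch below is a minimal, hypothetical illustration of that idea under stated assumptions (notably log.SetFlags(0)), not code taken from this commit.

package main

import (
	"io"
	"log"
	"os"
	"strings"
)

// levelWriter is a hypothetical filter: it inspects the "D! ", "I! ", or "E! "
// prefix carried by each log line and drops lines below the configured level
// before passing the rest to the underlying writer.
type levelWriter struct {
	out   io.Writer
	level int // 0 = debug, 1 = info, 2 = error
}

func levelOf(line string) int {
	switch {
	case strings.HasPrefix(line, "D! "):
		return 0
	case strings.HasPrefix(line, "I! "):
		return 1
	case strings.HasPrefix(line, "E! "):
		return 2
	}
	return 1 // unprefixed lines are treated as info in this sketch
}

func (w *levelWriter) Write(p []byte) (int, error) {
	// This sketch assumes log.SetFlags(0), so each write begins with the level
	// prefix; with timestamp flags enabled the prefix would follow the timestamp.
	if levelOf(string(p)) < w.level {
		return len(p), nil // silently drop lines below the threshold
	}
	return w.out.Write(p)
}

func main() {
	log.SetFlags(0)
	log.SetOutput(&levelWriter{out: os.Stderr, level: 1}) // keep info and above

	log.Printf("D! this debug line is dropped")
	log.Printf("I! Started the statsd service on :8125")
	log.Printf("E! Error parsing statsd line")
}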