statsd, udp, tcp: do not log every dropped metric.

Also apply this change to the udp_listener and tcp_listener input plugins.

closes #1340
Cameron Sparr 2016-06-10 13:28:50 +01:00
parent ea2521bf27
commit 06cb5a041e
4 changed files with 26 additions and 6 deletions
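Below is a minimal, self-contained sketch of the rate-limited drop warning this commit introduces: instead of logging every dropped metric, each listener counts drops and logs only on the first drop and then once every AllowedPendingMessages drops. The listener type, field names, queue size, and message text here are simplified stand-ins for illustration, not the actual plugin code; see the diffs below for the real changes.

package main

import "log"

// dropwarn mirrors the style of warning used by the plugins; %d is the
// running drop count.
var dropwarn = "ERROR: message queue full. " +
	"We have dropped %d messages so far. " +
	"You may want to increase allowed_pending_messages in the config\n"

type listener struct {
	// AllowedPendingMessages plays the role of the config option of the same name.
	AllowedPendingMessages int
	// drops tracks the number of dropped metrics.
	drops int
	// in is the bounded queue of incoming packets.
	in chan []byte
}

// enqueue tries to hand a packet to the queue; when the queue is full the
// packet is dropped, and a warning is logged only for the first drop and
// then for every AllowedPendingMessages-th drop.
func (l *listener) enqueue(buf []byte) {
	select {
	case l.in <- buf:
	default:
		l.drops++
		if l.drops == 1 || l.drops%l.AllowedPendingMessages == 0 {
			log.Printf(dropwarn, l.drops)
		}
	}
}

func main() {
	l := &listener{AllowedPendingMessages: 5, in: make(chan []byte, 1)}
	for i := 0; i < 12; i++ {
		l.enqueue([]byte("cpu.load:1|g"))
	}
	// The queue holds one packet, so 11 of the 12 packets are dropped;
	// warnings fire at drops 1, 5, and 10 instead of 11 times.
}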

CHANGELOG.md

@@ -2,6 +2,8 @@
 ### Features
+- [#1340](https://github.com/influxdata/telegraf/issues/1340): statsd: do not log every dropped metric.
 ### Bugfixes
 - [#1330](https://github.com/influxdata/telegraf/issues/1330): Fix exec plugin panic when using single binary.

plugins/inputs/statsd/statsd.go

@@ -27,7 +27,8 @@ const (
 	defaultSeparator = "_"
 )

-var dropwarn = "ERROR: Message queue full. Discarding line [%s] " +
+var dropwarn = "ERROR: statsd message queue full. " +
+	"We have dropped %d messages so far. " +
 	"You may want to increase allowed_pending_messages in the config\n"

 var prevInstance *Statsd

@@ -65,6 +66,8 @@ type Statsd struct {
 	sync.Mutex
 	wg sync.WaitGroup
+	// drops tracks the number of dropped metrics.
+	drops int

 	// Channel for all incoming statsd packets
 	in chan []byte

@@ -291,7 +294,10 @@ func (s *Statsd) udpListen() error {
 			select {
 			case s.in <- bufCopy:
 			default:
-				log.Printf(dropwarn, string(buf[:n]))
+				s.drops++
+				if s.drops == 1 || s.drops%s.AllowedPendingMessages == 0 {
+					log.Printf(dropwarn, s.drops)
+				}
 			}
 		}
 	}

plugins/inputs/tcp_listener/tcp_listener.go

@@ -29,6 +29,8 @@ type TcpListener struct {
 	// is an available bool in accept, then we are below the maximum and can
 	// accept the connection
 	accept chan bool
+	// drops tracks the number of dropped metrics.
+	drops int

 	// track the listener here so we can close it in Stop()
 	listener *net.TCPListener

@@ -39,7 +41,8 @@ type TcpListener struct {
 	acc telegraf.Accumulator
 }

-var dropwarn = "ERROR: Message queue full. Discarding metric [%s], " +
+var dropwarn = "ERROR: tcp_listener message queue full. " +
+	"We have dropped %d messages so far. " +
 	"You may want to increase allowed_pending_messages in the config\n"

 const sampleConfig = `

@@ -212,7 +215,10 @@ func (t *TcpListener) handler(conn *net.TCPConn, id string) {
 		select {
 		case t.in <- bufCopy:
 		default:
-			log.Printf(dropwarn, scanner.Text())
+			t.drops++
+			if t.drops == 1 || t.drops%t.AllowedPendingMessages == 0 {
+				log.Printf(dropwarn, t.drops)
+			}
 		}
 	}
 }

plugins/inputs/udp_listener/udp_listener.go

@@ -25,6 +25,8 @@ type UdpListener struct {
 	in   chan []byte
 	done chan struct{}
+	// drops tracks the number of dropped metrics.
+	drops int

 	parser parsers.Parser

@@ -38,7 +40,8 @@ type UdpListener struct {
 // https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure
 const UDP_MAX_PACKET_SIZE int = 64 * 1024

-var dropwarn = "ERROR: Message queue full. Discarding line [%s] " +
+var dropwarn = "ERROR: udp_listener message queue full. " +
+	"We have dropped %d messages so far. " +
 	"You may want to increase allowed_pending_messages in the config\n"

 const sampleConfig = `

@@ -125,7 +128,10 @@ func (u *UdpListener) udpListen() error {
 			select {
 			case u.in <- bufCopy:
 			default:
-				log.Printf(dropwarn, string(bufCopy))
+				u.drops++
+				if u.drops == 1 || u.drops%u.AllowedPendingMessages == 0 {
+					log.Printf(dropwarn, u.drops)
+				}
 			}
 		}
 	}