2015-09-24 18:06:11 +00:00
|
|
|
package statsd
|
|
|
|
|
|
|
|
import (
|
2017-08-08 18:41:26 +00:00
|
|
|
"bufio"
|
2017-09-25 17:55:02 +00:00
|
|
|
"bytes"
|
2015-10-05 22:19:08 +00:00
|
|
|
"errors"
|
2015-10-06 21:38:16 +00:00
|
|
|
"fmt"
|
2015-09-24 18:06:11 +00:00
|
|
|
"log"
|
|
|
|
"net"
|
2015-10-06 21:38:16 +00:00
|
|
|
"sort"
|
2015-09-24 18:06:11 +00:00
|
|
|
"strconv"
|
|
|
|
"strings"
|
|
|
|
"sync"
|
2016-01-28 23:09:41 +00:00
|
|
|
"time"
|
2015-09-24 18:06:11 +00:00
|
|
|
|
2016-02-06 00:36:35 +00:00
|
|
|
"github.com/influxdata/telegraf/plugins/parsers/graphite"
|
2015-10-06 21:38:16 +00:00
|
|
|
|
2016-01-27 21:21:36 +00:00
|
|
|
"github.com/influxdata/telegraf"
|
2017-08-08 18:41:26 +00:00
|
|
|
"github.com/influxdata/telegraf/internal"
|
2016-01-20 18:57:35 +00:00
|
|
|
"github.com/influxdata/telegraf/plugins/inputs"
|
2017-08-08 18:41:26 +00:00
|
|
|
"github.com/influxdata/telegraf/selfstat"
|
2015-09-24 18:06:11 +00:00
|
|
|
)
|
|
|
|
|
2016-02-22 15:58:06 +00:00
|
|
|
const (
	// UDP packet limit, see
	// https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure
	// NOTE(review): ALL_CAPS violates Go naming conventions, but the name is
	// exported and may have external callers, so it is kept as-is.
	UDP_MAX_PACKET_SIZE int = 64 * 1024

	// defaultFieldName is the field used when a template yields no field name.
	defaultFieldName = "value"

	// defaultProtocol is used when no listener protocol is configured.
	defaultProtocol = "udp"

	// defaultSeparator joins bucket-name parts when metric_separator is unset.
	defaultSeparator = "_"
	// defaultAllowPendingMessage is the default size of the incoming-packet queue.
	defaultAllowPendingMessage = 10000
	// MaxTCPConnections is the default cap on concurrent TCP client connections.
	MaxTCPConnections = 250
)
|
2016-01-19 18:01:53 +00:00
|
|
|
|
2016-09-30 21:37:56 +00:00
|
|
|
// dropwarn is the log format used when the incoming message queue is full;
// the single %d is filled with the running drop count.
var dropwarn = "E! Error: statsd message queue full. " +
	"We have dropped %d messages so far. " +
	"You may want to increase allowed_pending_messages in the config\n"

// malformedwarn is the log format used when TCP packets fail to parse;
// the single %d is filled with the running malformed-packet count.
var malformedwarn = "E! Statsd over TCP has received %d malformed packets" +
	" thus far."
|
|
|
|
|
2015-09-24 18:06:11 +00:00
|
|
|
// Statsd is a service input that listens for statsd-protocol metrics over
// UDP or TCP, aggregates them between Gather calls, and reports the
// aggregated gauges, counters, sets and timings.
type Statsd struct {
	// Protocol used on listener - udp or tcp
	Protocol string `toml:"protocol"`

	// Address & Port to serve from
	ServiceAddress string

	// Number of messages allowed to queue up in between calls to Gather. If this
	// fills up, packets will get dropped until the next Gather interval is ran.
	AllowedPendingMessages int

	// Percentiles specifies the percentiles that will be calculated for timing
	// and histogram stats.
	Percentiles []int
	// PercentileLimit caps how many values each timing field retains for
	// percentile calculation (passed to RunningStats.PercLimit).
	PercentileLimit int

	// Delete* control whether each cache is cleared after every Gather.
	DeleteGauges   bool
	DeleteCounters bool
	DeleteSets     bool
	DeleteTimings  bool
	// ConvertNames is deprecated; see metric_separator (warned in Start).
	ConvertNames bool

	// MetricSeparator is the separator between parts of the metric name.
	MetricSeparator string
	// This flag enables parsing of tags in the dogstatsd extension to the
	// statsd protocol (http://docs.datadoghq.com/guides/dogstatsd/)
	ParseDataDogTags bool

	// UDPPacketSize is deprecated, it's only here for legacy support
	// we now always create 1 max size buffer and then copy only what we need
	// into the in channel
	// see https://github.com/influxdata/telegraf/pull/992
	UDPPacketSize int `toml:"udp_packet_size"`

	sync.Mutex
	// Lock for preventing a data race during resource cleanup
	cleanup sync.Mutex
	// wg waits for the listener, parser, and per-connection handler goroutines.
	wg sync.WaitGroup
	// accept channel tracks how many active connections there are, if there
	// is an available bool in accept, then we are below the maximum and can
	// accept the connection
	accept chan bool
	// drops tracks the number of dropped metrics.
	drops int
	// malformed tracks the number of malformed packets
	malformed int

	// Channel for all incoming statsd packets
	in   chan *bytes.Buffer
	done chan struct{}

	// Cache gauges, counters & sets so they can be aggregated as they arrive
	// gauges and counters map measurement/tags hash -> field name -> metrics
	// sets and timings map measurement/tags hash -> metrics
	gauges   map[string]cachedgauge
	counters map[string]cachedcounter
	sets     map[string]cachedset
	timings  map[string]cachedtimings

	// bucket -> influx templates
	Templates []string

	// Protocol listeners
	UDPlistener *net.UDPConn
	TCPlistener *net.TCPListener

	// track current connections so we can close them in Stop()
	conns map[string]*net.TCPConn

	MaxTCPConnections int `toml:"max_tcp_connections"`

	TCPKeepAlive       bool               `toml:"tcp_keep_alive"`
	TCPKeepAlivePeriod *internal.Duration `toml:"tcp_keep_alive_period"`

	// graphiteParser is cached and rebuilt only when the separator changes
	// (see parseName).
	graphiteParser *graphite.GraphiteParser

	acc telegraf.Accumulator

	// Internal statistics registered in Start, tagged with ServiceAddress.
	MaxConnections     selfstat.Stat
	CurrentConnections selfstat.Stat
	TotalConnections   selfstat.Stat
	PacketsRecv        selfstat.Stat
	BytesRecv          selfstat.Stat

	// A pool of byte slices to handle parsing
	bufPool sync.Pool
}
|
|
|
|
|
|
|
|
// One statsd metric, form is <bucket>:<value>|<mtype>|@<samplerate>
type metric struct {
	// name and field come from applying the graphite templates to bucket.
	name   string
	field  string
	bucket string
	// hash uniquely identifies the measurement name + sorted tag set; it is
	// the cache key used by aggregate().
	hash string
	// Exactly one of the value fields below is set, depending on mtype:
	// intvalue for counters, floatvalue for gauges/timings/histograms,
	// strvalue for sets.
	intvalue   int64
	floatvalue float64
	strvalue   string
	// mtype is one of "g", "c", "s", "ms", "h".
	mtype string
	// additive is true when a gauge/counter value had a +/- prefix.
	additive   bool
	samplerate float64
	tags       map[string]string
}
|
|
|
|
|
2015-10-06 19:33:35 +00:00
|
|
|
// cachedset accumulates set metrics: per field, the set of distinct string
// values seen (reported as a count in Gather).
type cachedset struct {
	name   string
	fields map[string]map[string]bool
	tags   map[string]string
}

// cachedgauge accumulates gauge metrics: per field, the latest (or
// additively adjusted) float64 value.
type cachedgauge struct {
	name   string
	fields map[string]interface{}
	tags   map[string]string
}

// cachedcounter accumulates counter metrics: per field, a running int64 sum.
type cachedcounter struct {
	name   string
	fields map[string]interface{}
	tags   map[string]string
}

// cachedtimings accumulates timing/histogram metrics: per field, a
// RunningStats tracking mean/stddev/percentiles etc.
type cachedtimings struct {
	name   string
	fields map[string]RunningStats
	tags   map[string]string
}
|
|
|
|
|
|
|
|
func (_ *Statsd) Description() string {
|
2017-08-08 18:41:26 +00:00
|
|
|
return "Statsd UDP/TCP Server"
|
2015-09-24 18:06:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// sampleConfig is the TOML fragment returned by SampleConfig; the text is
// user-visible documentation and must stay in sync with the Statsd fields.
const sampleConfig = `
  ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp)
  protocol = "udp"

  ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
  max_tcp_connections = 250

  ## Enable TCP keep alive probes (default=false)
  tcp_keep_alive = false

  ## Specifies the keep-alive period for an active network connection.
  ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
  ## Defaults to the OS configuration.
  # tcp_keep_alive_period = "2h"

  ## Address and port to host UDP listener on
  service_address = ":8125"

  ## The following configuration options control when telegraf clears it's cache
  ## of previous values. If set to false, then telegraf will only clear it's
  ## cache when the daemon is restarted.
  ## Reset gauges every interval (default=true)
  delete_gauges = true
  ## Reset counters every interval (default=true)
  delete_counters = true
  ## Reset sets every interval (default=true)
  delete_sets = true
  ## Reset timings & histograms every interval (default=true)
  delete_timings = true

  ## Percentiles to calculate for timing & histogram stats
  percentiles = [90]

  ## separator to use between elements of a statsd metric
  metric_separator = "_"

  ## Parses tags in the datadog statsd format
  ## http://docs.datadoghq.com/guides/dogstatsd/
  parse_data_dog_tags = false

  ## Statsd data translation templates, more info can be read here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
  # templates = [
  #     "cpu.* measurement*"
  # ]

  ## Number of UDP messages allowed to queue up, once filled,
  ## the statsd server will start dropping packets
  allowed_pending_messages = 10000

  ## Number of timing/histogram values to track per-measurement in the
  ## calculation of percentiles. Raising this limit increases the accuracy
  ## of percentiles but also increases the memory usage and cpu time.
  percentile_limit = 1000
`
|
|
|
|
|
|
|
|
func (_ *Statsd) SampleConfig() string {
|
|
|
|
return sampleConfig
|
|
|
|
}
|
|
|
|
|
2016-01-27 21:21:36 +00:00
|
|
|
// Gather flushes all cached timings, gauges, counters and sets to the
// accumulator, then clears each cache whose corresponding Delete* flag is
// set. It holds the plugin mutex for the whole flush, so incoming lines are
// not aggregated concurrently.
func (s *Statsd) Gather(acc telegraf.Accumulator) error {
	s.Lock()
	defer s.Unlock()
	// One timestamp for every point emitted in this flush.
	now := time.Now()

	for _, metric := range s.timings {
		// Defining a template to parse field names for timers allows us to split
		// out multiple fields per timer. In this case we prefix each stat with the
		// field name and store these all in a single measurement.
		fields := make(map[string]interface{})
		for fieldName, stats := range metric.fields {
			var prefix string
			// The default field name gets no prefix, preserving the classic
			// "mean"/"stddev"/... field names.
			if fieldName != defaultFieldName {
				prefix = fieldName + "_"
			}
			fields[prefix+"mean"] = stats.Mean()
			fields[prefix+"stddev"] = stats.Stddev()
			fields[prefix+"sum"] = stats.Sum()
			fields[prefix+"upper"] = stats.Upper()
			fields[prefix+"lower"] = stats.Lower()
			fields[prefix+"count"] = stats.Count()
			for _, percentile := range s.Percentiles {
				name := fmt.Sprintf("%s%v_percentile", prefix, percentile)
				fields[name] = stats.Percentile(percentile)
			}
		}

		acc.AddFields(metric.name, fields, metric.tags, now)
	}
	if s.DeleteTimings {
		s.timings = make(map[string]cachedtimings)
	}

	for _, metric := range s.gauges {
		acc.AddGauge(metric.name, metric.fields, metric.tags, now)
	}
	if s.DeleteGauges {
		s.gauges = make(map[string]cachedgauge)
	}

	for _, metric := range s.counters {
		acc.AddCounter(metric.name, metric.fields, metric.tags, now)
	}
	if s.DeleteCounters {
		s.counters = make(map[string]cachedcounter)
	}

	for _, metric := range s.sets {
		// Sets are reported as the cardinality of distinct values per field.
		fields := make(map[string]interface{})
		for field, set := range metric.fields {
			fields[field] = int64(len(set))
		}
		acc.AddFields(metric.name, fields, metric.tags, now)
	}
	if s.DeleteSets {
		s.sets = make(map[string]cachedset)
	}

	return nil
}
|
|
|
|
|
2016-02-16 00:21:38 +00:00
|
|
|
// Start initializes all caches, channels and self-statistics, then launches
// the listener goroutine (UDP or TCP, per s.Protocol) and the line-parser
// goroutine. The accumulator argument is unused here.
func (s *Statsd) Start(_ telegraf.Accumulator) error {
	// Make data structures
	s.gauges = make(map[string]cachedgauge)
	s.counters = make(map[string]cachedcounter)
	s.sets = make(map[string]cachedset)
	s.timings = make(map[string]cachedtimings)

	s.Lock()
	defer s.Unlock()
	// Register internal statistics, tagged with the listener address.
	tags := map[string]string{
		"address": s.ServiceAddress,
	}
	s.MaxConnections = selfstat.Register("statsd", "tcp_max_connections", tags)
	s.MaxConnections.Set(int64(s.MaxTCPConnections))
	s.CurrentConnections = selfstat.Register("statsd", "tcp_current_connections", tags)
	s.TotalConnections = selfstat.Register("statsd", "tcp_total_connections", tags)
	s.PacketsRecv = selfstat.Register("statsd", "tcp_packets_received", tags)
	s.BytesRecv = selfstat.Register("statsd", "tcp_bytes_received", tags)

	s.in = make(chan *bytes.Buffer, s.AllowedPendingMessages)
	s.done = make(chan struct{})
	s.accept = make(chan bool, s.MaxTCPConnections)
	s.conns = make(map[string]*net.TCPConn)
	// Pooled buffers avoid a fresh allocation per received packet.
	s.bufPool = sync.Pool{
		New: func() interface{} {
			return new(bytes.Buffer)
		},
	}
	// Pre-fill the accept channel: each token is one allowed TCP connection.
	for i := 0; i < s.MaxTCPConnections; i++ {
		s.accept <- true
	}

	if s.ConvertNames {
		log.Printf("I! WARNING statsd: convert_names config option is deprecated," +
			" please use metric_separator instead")
	}

	if s.MetricSeparator == "" {
		s.MetricSeparator = defaultSeparator
	}

	// Two goroutines follow: one listener + one parser.
	s.wg.Add(2)
	// Start the UDP listener
	if s.isUDP() {
		go s.udpListen()
	} else {
		go s.tcpListen()
	}
	// Start the line parser
	go s.parser()
	log.Printf("I! Started the statsd service on %s\n", s.ServiceAddress)
	return nil
}
|
|
|
|
|
2017-08-08 18:41:26 +00:00
|
|
|
// tcpListen starts listening for TCP connections on the configured port and
// hands each accepted connection to a handler goroutine, subject to the
// MaxTCPConnections limit enforced via the s.accept token channel.
func (s *Statsd) tcpListen() error {
	defer s.wg.Done()
	// Start listener
	var err error
	address, _ := net.ResolveTCPAddr("tcp", s.ServiceAddress)
	s.TCPlistener, err = net.ListenTCP("tcp", address)
	if err != nil {
		// NOTE(review): log.Fatalf exits the process, making the following
		// return unreachable — consider logging + returning err instead.
		log.Fatalf("ERROR: ListenTCP - %s", err)
		return err
	}
	log.Println("I! TCP Statsd listening on: ", s.TCPlistener.Addr().String())
	for {
		select {
		case <-s.done:
			return nil
		default:
			// Accept connection:
			conn, err := s.TCPlistener.AcceptTCP()
			if err != nil {
				// Also the exit path when Stop() closes the listener.
				return err
			}

			if s.TCPKeepAlive {
				if err = conn.SetKeepAlive(true); err != nil {
					return err
				}

				if s.TCPKeepAlivePeriod != nil {
					if err = conn.SetKeepAlivePeriod(s.TCPKeepAlivePeriod.Duration); err != nil {
						return err
					}
				}
			}

			select {
			case <-s.accept:
				// not over connection limit, handle the connection properly.
				s.wg.Add(1)
				// generate a random id for this TCPConn
				id := internal.RandomString(6)
				s.remember(id, conn)
				go s.handler(conn, id)
			default:
				// We are over the connection limit, refuse & close.
				s.refuser(conn)
			}
		}
	}
}
|
|
|
|
|
2015-09-24 18:06:11 +00:00
|
|
|
// udpListen starts listening for udp packets on the configured port.
|
|
|
|
func (s *Statsd) udpListen() error {
|
2016-03-17 16:16:12 +00:00
|
|
|
defer s.wg.Done()
|
2016-03-17 08:38:09 +00:00
|
|
|
var err error
|
2017-10-16 21:18:36 +00:00
|
|
|
address, _ := net.ResolveUDPAddr(s.Protocol, s.ServiceAddress)
|
|
|
|
s.UDPlistener, err = net.ListenUDP(s.Protocol, address)
|
2015-09-24 18:06:11 +00:00
|
|
|
if err != nil {
|
|
|
|
log.Fatalf("ERROR: ListenUDP - %s", err)
|
|
|
|
}
|
2017-08-08 18:41:26 +00:00
|
|
|
log.Println("I! Statsd UDP listener listening on: ", s.UDPlistener.LocalAddr().String())
|
2015-09-24 18:06:11 +00:00
|
|
|
|
2016-04-07 18:06:56 +00:00
|
|
|
buf := make([]byte, UDP_MAX_PACKET_SIZE)
|
2015-09-24 18:06:11 +00:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-s.done:
|
|
|
|
return nil
|
|
|
|
default:
|
2017-08-08 18:41:26 +00:00
|
|
|
n, _, err := s.UDPlistener.ReadFromUDP(buf)
|
2016-03-17 16:16:12 +00:00
|
|
|
if err != nil && !strings.Contains(err.Error(), "closed network") {
|
2016-09-30 21:37:56 +00:00
|
|
|
log.Printf("E! Error READ: %s\n", err.Error())
|
2016-03-17 08:38:09 +00:00
|
|
|
continue
|
2015-09-24 18:06:11 +00:00
|
|
|
}
|
2017-09-25 17:55:02 +00:00
|
|
|
b := s.bufPool.Get().(*bytes.Buffer)
|
|
|
|
b.Reset()
|
|
|
|
b.Write(buf[:n])
|
2015-09-24 18:06:11 +00:00
|
|
|
|
2016-01-19 18:01:53 +00:00
|
|
|
select {
|
2017-09-25 17:55:02 +00:00
|
|
|
case s.in <- b:
|
2016-01-19 18:01:53 +00:00
|
|
|
default:
|
2016-06-10 12:28:50 +00:00
|
|
|
s.drops++
|
2016-09-23 10:37:47 +00:00
|
|
|
if s.drops == 1 || s.AllowedPendingMessages == 0 || s.drops%s.AllowedPendingMessages == 0 {
|
2016-06-10 12:28:50 +00:00
|
|
|
log.Printf(dropwarn, s.drops)
|
|
|
|
}
|
2015-09-24 18:06:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-01-19 18:01:53 +00:00
|
|
|
// parser monitors the s.in channel, if there is a packet ready, it parses the
|
|
|
|
// packet into statsd strings and then calls parseStatsdLine, which parses a
|
|
|
|
// single statsd metric into a struct.
|
2015-09-24 18:06:11 +00:00
|
|
|
func (s *Statsd) parser() error {
|
2016-03-17 16:16:12 +00:00
|
|
|
defer s.wg.Done()
|
2015-09-24 18:06:11 +00:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-s.done:
|
|
|
|
return nil
|
2017-09-25 17:55:02 +00:00
|
|
|
case buf := <-s.in:
|
|
|
|
lines := strings.Split(buf.String(), "\n")
|
2017-10-05 19:12:14 +00:00
|
|
|
s.bufPool.Put(buf)
|
2016-01-19 18:01:53 +00:00
|
|
|
for _, line := range lines {
|
|
|
|
line = strings.TrimSpace(line)
|
|
|
|
if line != "" {
|
|
|
|
s.parseStatsdLine(line)
|
|
|
|
}
|
|
|
|
}
|
2015-09-24 18:06:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// parseStatsdLine will parse the given statsd line, validating it as it goes.
// If the line is valid, it will be cached for the next call to Gather().
// A line has the form <bucket>:<value>|<mtype>[|@<samplerate>], optionally
// with dogstatsd |#tag:value segments when ParseDataDogTags is enabled.
// Returns an error (after logging) on any malformed component.
func (s *Statsd) parseStatsdLine(line string) error {
	s.Lock()
	defer s.Unlock()

	lineTags := make(map[string]string)
	if s.ParseDataDogTags {
		recombinedSegments := make([]string, 0)
		// datadog tags look like this:
		// users.online:1|c|@0.5|#country:china,environment:production
		// users.online:1|c|#sometagwithnovalue
		// we will split on the pipe and remove any elements that are datadog
		// tags, parse them, and rebuild the line sans the datadog tags
		pipesplit := strings.Split(line, "|")
		for _, segment := range pipesplit {
			if len(segment) > 0 && segment[0] == '#' {
				// we have ourselves a tag; they are comma separated
				tagstr := segment[1:]
				tags := strings.Split(tagstr, ",")
				for _, tag := range tags {
					// Split on the first ':' only — tag values may contain ':'.
					ts := strings.SplitN(tag, ":", 2)
					var k, v string
					switch len(ts) {
					case 1:
						// just a tag
						k = ts[0]
						v = ""
					case 2:
						k = ts[0]
						v = ts[1]
					}
					if k != "" {
						lineTags[k] = v
					}
				}
			} else {
				recombinedSegments = append(recombinedSegments, segment)
			}
		}
		line = strings.Join(recombinedSegments, "|")
	}

	// Validate splitting the line on ":"
	bits := strings.Split(line, ":")
	if len(bits) < 2 {
		log.Printf("E! Error: splitting ':', Unable to parse metric: %s\n", line)
		return errors.New("Error Parsing statsd line")
	}

	// Extract bucket name from individual metric bits
	bucketName, bits := bits[0], bits[1:]

	// Add a metric for each bit available
	for _, bit := range bits {
		m := metric{}

		m.bucket = bucketName

		// Validate splitting the bit on "|"
		pipesplit := strings.Split(bit, "|")
		if len(pipesplit) < 2 {
			log.Printf("E! Error: splitting '|', Unable to parse metric: %s\n", line)
			return errors.New("Error Parsing statsd line")
		} else if len(pipesplit) > 2 {
			// Third segment is an optional sample rate of the form "@0.5".
			sr := pipesplit[2]
			errmsg := "E! Error: parsing sample rate, %s, it must be in format like: " +
				"@0.1, @0.5, etc. Ignoring sample rate for line: %s\n"
			if strings.Contains(sr, "@") && len(sr) > 1 {
				samplerate, err := strconv.ParseFloat(sr[1:], 64)
				if err != nil {
					log.Printf(errmsg, err.Error(), line)
				} else {
					// sample rate successfully parsed
					m.samplerate = samplerate
				}
			} else {
				log.Printf(errmsg, "", line)
			}
		}

		// Validate metric type
		switch pipesplit[1] {
		case "g", "c", "s", "ms", "h":
			m.mtype = pipesplit[1]
		default:
			log.Printf("E! Error: Statsd Metric type %s unsupported", pipesplit[1])
			return errors.New("Error Parsing statsd line")
		}

		// Parse the value
		if strings.HasPrefix(pipesplit[0], "-") || strings.HasPrefix(pipesplit[0], "+") {
			if m.mtype != "g" && m.mtype != "c" {
				log.Printf("E! Error: +- values are only supported for gauges & counters: %s\n", line)
				return errors.New("Error Parsing statsd line")
			}
			m.additive = true
		}

		switch m.mtype {
		case "g", "ms", "h":
			v, err := strconv.ParseFloat(pipesplit[0], 64)
			if err != nil {
				log.Printf("E! Error: parsing value to float64: %s\n", line)
				return errors.New("Error Parsing statsd line")
			}
			m.floatvalue = v
		case "c":
			var v int64
			v, err := strconv.ParseInt(pipesplit[0], 10, 64)
			if err != nil {
				// Counters may arrive as floats; fall back and truncate.
				v2, err2 := strconv.ParseFloat(pipesplit[0], 64)
				if err2 != nil {
					log.Printf("E! Error: parsing value to int64: %s\n", line)
					return errors.New("Error Parsing statsd line")
				}
				v = int64(v2)
			}
			// If a sample rate is given with a counter, divide value by the rate
			if m.samplerate != 0 && m.mtype == "c" {
				v = int64(float64(v) / m.samplerate)
			}
			m.intvalue = v
		case "s":
			m.strvalue = pipesplit[0]
		}

		// Parse the name & tags from bucket
		m.name, m.field, m.tags = s.parseName(m.bucket)
		switch m.mtype {
		case "c":
			m.tags["metric_type"] = "counter"
		case "g":
			m.tags["metric_type"] = "gauge"
		case "s":
			m.tags["metric_type"] = "set"
		case "ms":
			m.tags["metric_type"] = "timing"
		case "h":
			m.tags["metric_type"] = "histogram"
		}

		// Dogstatsd line tags override/extend template-derived tags.
		if len(lineTags) > 0 {
			for k, v := range lineTags {
				m.tags[k] = v
			}
		}

		// Make a unique key for the measurement name/tags
		var tg []string
		for k, v := range m.tags {
			tg = append(tg, k+"="+v)
		}
		sort.Strings(tg)
		tg = append(tg, m.name)
		m.hash = strings.Join(tg, "")

		s.aggregate(m)
	}

	return nil
}
|
|
|
|
|
|
|
|
// parseName parses the given bucket name with the list of bucket maps in the
// config file. If there is a match, it will parse the name of the metric and
// map of tags.
// Return values are (<name>, <field>, <tags>)
func (s *Statsd) parseName(bucket string) (string, string, map[string]string) {
	tags := make(map[string]string)

	// Bucket may carry inline tags after commas: "name,key=value,...".
	bucketparts := strings.Split(bucket, ",")
	// Parse out any tags in the bucket
	if len(bucketparts) > 1 {
		for _, btag := range bucketparts[1:] {
			k, v := parseKeyValue(btag)
			if k != "" {
				tags[k] = v
			}
		}
	}

	var field string
	name := bucketparts[0]

	// Reuse the cached graphite parser unless the separator changed;
	// called under s.Lock (see parseStatsdLine), so this is not racy.
	p := s.graphiteParser
	var err error

	if p == nil || s.graphiteParser.Separator != s.MetricSeparator {
		p, err = graphite.NewGraphiteParser(s.MetricSeparator, s.Templates, nil)
		s.graphiteParser = p
	}

	// err is non-nil only when a fresh parser failed to build; in that case
	// the template step is skipped and the raw name/tags pass through.
	if err == nil {
		p.DefaultTags = tags
		name, tags, field, _ = p.ApplyTemplate(name)
	}

	if s.ConvertNames {
		name = strings.Replace(name, ".", "_", -1)
		name = strings.Replace(name, "-", "__", -1)
	}
	if field == "" {
		field = defaultFieldName
	}

	return name, field, tags
}
|
|
|
|
|
|
|
|
// parseKeyValue parses a "key=value" string into its key and value parts.
// A string with no '=' is treated as a bare value with an empty key; a
// string with more than one '=' yields empty key and value.
func parseKeyValue(keyvalue string) (string, string) {
	parts := strings.Split(keyvalue, "=")
	switch len(parts) {
	case 2:
		// Exactly one '=': a well-formed key/value pair.
		return parts[0], parts[1]
	case 1:
		// No '=': the whole string is the value.
		return "", parts[0]
	}
	return "", ""
}
|
|
|
|
|
2015-10-07 22:11:52 +00:00
|
|
|
// aggregate takes in a metric. It then
// aggregates and caches the current value(s). It does not deal with the
// Delete* options, because those are dealt with in the Gather function.
// Callers must hold s.Lock (parseStatsdLine does).
func (s *Statsd) aggregate(m metric) {
	switch m.mtype {
	case "ms", "h":
		// Check if the measurement exists
		cached, ok := s.timings[m.hash]
		if !ok {
			cached = cachedtimings{
				name:   m.name,
				fields: make(map[string]RunningStats),
				tags:   m.tags,
			}
		}
		// Check if the field exists. If we've not enabled multiple fields per timer
		// this will be the default field name, eg. "value"
		field, ok := cached.fields[m.field]
		if !ok {
			field = RunningStats{
				PercLimit: s.PercentileLimit,
			}
		}
		// A sampled timing stands for 1/samplerate observations.
		if m.samplerate > 0 {
			for i := 0; i < int(1.0/m.samplerate); i++ {
				field.AddValue(m.floatvalue)
			}
		} else {
			field.AddValue(m.floatvalue)
		}
		cached.fields[m.field] = field
		s.timings[m.hash] = cached
	case "c":
		// check if the measurement exists
		_, ok := s.counters[m.hash]
		if !ok {
			s.counters[m.hash] = cachedcounter{
				name:   m.name,
				fields: make(map[string]interface{}),
				tags:   m.tags,
			}
		}
		// check if the field exists
		_, ok = s.counters[m.hash].fields[m.field]
		if !ok {
			s.counters[m.hash].fields[m.field] = int64(0)
		}
		// Counters accumulate: add the new value to the running int64 sum.
		s.counters[m.hash].fields[m.field] =
			s.counters[m.hash].fields[m.field].(int64) + m.intvalue
	case "g":
		// check if the measurement exists
		_, ok := s.gauges[m.hash]
		if !ok {
			s.gauges[m.hash] = cachedgauge{
				name:   m.name,
				fields: make(map[string]interface{}),
				tags:   m.tags,
			}
		}
		// check if the field exists
		_, ok = s.gauges[m.hash].fields[m.field]
		if !ok {
			s.gauges[m.hash].fields[m.field] = float64(0)
		}
		// "+value"/"-value" adjusts the gauge; a plain value replaces it.
		if m.additive {
			s.gauges[m.hash].fields[m.field] =
				s.gauges[m.hash].fields[m.field].(float64) + m.floatvalue
		} else {
			s.gauges[m.hash].fields[m.field] = m.floatvalue
		}
	case "s":
		// check if the measurement exists
		_, ok := s.sets[m.hash]
		if !ok {
			s.sets[m.hash] = cachedset{
				name:   m.name,
				fields: make(map[string]map[string]bool),
				tags:   m.tags,
			}
		}
		// check if the field exists
		_, ok = s.sets[m.hash].fields[m.field]
		if !ok {
			s.sets[m.hash].fields[m.field] = make(map[string]bool)
		}
		// Sets record distinct string values; Gather reports the count.
		s.sets[m.hash].fields[m.field][m.strvalue] = true
	}
}
|
|
|
|
|
2017-08-08 18:41:26 +00:00
|
|
|
// handler handles a single TCP Connection
|
|
|
|
func (s *Statsd) handler(conn *net.TCPConn, id string) {
|
|
|
|
s.CurrentConnections.Incr(1)
|
|
|
|
s.TotalConnections.Incr(1)
|
|
|
|
// connection cleanup function
|
|
|
|
defer func() {
|
|
|
|
s.wg.Done()
|
|
|
|
conn.Close()
|
|
|
|
// Add one connection potential back to channel when this one closes
|
|
|
|
s.accept <- true
|
|
|
|
s.forget(id)
|
|
|
|
s.CurrentConnections.Incr(-1)
|
|
|
|
}()
|
|
|
|
|
|
|
|
var n int
|
|
|
|
scanner := bufio.NewScanner(conn)
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-s.done:
|
|
|
|
return
|
|
|
|
default:
|
|
|
|
if !scanner.Scan() {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
n = len(scanner.Bytes())
|
|
|
|
if n == 0 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
s.BytesRecv.Incr(int64(n))
|
|
|
|
s.PacketsRecv.Incr(1)
|
2017-09-25 17:55:02 +00:00
|
|
|
|
|
|
|
b := s.bufPool.Get().(*bytes.Buffer)
|
|
|
|
b.Reset()
|
|
|
|
b.Write(scanner.Bytes())
|
|
|
|
b.WriteByte('\n')
|
2017-08-08 18:41:26 +00:00
|
|
|
|
|
|
|
select {
|
2017-09-25 17:55:02 +00:00
|
|
|
case s.in <- b:
|
2017-08-08 18:41:26 +00:00
|
|
|
default:
|
|
|
|
s.drops++
|
|
|
|
if s.drops == 1 || s.drops%s.AllowedPendingMessages == 0 {
|
|
|
|
log.Printf(dropwarn, s.drops)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// refuser refuses a TCP connection
|
|
|
|
func (s *Statsd) refuser(conn *net.TCPConn) {
|
|
|
|
conn.Close()
|
|
|
|
log.Printf("I! Refused TCP Connection from %s", conn.RemoteAddr())
|
|
|
|
log.Printf("I! WARNING: Maximum TCP Connections reached, you may want to" +
|
|
|
|
" adjust max_tcp_connections")
|
|
|
|
}
|
|
|
|
|
|
|
|
// forget a TCP connection
|
|
|
|
func (s *Statsd) forget(id string) {
|
|
|
|
s.cleanup.Lock()
|
|
|
|
defer s.cleanup.Unlock()
|
|
|
|
delete(s.conns, id)
|
|
|
|
}
|
|
|
|
|
|
|
|
// remember a TCP connection
|
|
|
|
func (s *Statsd) remember(id string, conn *net.TCPConn) {
|
|
|
|
s.cleanup.Lock()
|
|
|
|
defer s.cleanup.Unlock()
|
|
|
|
s.conns[id] = conn
|
|
|
|
}
|
|
|
|
|
2015-09-24 18:06:11 +00:00
|
|
|
// Stop shuts the statsd service down: it signals all goroutines via s.done,
// closes the active listener (and, for TCP, every tracked connection), waits
// for the worker goroutines to exit, and finally closes the input channel.
//
// NOTE(review): the Unlock before s.wg.Wait() is deliberate — workers may
// need s's lock to finish, so holding it across Wait could deadlock. Do not
// reorder the lock/wait sequence.
func (s *Statsd) Stop() {
	s.Lock()
	log.Println("I! Stopping the statsd service")
	close(s.done)
	if s.isUDP() {
		s.UDPlistener.Close()
	} else {
		s.TCPlistener.Close()
		// Close all open TCP connections
		//  - get all conns from the s.conns map and put into slice
		//  - this is so the forget() function doesn't conflict with looping
		//    over the s.conns map
		var conns []*net.TCPConn
		s.cleanup.Lock()
		for _, conn := range s.conns {
			conns = append(conns, conn)
		}
		s.cleanup.Unlock()
		for _, conn := range conns {
			conn.Close()
		}
	}
	s.Unlock()

	// Wait for listener/parser goroutines to observe s.done and exit.
	s.wg.Wait()

	// All senders have stopped, so it is now safe to close s.in.
	s.Lock()
	close(s.in)
	log.Println("I! Stopped Statsd listener service on ", s.ServiceAddress)
	s.Unlock()
}
|
|
|
|
|
2017-10-16 21:18:36 +00:00
|
|
|
// isUDP returns true if the protocol is UDP, false otherwise. A prefix match
// is used so UDP variants (e.g. "udp4", "udp6") also count as UDP.
func (s *Statsd) isUDP() bool {
	return strings.HasPrefix(s.Protocol, "udp")
}
|
|
|
|
|
2015-09-24 18:06:11 +00:00
|
|
|
func init() {
|
2016-01-27 21:21:36 +00:00
|
|
|
inputs.Add("statsd", func() telegraf.Input {
|
2016-01-19 18:01:53 +00:00
|
|
|
return &Statsd{
|
2017-08-08 18:41:26 +00:00
|
|
|
Protocol: defaultProtocol,
|
2016-12-13 16:34:52 +00:00
|
|
|
ServiceAddress: ":8125",
|
2017-08-08 18:41:26 +00:00
|
|
|
MaxTCPConnections: 250,
|
2018-02-16 04:04:49 +00:00
|
|
|
TCPKeepAlive: false,
|
2016-09-23 10:37:47 +00:00
|
|
|
MetricSeparator: "_",
|
|
|
|
AllowedPendingMessages: defaultAllowPendingMessage,
|
2016-12-13 16:34:52 +00:00
|
|
|
DeleteCounters: true,
|
|
|
|
DeleteGauges: true,
|
|
|
|
DeleteSets: true,
|
|
|
|
DeleteTimings: true,
|
2016-01-19 18:01:53 +00:00
|
|
|
}
|
2015-09-24 18:06:11 +00:00
|
|
|
})
|
|
|
|
}
|