2015-09-24 18:06:11 +00:00
|
|
|
package statsd
|
|
|
|
|
|
|
|
import (
|
2015-10-05 22:19:08 +00:00
|
|
|
"errors"
|
2015-10-06 21:38:16 +00:00
|
|
|
"fmt"
|
2015-09-24 18:06:11 +00:00
|
|
|
"log"
|
|
|
|
"net"
|
2015-10-06 21:38:16 +00:00
|
|
|
"sort"
|
2015-09-24 18:06:11 +00:00
|
|
|
"strconv"
|
|
|
|
"strings"
|
|
|
|
"sync"
|
|
|
|
|
2015-10-06 21:38:16 +00:00
|
|
|
"github.com/influxdb/influxdb/services/graphite"
|
|
|
|
|
2016-01-07 20:39:43 +00:00
|
|
|
"github.com/influxdb/telegraf/plugins/inputs"
|
2015-09-24 18:06:11 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// dropwarn is the log format emitted when an incoming statsd line is
// discarded because the s.in queue is full; the %s verb receives the
// dropped line.
var dropwarn = "ERROR: Message queue full. Discarding line [%s] " +
	"You may want to increase allowed_pending_messages in the config\n"
|
|
|
|
|
|
|
|
// Statsd is a service input plugin: it runs a UDP listener that accepts
// statsd-protocol lines and aggregates the parsed metrics in per-type
// caches between calls to Gather.
type Statsd struct {
	// Address & Port to serve from
	ServiceAddress string

	// Number of messages allowed to queue up in between calls to Gather. If this
	// fills up, packets will get dropped until the next Gather interval is ran.
	AllowedPendingMessages int

	// Percentiles specifies the percentiles that will be calculated for timing
	// and histogram stats.
	Percentiles []int
	// PercentileLimit caps the number of values the RunningStats keeps per
	// timing/histogram measurement for percentile calculation.
	PercentileLimit int

	// Delete* flags: when true, the corresponding cache is reset after each
	// Gather instead of being carried over to the next interval.
	DeleteGauges   bool
	DeleteCounters bool
	DeleteSets     bool
	DeleteTimings  bool

	// Protects the caches below; taken by both Gather and parseStatsdLine.
	sync.Mutex

	// Channel for all incoming statsd messages
	in chan string
	// done is closed by Stop to terminate the listener and parser goroutines.
	done chan struct{}

	// Cache gauges, counters & sets so they can be aggregated as they arrive
	gauges   map[string]cachedgauge
	counters map[string]cachedcounter
	sets     map[string]cachedset
	timings  map[string]cachedtimings

	// bucket -> influx templates
	Templates []string
}
|
|
|
|
|
2015-10-05 22:19:08 +00:00
|
|
|
func NewStatsd() *Statsd {
|
|
|
|
s := Statsd{}
|
|
|
|
|
|
|
|
// Make data structures
|
|
|
|
s.done = make(chan struct{})
|
|
|
|
s.in = make(chan string, s.AllowedPendingMessages)
|
2015-10-06 19:33:35 +00:00
|
|
|
s.gauges = make(map[string]cachedgauge)
|
|
|
|
s.counters = make(map[string]cachedcounter)
|
|
|
|
s.sets = make(map[string]cachedset)
|
2015-10-07 22:11:52 +00:00
|
|
|
s.timings = make(map[string]cachedtimings)
|
2015-10-05 22:19:08 +00:00
|
|
|
|
|
|
|
return &s
|
|
|
|
}
|
|
|
|
|
2015-09-24 18:06:11 +00:00
|
|
|
// One statsd metric, form is <bucket>:<value>|<mtype>|@<samplerate>
|
|
|
|
// One statsd metric, form is <bucket>:<value>|<mtype>|@<samplerate>
type metric struct {
	name       string  // measurement name after template/tag parsing of the bucket
	bucket     string  // raw bucket portion of the line (text before the first ':')
	hash       string  // cache key: concatenation of sorted tags and name
	intvalue   int64   // parsed value for counters ("c") and sets ("s")
	floatvalue float64 // parsed value for gauges ("g"), timings ("ms"), histograms ("h")
	mtype      string  // metric type: one of "g", "c", "s", "ms", "h"
	additive   bool    // true when a gauge value carried a '+' or '-' prefix
	samplerate float64 // parsed @rate value; 0 when no sample rate was given
	tags       map[string]string // tags from the bucket plus the "metric_type" tag
}
|
|
|
|
|
2015-10-06 19:33:35 +00:00
|
|
|
// cachedset accumulates the distinct values seen for one set measurement;
// Gather reports the cardinality of set.
type cachedset struct {
	name string
	set  map[int64]bool // membership map of values observed so far
	tags map[string]string
}
|
|
|
|
|
|
|
|
// cachedgauge holds the latest (or additively adjusted) value of one gauge.
type cachedgauge struct {
	name  string
	value float64
	tags  map[string]string
}
|
|
|
|
|
|
|
|
// cachedcounter holds the running sum of one counter measurement.
type cachedcounter struct {
	name  string
	value int64
	tags  map[string]string
}
|
|
|
|
|
2015-10-07 22:11:52 +00:00
|
|
|
// cachedtimings holds the RunningStats accumulator for one timing or
// histogram measurement.
type cachedtimings struct {
	name  string
	stats RunningStats
	tags  map[string]string
}
|
|
|
|
|
|
|
|
func (_ *Statsd) Description() string {
|
2015-10-15 21:53:29 +00:00
|
|
|
return "Statsd Server"
|
2015-09-24 18:06:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// sampleConfig is the TOML configuration snippet returned by SampleConfig.
const sampleConfig = `
  # Address and port to host UDP listener on
  service_address = ":8125"
  # Delete gauges every interval (default=false)
  delete_gauges = false
  # Delete counters every interval (default=false)
  delete_counters = false
  # Delete sets every interval (default=false)
  delete_sets = false
  # Delete timings & histograms every interval (default=true)
  delete_timings = true
  # Percentiles to calculate for timing & histogram stats
  percentiles = [90]

  # templates = [
  #     "cpu.* measurement*"
  # ]

  # Number of UDP messages allowed to queue up, once filled,
  # the statsd server will start dropping packets
  allowed_pending_messages = 10000

  # Number of timing/histogram values to track per-measurement in the
  # calculation of percentiles. Raising this limit increases the accuracy
  # of percentiles but also increases the memory usage and cpu time.
  percentile_limit = 1000
`
|
|
|
|
|
|
|
|
func (_ *Statsd) SampleConfig() string {
|
|
|
|
return sampleConfig
|
|
|
|
}
|
|
|
|
|
2016-01-07 20:39:43 +00:00
|
|
|
func (s *Statsd) Gather(acc inputs.Accumulator) error {
|
2015-09-24 18:06:11 +00:00
|
|
|
s.Lock()
|
|
|
|
defer s.Unlock()
|
|
|
|
|
2015-10-07 22:11:52 +00:00
|
|
|
for _, metric := range s.timings {
|
|
|
|
acc.Add(metric.name+"_mean", metric.stats.Mean(), metric.tags)
|
|
|
|
acc.Add(metric.name+"_stddev", metric.stats.Stddev(), metric.tags)
|
|
|
|
acc.Add(metric.name+"_upper", metric.stats.Upper(), metric.tags)
|
|
|
|
acc.Add(metric.name+"_lower", metric.stats.Lower(), metric.tags)
|
|
|
|
acc.Add(metric.name+"_count", metric.stats.Count(), metric.tags)
|
|
|
|
for _, percentile := range s.Percentiles {
|
|
|
|
name := fmt.Sprintf("%s_percentile_%v", metric.name, percentile)
|
|
|
|
acc.Add(name, metric.stats.Percentile(percentile), metric.tags)
|
2015-09-24 18:06:11 +00:00
|
|
|
}
|
|
|
|
}
|
2015-10-07 22:11:52 +00:00
|
|
|
if s.DeleteTimings {
|
|
|
|
s.timings = make(map[string]cachedtimings)
|
|
|
|
}
|
2015-09-24 18:06:11 +00:00
|
|
|
|
2015-10-07 22:11:52 +00:00
|
|
|
for _, metric := range s.gauges {
|
|
|
|
acc.Add(metric.name, metric.value, metric.tags)
|
2015-09-24 18:06:11 +00:00
|
|
|
}
|
|
|
|
if s.DeleteGauges {
|
2015-10-06 19:33:35 +00:00
|
|
|
s.gauges = make(map[string]cachedgauge)
|
2015-09-24 18:06:11 +00:00
|
|
|
}
|
|
|
|
|
2015-10-07 22:11:52 +00:00
|
|
|
for _, metric := range s.counters {
|
|
|
|
acc.Add(metric.name, metric.value, metric.tags)
|
2015-09-24 18:06:11 +00:00
|
|
|
}
|
|
|
|
if s.DeleteCounters {
|
2015-10-06 19:33:35 +00:00
|
|
|
s.counters = make(map[string]cachedcounter)
|
2015-09-24 18:06:11 +00:00
|
|
|
}
|
|
|
|
|
2015-10-07 22:11:52 +00:00
|
|
|
for _, metric := range s.sets {
|
|
|
|
acc.Add(metric.name, int64(len(metric.set)), metric.tags)
|
2015-09-24 18:06:11 +00:00
|
|
|
}
|
|
|
|
if s.DeleteSets {
|
2015-10-06 19:33:35 +00:00
|
|
|
s.sets = make(map[string]cachedset)
|
2015-09-24 18:06:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Statsd) Start() error {
|
|
|
|
// Make data structures
|
|
|
|
s.done = make(chan struct{})
|
|
|
|
s.in = make(chan string, s.AllowedPendingMessages)
|
2015-10-06 19:33:35 +00:00
|
|
|
s.gauges = make(map[string]cachedgauge)
|
|
|
|
s.counters = make(map[string]cachedcounter)
|
|
|
|
s.sets = make(map[string]cachedset)
|
2015-10-07 22:11:52 +00:00
|
|
|
s.timings = make(map[string]cachedtimings)
|
2015-09-24 18:06:11 +00:00
|
|
|
|
|
|
|
// Start the UDP listener
|
|
|
|
go s.udpListen()
|
|
|
|
// Start the line parser
|
|
|
|
go s.parser()
|
2015-11-16 20:12:45 +00:00
|
|
|
log.Printf("Started the statsd service on %s\n", s.ServiceAddress)
|
2015-09-24 18:06:11 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// udpListen starts listening for udp packets on the configured port.
|
|
|
|
func (s *Statsd) udpListen() error {
|
|
|
|
address, _ := net.ResolveUDPAddr("udp", s.ServiceAddress)
|
|
|
|
listener, err := net.ListenUDP("udp", address)
|
|
|
|
if err != nil {
|
|
|
|
log.Fatalf("ERROR: ListenUDP - %s", err)
|
|
|
|
}
|
|
|
|
defer listener.Close()
|
|
|
|
log.Println("Statsd listener listening on: ", listener.LocalAddr().String())
|
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-s.done:
|
|
|
|
return nil
|
|
|
|
default:
|
|
|
|
buf := make([]byte, 1024)
|
|
|
|
n, _, err := listener.ReadFromUDP(buf)
|
|
|
|
if err != nil {
|
|
|
|
log.Printf("ERROR: %s\n", err.Error())
|
|
|
|
}
|
|
|
|
|
|
|
|
lines := strings.Split(string(buf[:n]), "\n")
|
|
|
|
for _, line := range lines {
|
|
|
|
line = strings.TrimSpace(line)
|
|
|
|
if line != "" {
|
|
|
|
select {
|
|
|
|
case s.in <- line:
|
|
|
|
default:
|
|
|
|
log.Printf(dropwarn, line)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// parser monitors the s.in channel, if there is a line ready, it parses the
|
2015-10-07 22:11:52 +00:00
|
|
|
// statsd string into a usable metric struct and aggregates the value
|
2015-09-24 18:06:11 +00:00
|
|
|
func (s *Statsd) parser() error {
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-s.done:
|
|
|
|
return nil
|
|
|
|
case line := <-s.in:
|
|
|
|
s.parseStatsdLine(line)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// parseStatsdLine will parse the given statsd line, validating it as it goes.
// If the line is valid, it will be cached for the next call to Gather().
//
// Expected wire format: <bucket>:<value>|<mtype>[|@<samplerate>] with
// multiple ":"-separated value|type sections allowed after the bucket.
// Returns a non-nil error (and logs) on any malformed section.
func (s *Statsd) parseStatsdLine(line string) error {
	// The caches are shared with Gather, so hold the mutex for the whole parse.
	s.Lock()
	defer s.Unlock()

	// Validate splitting the line on ":"
	bits := strings.Split(line, ":")
	if len(bits) < 2 {
		log.Printf("Error: splitting ':', Unable to parse metric: %s\n", line)
		return errors.New("Error Parsing statsd line")
	}

	// Extract bucket name from individual metric bits
	bucketName, bits := bits[0], bits[1:]

	// Add a metric for each bit available
	for _, bit := range bits {
		m := metric{}

		m.bucket = bucketName

		// Validate splitting the bit on "|"
		pipesplit := strings.Split(bit, "|")
		if len(pipesplit) < 2 {
			log.Printf("Error: splitting '|', Unable to parse metric: %s\n", line)
			return errors.New("Error Parsing statsd line")
		} else if len(pipesplit) > 2 {
			// A third section is a sample rate, e.g. "@0.1".
			sr := pipesplit[2]
			errmsg := "Error: parsing sample rate, %s, it must be in format like: " +
				"@0.1, @0.5, etc. Ignoring sample rate for line: %s\n"
			if strings.Contains(sr, "@") && len(sr) > 1 {
				samplerate, err := strconv.ParseFloat(sr[1:], 64)
				if err != nil {
					log.Printf(errmsg, err.Error(), line)
				} else {
					// sample rate successfully parsed
					m.samplerate = samplerate
				}
			} else {
				// Malformed rate section: warn but keep the metric.
				log.Printf(errmsg, "", line)
			}
		}

		// Validate metric type
		switch pipesplit[1] {
		case "g", "c", "s", "ms", "h":
			m.mtype = pipesplit[1]
		default:
			log.Printf("Error: Statsd Metric type %s unsupported", pipesplit[1])
			return errors.New("Error Parsing statsd line")
		}

		// Parse the value. A leading '+' or '-' marks an additive gauge
		// update and is rejected for every other metric type.
		if strings.ContainsAny(pipesplit[0], "-+") {
			if m.mtype != "g" {
				log.Printf("Error: +- values are only supported for gauges: %s\n", line)
				return errors.New("Error Parsing statsd line")
			}
			m.additive = true
		}

		switch m.mtype {
		case "g", "ms", "h":
			// Gauges, timings and histograms carry float values.
			v, err := strconv.ParseFloat(pipesplit[0], 64)
			if err != nil {
				log.Printf("Error: parsing value to float64: %s\n", line)
				return errors.New("Error Parsing statsd line")
			}
			m.floatvalue = v
		case "c", "s":
			// Counters and sets carry integer values.
			v, err := strconv.ParseInt(pipesplit[0], 10, 64)
			if err != nil {
				log.Printf("Error: parsing value to int64: %s\n", line)
				return errors.New("Error Parsing statsd line")
			}
			// If a sample rate is given with a counter, divide value by the rate
			if m.samplerate != 0 && m.mtype == "c" {
				v = int64(float64(v) / m.samplerate)
			}
			m.intvalue = v
		}

		// Parse the name & tags from bucket
		m.name, m.tags = s.parseName(m.bucket)
		switch m.mtype {
		case "c":
			m.tags["metric_type"] = "counter"
		case "g":
			m.tags["metric_type"] = "gauge"
		case "s":
			m.tags["metric_type"] = "set"
		case "ms":
			m.tags["metric_type"] = "timing"
		case "h":
			m.tags["metric_type"] = "histogram"
		}

		// Make a unique key for the measurement name/tags. Tags are sorted so
		// the key is deterministic regardless of map iteration order.
		var tg []string
		for k, v := range m.tags {
			tg = append(tg, fmt.Sprintf("%s=%s", k, v))
		}
		sort.Strings(tg)
		m.hash = fmt.Sprintf("%s%s", strings.Join(tg, ""), m.name)

		s.aggregate(m)
	}

	return nil
}
|
|
|
|
|
|
|
|
// parseName parses the given bucket name with the list of bucket maps in the
|
|
|
|
// config file. If there is a match, it will parse the name of the metric and
|
|
|
|
// map of tags.
|
|
|
|
// Return values are (<name>, <tags>)
|
2015-10-07 22:11:52 +00:00
|
|
|
func (s *Statsd) parseName(bucket string) (string, map[string]string) {
|
2015-10-06 19:33:35 +00:00
|
|
|
tags := make(map[string]string)
|
2015-09-24 18:06:11 +00:00
|
|
|
|
2015-10-07 22:11:52 +00:00
|
|
|
bucketparts := strings.Split(bucket, ",")
|
|
|
|
// Parse out any tags in the bucket
|
|
|
|
if len(bucketparts) > 1 {
|
|
|
|
for _, btag := range bucketparts[1:] {
|
|
|
|
k, v := parseKeyValue(btag)
|
|
|
|
if k != "" {
|
|
|
|
tags[k] = v
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-10-06 21:38:16 +00:00
|
|
|
o := graphite.Options{
|
2015-10-07 22:11:52 +00:00
|
|
|
Separator: "_",
|
|
|
|
Templates: s.Templates,
|
|
|
|
DefaultTags: tags,
|
2015-09-24 18:06:11 +00:00
|
|
|
}
|
|
|
|
|
2015-10-07 22:11:52 +00:00
|
|
|
name := bucketparts[0]
|
2015-10-06 21:38:16 +00:00
|
|
|
p, err := graphite.NewParserWithOptions(o)
|
|
|
|
if err == nil {
|
2015-10-16 22:43:31 +00:00
|
|
|
name, tags, _, _ = p.ApplyTemplate(name)
|
2015-10-06 21:38:16 +00:00
|
|
|
}
|
|
|
|
name = strings.Replace(name, ".", "_", -1)
|
|
|
|
name = strings.Replace(name, "-", "__", -1)
|
|
|
|
|
2015-10-07 22:11:52 +00:00
|
|
|
return name, tags
|
|
|
|
}
|
|
|
|
|
|
|
|
// parseKeyValue splits a "key=value" fragment into its halves. A fragment
// without '=' yields ("", fragment); a fragment with more than one '=' is
// treated as malformed and yields ("", "").
func parseKeyValue(keyvalue string) (string, string) {
	parts := strings.Split(keyvalue, "=")
	switch len(parts) {
	case 2:
		// Exactly one '=': a well-formed pair.
		return parts[0], parts[1]
	case 1:
		// No '=': treat the whole fragment as a bare value.
		return "", parts[0]
	default:
		return "", ""
	}
}
|
|
|
|
|
2015-10-07 22:11:52 +00:00
|
|
|
// aggregate takes in a metric. It then
|
|
|
|
// aggregates and caches the current value(s). It does not deal with the
|
|
|
|
// Delete* options, because those are dealt with in the Gather function.
|
2015-09-24 18:06:11 +00:00
|
|
|
func (s *Statsd) aggregate(m metric) {
|
|
|
|
switch m.mtype {
|
2015-10-07 22:11:52 +00:00
|
|
|
case "ms", "h":
|
|
|
|
cached, ok := s.timings[m.hash]
|
|
|
|
if !ok {
|
|
|
|
cached = cachedtimings{
|
|
|
|
name: m.name,
|
|
|
|
tags: m.tags,
|
|
|
|
stats: RunningStats{
|
|
|
|
PercLimit: s.PercentileLimit,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if m.samplerate > 0 {
|
|
|
|
for i := 0; i < int(1.0/m.samplerate); i++ {
|
|
|
|
cached.stats.AddValue(m.floatvalue)
|
|
|
|
}
|
|
|
|
s.timings[m.hash] = cached
|
|
|
|
} else {
|
|
|
|
cached.stats.AddValue(m.floatvalue)
|
|
|
|
s.timings[m.hash] = cached
|
|
|
|
}
|
2015-09-24 18:06:11 +00:00
|
|
|
case "c":
|
2015-10-06 21:38:16 +00:00
|
|
|
cached, ok := s.counters[m.hash]
|
2015-09-24 18:06:11 +00:00
|
|
|
if !ok {
|
2015-10-06 21:38:16 +00:00
|
|
|
s.counters[m.hash] = cachedcounter{
|
|
|
|
name: m.name,
|
2015-10-06 19:33:35 +00:00
|
|
|
value: m.intvalue,
|
2015-09-24 18:06:11 +00:00
|
|
|
tags: m.tags,
|
|
|
|
}
|
|
|
|
} else {
|
2015-10-06 19:33:35 +00:00
|
|
|
cached.value += m.intvalue
|
2015-10-06 21:38:16 +00:00
|
|
|
s.counters[m.hash] = cached
|
2015-09-24 18:06:11 +00:00
|
|
|
}
|
|
|
|
case "g":
|
2015-10-06 21:38:16 +00:00
|
|
|
cached, ok := s.gauges[m.hash]
|
2015-09-24 18:06:11 +00:00
|
|
|
if !ok {
|
2015-10-06 21:38:16 +00:00
|
|
|
s.gauges[m.hash] = cachedgauge{
|
|
|
|
name: m.name,
|
2015-10-06 19:33:35 +00:00
|
|
|
value: m.floatvalue,
|
2015-09-24 18:06:11 +00:00
|
|
|
tags: m.tags,
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if m.additive {
|
2015-10-06 19:33:35 +00:00
|
|
|
cached.value = cached.value + m.floatvalue
|
2015-09-24 18:06:11 +00:00
|
|
|
} else {
|
2015-10-06 19:33:35 +00:00
|
|
|
cached.value = m.floatvalue
|
2015-09-24 18:06:11 +00:00
|
|
|
}
|
2015-10-06 21:38:16 +00:00
|
|
|
s.gauges[m.hash] = cached
|
2015-09-24 18:06:11 +00:00
|
|
|
}
|
|
|
|
case "s":
|
2015-10-06 21:38:16 +00:00
|
|
|
cached, ok := s.sets[m.hash]
|
2015-09-24 18:06:11 +00:00
|
|
|
if !ok {
|
|
|
|
// Completely new metric (initialize with count of 1)
|
2015-10-06 21:38:16 +00:00
|
|
|
s.sets[m.hash] = cachedset{
|
|
|
|
name: m.name,
|
2015-10-06 19:33:35 +00:00
|
|
|
tags: m.tags,
|
|
|
|
set: map[int64]bool{m.intvalue: true},
|
2015-09-24 18:06:11 +00:00
|
|
|
}
|
|
|
|
} else {
|
2015-10-06 19:33:35 +00:00
|
|
|
cached.set[m.intvalue] = true
|
2015-10-06 21:38:16 +00:00
|
|
|
s.sets[m.hash] = cached
|
2015-09-24 18:06:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Statsd) Stop() {
|
|
|
|
s.Lock()
|
|
|
|
defer s.Unlock()
|
|
|
|
log.Println("Stopping the statsd service")
|
|
|
|
close(s.done)
|
|
|
|
close(s.in)
|
|
|
|
}
|
|
|
|
|
|
|
|
func init() {
|
2016-01-07 20:39:43 +00:00
|
|
|
inputs.Add("statsd", func() inputs.Input {
|
2015-09-24 18:06:11 +00:00
|
|
|
return &Statsd{}
|
|
|
|
})
|
|
|
|
}
|