Add prometheus serializer and use it in prometheus output (#6703)

Daniel Nelson 2019-11-26 15:46:31 -08:00 committed by GitHub
parent 8f71bbaa48
commit 80c5edd48e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
20 changed files with 2516 additions and 1144 deletions

Gopkg.lock

@ -1750,6 +1750,7 @@
"github.com/go-sql-driver/mysql",
"github.com/gobwas/glob",
"github.com/gofrs/uuid",
"github.com/gogo/protobuf/proto",
"github.com/golang/protobuf/proto",
"github.com/golang/protobuf/ptypes/duration",
"github.com/golang/protobuf/ptypes/empty",


@ -5,10 +5,11 @@ standard data formats that may be selected from when configuring many output
plugins.
1. [InfluxDB Line Protocol](/plugins/serializers/influx)
1. [Carbon2](/plugins/serializers/carbon2)
1. [Graphite](/plugins/serializers/graphite)
1. [JSON](/plugins/serializers/json)
1. [Prometheus](/plugins/serializers/prometheus)
1. [SplunkMetric](/plugins/serializers/splunkmetric)
1. [Wavefront](/plugins/serializers/wavefront)
You will be able to identify the plugins with support by the presence of a


@ -2,6 +2,7 @@ package internal
import (
"crypto/subtle"
"net"
"net/http"
)
@ -43,3 +44,49 @@ func (h *basicAuthHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request)
h.next.ServeHTTP(rw, req)
}
// IPRangeHandler returns a http handler that requires the remote address to be
// in the specified network.
func IPRangeHandler(network []*net.IPNet, onError ErrorFunc) func(h http.Handler) http.Handler {
return func(h http.Handler) http.Handler {
return &ipRangeHandler{
network: network,
onError: onError,
next: h,
}
}
}
type ipRangeHandler struct {
network []*net.IPNet
onError ErrorFunc
next http.Handler
}
func (h *ipRangeHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
if len(h.network) == 0 {
h.next.ServeHTTP(rw, req)
return
}
remoteIPString, _, err := net.SplitHostPort(req.RemoteAddr)
if err != nil {
h.onError(rw, http.StatusForbidden)
return
}
remoteIP := net.ParseIP(remoteIPString)
if remoteIP == nil {
h.onError(rw, http.StatusForbidden)
return
}
for _, net := range h.network {
if net.Contains(remoteIP) {
h.next.ServeHTTP(rw, req)
return
}
}
h.onError(rw, http.StatusForbidden)
}
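
The new `IPRangeHandler` is a standard `func(http.Handler) http.Handler` middleware, and the prometheus_client output below composes it with the existing `AuthHandler`. A minimal sketch of that composition, assuming the three-argument `AuthHandler` signature used later in this commit (address, credentials, and CIDR are placeholders):

```go
package main

import (
	"net"
	"net/http"

	"github.com/influxdata/telegraf/internal"
)

func main() {
	// Only allow clients from this CIDR range.
	_, cidr, _ := net.ParseCIDR("192.168.0.0/24")

	onError := func(rw http.ResponseWriter, code int) {
		http.Error(rw, http.StatusText(code), code)
	}

	// Inner handler; a real caller would wrap e.g. a promhttp handler.
	inner := http.HandlerFunc(func(rw http.ResponseWriter, _ *http.Request) {
		rw.Write([]byte("ok"))
	})

	basicAuth := internal.AuthHandler("user", "pass", onError)
	ipAllow := internal.IPRangeHandler([]*net.IPNet{cidr}, onError)

	// Requests must pass basic auth and originate from 192.168.0.0/24.
	http.ListenAndServe(":8080", basicAuth(ipAllow(inner)))
}
```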


@ -33,8 +33,8 @@ const sampleConfig = `
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
## Name a tag containing the name of the file the data was parsed from. Leave empty
## to disable.
# file_tag = ""
`


@ -11,8 +11,14 @@ in Prometheus format.
## An array of urls to scrape metrics from.
urls = ["http://localhost:9100/metrics"]
## Metric version (optional, default=1, supported values are 1 and 2)
# metric_version = 2
## Metric version controls the mapping from Prometheus metrics into
## Telegraf metrics. When using the prometheus_client output, use the same
## value in both plugins to ensure metrics are round-tripped without
## modification.
##
## example: metric_version = 1; deprecated in 1.13
## metric_version = 2; recommended version
# metric_version = 1
## An array of Kubernetes services to scrape metrics from.
# kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]


@ -62,8 +62,14 @@ var sampleConfig = `
## An array of urls to scrape metrics from.
urls = ["http://localhost:9100/metrics"]
## Metric version (optional, default=1, supported values are 1 and 2)
# metric_version = 2
## Metric version controls the mapping from Prometheus metrics into
## Telegraf metrics. When using the prometheus_client output, use the same
## value in both plugins to ensure metrics are round-tripped without
## modification.
##
## example: metric_version = 1; deprecated in 1.13
## metric_version = 2; recommended version
# metric_version = 1
## Url tag name (tag containing scraped url; optional, default is "url")
# url_tag = "scrapeUrl"
@ -95,7 +101,7 @@ var sampleConfig = `
# username = ""
# password = ""
## Specify timeout duration for slower prometheus clients (default is 3s)
# response_timeout = "3s"
## Optional TLS Config
@ -114,6 +120,13 @@ func (p *Prometheus) Description() string {
return "Read metrics from one or many prometheus clients"
}
func (p *Prometheus) Init() error {
if p.MetricVersion != 2 {
p.Log.Warnf("Use of deprecated configuration: 'metric_version = 1'; please update to 'metric_version = 2'")
}
return nil
}
var ErrProtocolError = errors.New("prometheus protocol error")
func (p *Prometheus) AddressToURL(u *url.URL, address string) *url.URL {
@ -311,7 +324,9 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error
tags := metric.Tags()
// strip user and password from URL
u.OriginalURL.User = nil
tags[p.URLTag] = u.OriginalURL.String()
if p.URLTag != "" {
tags[p.URLTag] = u.OriginalURL.String()
}
if u.Address != "" {
tags["address"] = u.Address
}


@ -9,6 +9,11 @@ This plugin writes telegraf metrics to files
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
## Use batch serialization format instead of line based delimiting. The
## batch format allows for the production of non line based output formats and
## may more efficiently encode and write metrics.
# use_batch_format = false
## The file will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed.
# rotation_interval = "0h"


@ -3,7 +3,6 @@ package file
import (
"fmt"
"io"
"log"
"os"
"github.com/influxdata/telegraf"
@ -18,6 +17,8 @@ type File struct {
RotationInterval internal.Duration `toml:"rotation_interval"`
RotationMaxSize internal.Size `toml:"rotation_max_size"`
RotationMaxArchives int `toml:"rotation_max_archives"`
UseBatchFormat bool `toml:"use_batch_format"`
Log telegraf.Logger `toml:"-"`
writer io.Writer
closers []io.Closer
@ -28,6 +29,11 @@ var sampleConfig = `
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
## Use batch serialization format instead of line based delimiting. The
## batch format allows for the production of non line based output formats and
## may more efficiently encode metric groups.
# use_batch_format = false
## The file will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed.
# rotation_interval = "0d"
@ -98,15 +104,27 @@ func (f *File) Description() string {
func (f *File) Write(metrics []telegraf.Metric) error {
var writeErr error = nil
for _, metric := range metrics {
b, err := f.serializer.Serialize(metric)
if f.UseBatchFormat {
octets, err := f.serializer.SerializeBatch(metrics)
if err != nil {
log.Printf("D! [outputs.file] Could not serialize metric: %v", err)
f.Log.Errorf("Could not serialize metric: %v", err)
}
_, err = f.writer.Write(b)
_, err = f.writer.Write(octets)
if err != nil {
writeErr = fmt.Errorf("E! [outputs.file] failed to write message: %v", err)
f.Log.Errorf("Error writing to file: %v", err)
}
} else {
for _, metric := range metrics {
b, err := f.serializer.Serialize(metric)
if err != nil {
f.Log.Debugf("Could not serialize metric: %v", err)
}
_, err = f.writer.Write(b)
if err != nil {
writeErr = fmt.Errorf("E! [outputs.file] failed to write message: %v", err)
}
}
}
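
The `use_batch_format` option selects between the two methods of the serializer contract. The interface below mirrors the calls made on `f.serializer` in this diff; `writeMetrics` is an illustrative helper, not part of the commit:

```go
package sketch

import (
	"io"

	"github.com/influxdata/telegraf"
)

// Serializer mirrors the two methods this diff calls on f.serializer:
// per-metric Serialize for line-delimited formats, and SerializeBatch for
// formats such as prometheus that must encode a group of metrics together.
type Serializer interface {
	Serialize(metric telegraf.Metric) ([]byte, error)
	SerializeBatch(metrics []telegraf.Metric) ([]byte, error)
}

// writeMetrics shows the same branch taken by (*File).Write above.
func writeMetrics(w io.Writer, s Serializer, batch bool, metrics []telegraf.Metric) error {
	if batch {
		octets, err := s.SerializeBatch(metrics)
		if err != nil {
			return err
		}
		_, err = w.Write(octets)
		return err
	}
	for _, m := range metrics {
		octets, err := s.Serialize(m)
		if err != nil {
			continue // skip metrics that cannot be serialized
		}
		if _, err := w.Write(octets); err != nil {
			return err
		}
	}
	return nil
}
```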


@ -1,6 +1,7 @@
# Prometheus Client Service Output Plugin
# Prometheus Output Plugin
This plugin starts a [Prometheus](https://prometheus.io/) Client; it exposes
all metrics on `/metrics` (default) to be polled by a Prometheus server.
## Configuration
@ -10,6 +11,14 @@ This plugin starts a [Prometheus](https://prometheus.io/) Client, it exposes all
## Address to listen on.
listen = ":9273"
## Metric version controls the mapping from Telegraf metrics into
## Prometheus format. When using the prometheus input, use the same value in
## both plugins to ensure metrics are round-tripped without modification.
##
## example: metric_version = 1; deprecated in 1.13
## metric_version = 2; recommended version
# metric_version = 1
## Use HTTP Basic Authentication.
# basic_username = "Foo"
# basic_password = "Bar"


@ -1,18 +1,12 @@
package prometheus_client
package prometheus
import (
"context"
"crypto/subtle"
"crypto/tls"
"fmt"
"log"
"net"
"net/http"
"net/url"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"time"
@ -20,73 +14,30 @@ import (
"github.com/influxdata/telegraf/internal"
tlsint "github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/outputs/prometheus_client/v1"
"github.com/influxdata/telegraf/plugins/outputs/prometheus_client/v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
var (
invalidNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9_:]`)
validNameCharRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*`)
defaultListen = ":9273"
defaultPath = "/metrics"
defaultExpirationInterval = internal.Duration{Duration: 60 * time.Second}
)
// SampleID uniquely identifies a Sample
type SampleID string
// Sample represents the current value of a series.
type Sample struct {
// Labels are the Prometheus labels.
Labels map[string]string
// Value is the value in the Prometheus output. Only one of these will be populated.
Value float64
HistogramValue map[float64]uint64
SummaryValue map[float64]float64
// Histograms and Summaries need a count and a sum
Count uint64
Sum float64
// Metric timestamp
Timestamp time.Time
// Expiration is the deadline that this Sample is valid until.
Expiration time.Time
}
// MetricFamily contains the data required to build valid prometheus Metrics.
type MetricFamily struct {
// Samples are the Sample belonging to this MetricFamily.
Samples map[SampleID]*Sample
// Need the telegraf ValueType because there isn't a Prometheus ValueType
// representing Histogram or Summary
TelegrafValueType telegraf.ValueType
// LabelSet is the label counts for all Samples.
LabelSet map[string]int
}
type PrometheusClient struct {
Listen string
BasicUsername string `toml:"basic_username"`
BasicPassword string `toml:"basic_password"`
IPRange []string `toml:"ip_range"`
ExpirationInterval internal.Duration `toml:"expiration_interval"`
Path string `toml:"path"`
CollectorsExclude []string `toml:"collectors_exclude"`
StringAsLabel bool `toml:"string_as_label"`
ExportTimestamp bool `toml:"export_timestamp"`
tlsint.ServerConfig
server *http.Server
url string
sync.Mutex
// fam is the non-expired MetricFamily by Prometheus metric name.
fam map[string]*MetricFamily
// now returns the current time.
now func() time.Time
}
var sampleConfig = `
## Address to listen on
listen = ":9273"
## Metric version controls the mapping from Telegraf metrics into
## Prometheus format. When using the prometheus input, use the same value in
## both plugins to ensure metrics are round-tripped without modification.
##
## example: metric_version = 1; deprecated in 1.13
## metric_version = 2; recommended version
# metric_version = 1
## Use HTTP Basic Authentication.
# basic_username = "Foo"
# basic_password = "Bar"
@ -121,46 +72,42 @@ var sampleConfig = `
# export_timestamp = false
`
func (p *PrometheusClient) auth(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if p.BasicUsername != "" && p.BasicPassword != "" {
w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`)
username, password, ok := r.BasicAuth()
if !ok ||
subtle.ConstantTimeCompare([]byte(username), []byte(p.BasicUsername)) != 1 ||
subtle.ConstantTimeCompare([]byte(password), []byte(p.BasicPassword)) != 1 {
http.Error(w, "Not authorized", 401)
return
}
}
if len(p.IPRange) > 0 {
matched := false
remoteIPs, _, _ := net.SplitHostPort(r.RemoteAddr)
remoteIP := net.ParseIP(remoteIPs)
for _, iprange := range p.IPRange {
_, ipNet, err := net.ParseCIDR(iprange)
if err != nil {
http.Error(w, "Config Error in ip_range setting", 500)
return
}
if ipNet.Contains(remoteIP) {
matched = true
break
}
}
if !matched {
http.Error(w, "Not authorized", 401)
return
}
}
h.ServeHTTP(w, r)
})
type Collector interface {
Describe(ch chan<- *prometheus.Desc)
Collect(ch chan<- prometheus.Metric)
Add(metrics []telegraf.Metric) error
}
func (p *PrometheusClient) Connect() error {
type PrometheusClient struct {
Listen string `toml:"listen"`
MetricVersion int `toml:"metric_version"`
BasicUsername string `toml:"basic_username"`
BasicPassword string `toml:"basic_password"`
IPRange []string `toml:"ip_range"`
ExpirationInterval internal.Duration `toml:"expiration_interval"`
Path string `toml:"path"`
CollectorsExclude []string `toml:"collectors_exclude"`
StringAsLabel bool `toml:"string_as_label"`
ExportTimestamp bool `toml:"export_timestamp"`
tlsint.ServerConfig
Log telegraf.Logger `toml:"-"`
server *http.Server
url *url.URL
collector Collector
wg sync.WaitGroup
}
func (p *PrometheusClient) Description() string {
return "Configuration for the Prometheus client to spawn"
}
func (p *PrometheusClient) SampleConfig() string {
return sampleConfig
}
func (p *PrometheusClient) Init() error {
defaultCollectors := map[string]bool{
"gocollector": true,
"process": true,
@ -181,421 +128,137 @@ func (p *PrometheusClient) Connect() error {
}
}
err := registry.Register(p)
if err != nil {
return err
switch p.MetricVersion {
default:
fallthrough
case 1:
p.Log.Warnf("Use of deprecated configuration: metric_version = 1; please update to metric_version = 2")
p.collector = v1.NewCollector(p.ExpirationInterval.Duration, p.StringAsLabel, p.Log)
err := registry.Register(p.collector)
if err != nil {
return err
}
case 2:
p.collector = v2.NewCollector(p.ExpirationInterval.Duration, p.StringAsLabel)
err := registry.Register(p.collector)
if err != nil {
return err
}
}
if p.Listen == "" {
p.Listen = "localhost:9273"
ipRange := make([]*net.IPNet, 0, len(p.IPRange))
for _, cidr := range p.IPRange {
_, ipNet, err := net.ParseCIDR(cidr)
if err != nil {
return fmt.Errorf("error parsing ip_range: %v", err)
}
ipRange = append(ipRange, ipNet)
}
if p.Path == "" {
p.Path = "/metrics"
}
authHandler := internal.AuthHandler(p.BasicUsername, p.BasicPassword, onAuthError)
rangeHandler := internal.IPRangeHandler(ipRange, onError)
promHandler := promhttp.HandlerFor(registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError})
mux := http.NewServeMux()
mux.Handle(p.Path, p.auth(promhttp.HandlerFor(
registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError})))
if p.Path == "" {
p.Path = "/"
}
mux.Handle(p.Path, authHandler(rangeHandler(promHandler)))
tlsConfig, err := p.TLSConfig()
if err != nil {
return err
}
p.server = &http.Server{
Addr: p.Listen,
Handler: mux,
TLSConfig: tlsConfig,
}
var listener net.Listener
if tlsConfig != nil {
listener, err = tls.Listen("tcp", p.Listen, tlsConfig)
return nil
}
func (p *PrometheusClient) listen() (net.Listener, error) {
if p.server.TLSConfig != nil {
return tls.Listen("tcp", p.Listen, p.server.TLSConfig)
} else {
listener, err = net.Listen("tcp", p.Listen)
return net.Listen("tcp", p.Listen)
}
}
func (p *PrometheusClient) Connect() error {
listener, err := p.listen()
if err != nil {
return err
}
p.url = createURL(tlsConfig, listener, p.Path)
scheme := "http"
if p.server.TLSConfig != nil {
scheme = "https"
}
p.url = &url.URL{
Scheme: scheme,
Host: listener.Addr().String(),
Path: p.Path,
}
p.Log.Infof("Listening on %s", p.URL())
p.wg.Add(1)
go func() {
defer p.wg.Done()
err := p.server.Serve(listener)
if err != nil && err != http.ErrServerClosed {
log.Printf("E! Error creating prometheus metric endpoint, err: %s\n",
err.Error())
p.Log.Errorf("Server error: %v", err)
}
}()
return nil
}
func onAuthError(rw http.ResponseWriter, code int) {
rw.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`)
http.Error(rw, http.StatusText(code), code)
}
func onError(rw http.ResponseWriter, code int) {
http.Error(rw, http.StatusText(code), code)
}
// URL returns the URL the plugin is listening on. If not listening, an
// empty string is returned.
func (p *PrometheusClient) URL() string {
return p.url
}
func createURL(tlsConfig *tls.Config, listener net.Listener, path string) string {
u := url.URL{
Scheme: "http",
Host: listener.Addr().String(),
Path: path,
if p.url != nil {
return p.url.String()
}
if tlsConfig != nil {
u.Scheme = "https"
}
return u.String()
return ""
}
func (p *PrometheusClient) Close() error {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
err := p.server.Shutdown(ctx)
prometheus.Unregister(p)
p.url = ""
p.wg.Wait()
p.url = nil
prometheus.Unregister(p.collector)
return err
}
func (p *PrometheusClient) SampleConfig() string {
return sampleConfig
}
func (p *PrometheusClient) Description() string {
return "Configuration for the Prometheus client to spawn"
}
// Implements prometheus.Collector
func (p *PrometheusClient) Describe(ch chan<- *prometheus.Desc) {
prometheus.NewGauge(prometheus.GaugeOpts{Name: "Dummy", Help: "Dummy"}).Describe(ch)
}
// Expire removes Samples that have expired.
func (p *PrometheusClient) Expire() {
now := p.now()
for name, family := range p.fam {
for key, sample := range family.Samples {
if p.ExpirationInterval.Duration != 0 && now.After(sample.Expiration) {
for k := range sample.Labels {
family.LabelSet[k]--
}
delete(family.Samples, key)
if len(family.Samples) == 0 {
delete(p.fam, name)
}
}
}
}
}
// Collect implements prometheus.Collector
func (p *PrometheusClient) Collect(ch chan<- prometheus.Metric) {
p.Lock()
defer p.Unlock()
p.Expire()
for name, family := range p.fam {
// Get list of all labels on MetricFamily
var labelNames []string
for k, v := range family.LabelSet {
if v > 0 {
labelNames = append(labelNames, k)
}
}
desc := prometheus.NewDesc(name, "Telegraf collected metric", labelNames, nil)
for _, sample := range family.Samples {
// Get labels for this sample; unset labels will be set to the
// empty string
var labels []string
for _, label := range labelNames {
v := sample.Labels[label]
labels = append(labels, v)
}
var metric prometheus.Metric
var err error
switch family.TelegrafValueType {
case telegraf.Summary:
metric, err = prometheus.NewConstSummary(desc, sample.Count, sample.Sum, sample.SummaryValue, labels...)
case telegraf.Histogram:
metric, err = prometheus.NewConstHistogram(desc, sample.Count, sample.Sum, sample.HistogramValue, labels...)
default:
metric, err = prometheus.NewConstMetric(desc, getPromValueType(family.TelegrafValueType), sample.Value, labels...)
}
if err != nil {
log.Printf("E! Error creating prometheus metric, "+
"key: %s, labels: %v,\nerr: %s\n",
name, labels, err.Error())
continue
}
if p.ExportTimestamp {
metric = prometheus.NewMetricWithTimestamp(sample.Timestamp, metric)
}
ch <- metric
}
}
}
func sanitize(value string) string {
return invalidNameCharRE.ReplaceAllString(value, "_")
}
func isValidTagName(tag string) bool {
return validNameCharRE.MatchString(tag)
}
func getPromValueType(tt telegraf.ValueType) prometheus.ValueType {
switch tt {
case telegraf.Counter:
return prometheus.CounterValue
case telegraf.Gauge:
return prometheus.GaugeValue
default:
return prometheus.UntypedValue
}
}
// CreateSampleID creates a SampleID based on the tags of a telegraf.Metric.
func CreateSampleID(tags map[string]string) SampleID {
pairs := make([]string, 0, len(tags))
for k, v := range tags {
pairs = append(pairs, fmt.Sprintf("%s=%s", k, v))
}
sort.Strings(pairs)
return SampleID(strings.Join(pairs, ","))
}
func addSample(fam *MetricFamily, sample *Sample, sampleID SampleID) {
for k := range sample.Labels {
fam.LabelSet[k]++
}
fam.Samples[sampleID] = sample
}
func (p *PrometheusClient) addMetricFamily(point telegraf.Metric, sample *Sample, mname string, sampleID SampleID) {
var fam *MetricFamily
var ok bool
if fam, ok = p.fam[mname]; !ok {
fam = &MetricFamily{
Samples: make(map[SampleID]*Sample),
TelegrafValueType: point.Type(),
LabelSet: make(map[string]int),
}
p.fam[mname] = fam
}
addSample(fam, sample, sampleID)
}
// Sorted returns a copy of the metrics in time ascending order. A copy is
// made to avoid modifying the input metric slice since doing so is not
// allowed.
func sorted(metrics []telegraf.Metric) []telegraf.Metric {
batch := make([]telegraf.Metric, 0, len(metrics))
for i := len(metrics) - 1; i >= 0; i-- {
batch = append(batch, metrics[i])
}
sort.Slice(batch, func(i, j int) bool {
return batch[i].Time().Before(batch[j].Time())
})
return batch
}
func (p *PrometheusClient) Write(metrics []telegraf.Metric) error {
p.Lock()
defer p.Unlock()
now := p.now()
for _, point := range sorted(metrics) {
tags := point.Tags()
sampleID := CreateSampleID(tags)
labels := make(map[string]string)
for k, v := range tags {
tName := sanitize(k)
if !isValidTagName(tName) {
continue
}
labels[tName] = v
}
// Prometheus doesn't have a string value type, so convert string
// fields to labels if enabled.
if p.StringAsLabel {
for fn, fv := range point.Fields() {
switch fv := fv.(type) {
case string:
tName := sanitize(fn)
if !isValidTagName(tName) {
continue
}
labels[tName] = fv
}
}
}
switch point.Type() {
case telegraf.Summary:
var mname string
var sum float64
var count uint64
summaryvalue := make(map[float64]float64)
for fn, fv := range point.Fields() {
var value float64
switch fv := fv.(type) {
case int64:
value = float64(fv)
case uint64:
value = float64(fv)
case float64:
value = fv
default:
continue
}
switch fn {
case "sum":
sum = value
case "count":
count = uint64(value)
default:
limit, err := strconv.ParseFloat(fn, 64)
if err == nil {
summaryvalue[limit] = value
}
}
}
sample := &Sample{
Labels: labels,
SummaryValue: summaryvalue,
Count: count,
Sum: sum,
Timestamp: point.Time(),
Expiration: now.Add(p.ExpirationInterval.Duration),
}
mname = sanitize(point.Name())
if !isValidTagName(mname) {
continue
}
p.addMetricFamily(point, sample, mname, sampleID)
case telegraf.Histogram:
var mname string
var sum float64
var count uint64
histogramvalue := make(map[float64]uint64)
for fn, fv := range point.Fields() {
var value float64
switch fv := fv.(type) {
case int64:
value = float64(fv)
case uint64:
value = float64(fv)
case float64:
value = fv
default:
continue
}
switch fn {
case "sum":
sum = value
case "count":
count = uint64(value)
default:
limit, err := strconv.ParseFloat(fn, 64)
if err == nil {
histogramvalue[limit] = uint64(value)
}
}
}
sample := &Sample{
Labels: labels,
HistogramValue: histogramvalue,
Count: count,
Sum: sum,
Timestamp: point.Time(),
Expiration: now.Add(p.ExpirationInterval.Duration),
}
mname = sanitize(point.Name())
if !isValidTagName(mname) {
continue
}
p.addMetricFamily(point, sample, mname, sampleID)
default:
for fn, fv := range point.Fields() {
// Ignore string and bool fields.
var value float64
switch fv := fv.(type) {
case int64:
value = float64(fv)
case uint64:
value = float64(fv)
case float64:
value = fv
default:
continue
}
sample := &Sample{
Labels: labels,
Value: value,
Timestamp: point.Time(),
Expiration: now.Add(p.ExpirationInterval.Duration),
}
// Special handling of value field; supports passthrough from
// the prometheus input.
var mname string
switch point.Type() {
case telegraf.Counter:
if fn == "counter" {
mname = sanitize(point.Name())
}
case telegraf.Gauge:
if fn == "gauge" {
mname = sanitize(point.Name())
}
}
if mname == "" {
if fn == "value" {
mname = sanitize(point.Name())
} else {
mname = sanitize(fmt.Sprintf("%s_%s", point.Name(), fn))
}
}
if !isValidTagName(mname) {
continue
}
p.addMetricFamily(point, sample, mname, sampleID)
}
}
}
return nil
return p.collector.Add(metrics)
}
func init() {
outputs.Add("prometheus_client", func() telegraf.Output {
return &PrometheusClient{
ExpirationInterval: internal.Duration{Duration: time.Second * 60},
Listen: defaultListen,
Path: defaultPath,
ExpirationInterval: defaultExpirationInterval,
StringAsLabel: true,
fam: make(map[string]*MetricFamily),
now: time.Now,
}
})
}
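
Both collector versions registered in `Init` satisfy the `Collector` interface declared near the top of this file. If desired, that can be made explicit with a compile-time check placed in the same package (illustrative only, not part of this commit):

```go
package prometheus

import (
	"github.com/influxdata/telegraf/plugins/outputs/prometheus_client/v1"
	"github.com/influxdata/telegraf/plugins/outputs/prometheus_client/v2"
)

// Compile-time assertions that both collector implementations satisfy the
// Collector interface used by PrometheusClient.
var (
	_ Collector = (*v1.Collector)(nil)
	_ Collector = (*v2.Collector)(nil)
)
```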


@ -1,693 +1,304 @@
package prometheus_client
package prometheus
import (
"io/ioutil"
"net/http"
"strings"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/metric"
prometheus_input "github.com/influxdata/telegraf/plugins/inputs/prometheus"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
func setUnixTime(client *PrometheusClient, sec int64) {
client.now = func() time.Time {
return time.Unix(sec, 0)
}
}
// NewClient initializes a PrometheusClient.
func NewClient() *PrometheusClient {
return &PrometheusClient{
ExpirationInterval: internal.Duration{Duration: time.Second * 60},
StringAsLabel: true,
fam: make(map[string]*MetricFamily),
now: time.Now,
}
}
func TestWrite_Basic(t *testing.T) {
now := time.Now()
pt1, err := metric.New(
"foo",
make(map[string]string),
map[string]interface{}{"value": 0.0},
now)
var metrics = []telegraf.Metric{
pt1,
}
client := NewClient()
err = client.Write(metrics)
require.NoError(t, err)
fam, ok := client.fam["foo"]
require.True(t, ok)
require.Equal(t, telegraf.Untyped, fam.TelegrafValueType)
require.Equal(t, map[string]int{}, fam.LabelSet)
sample, ok := fam.Samples[CreateSampleID(pt1.Tags())]
require.True(t, ok)
require.Equal(t, 0.0, sample.Value)
require.True(t, now.Before(sample.Expiration))
}
func TestWrite_IntField(t *testing.T) {
client := NewClient()
p1, err := metric.New(
"foo",
make(map[string]string),
map[string]interface{}{"value": 42},
time.Now())
err = client.Write([]telegraf.Metric{p1})
require.NoError(t, err)
fam, ok := client.fam["foo"]
require.True(t, ok)
for _, v := range fam.Samples {
require.Equal(t, 42.0, v.Value)
}
}
func TestWrite_FieldNotValue(t *testing.T) {
client := NewClient()
p1, err := metric.New(
"foo",
make(map[string]string),
map[string]interface{}{"howdy": 0.0},
time.Now())
err = client.Write([]telegraf.Metric{p1})
require.NoError(t, err)
fam, ok := client.fam["foo_howdy"]
require.True(t, ok)
for _, v := range fam.Samples {
require.Equal(t, 0.0, v.Value)
}
}
func TestWrite_SkipNonNumberField(t *testing.T) {
client := NewClient()
p1, err := metric.New(
"foo",
make(map[string]string),
map[string]interface{}{"value": "howdy"},
time.Now())
err = client.Write([]telegraf.Metric{p1})
require.NoError(t, err)
_, ok := client.fam["foo"]
require.False(t, ok)
}
func TestWrite_Counters(t *testing.T) {
type args struct {
measurement string
tags map[string]string
fields map[string]interface{}
valueType telegraf.ValueType
}
var tests = []struct {
name string
args args
err error
metricName string
valueType telegraf.ValueType
func TestMetricVersion1(t *testing.T) {
tests := []struct {
name string
output *PrometheusClient
metrics []telegraf.Metric
expected []byte
}{
{
name: "field named value is not added to metric name",
args: args{
measurement: "foo",
fields: map[string]interface{}{"value": 42},
valueType: telegraf.Counter,
name: "simple",
output: &PrometheusClient{
Listen: ":0",
MetricVersion: 1,
CollectorsExclude: []string{"gocollector", "process"},
Path: "/metrics",
Log: testutil.Logger{},
},
metricName: "foo",
valueType: telegraf.Counter,
metrics: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"host": "example.org",
},
map[string]interface{}{
"time_idle": 42.0,
},
time.Unix(0, 0),
),
},
expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle{host="example.org"} 42
`),
},
{
name: "field named counter is not added to metric name",
args: args{
measurement: "foo",
fields: map[string]interface{}{"counter": 42},
valueType: telegraf.Counter,
name: "prometheus untyped",
output: &PrometheusClient{
Listen: ":0",
MetricVersion: 1,
CollectorsExclude: []string{"gocollector", "process"},
Path: "/metrics",
Log: testutil.Logger{},
},
metricName: "foo",
valueType: telegraf.Counter,
metrics: []telegraf.Metric{
testutil.MustMetric(
"cpu_time_idle",
map[string]string{
"host": "example.org",
},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
),
},
expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle{host="example.org"} 42
`),
},
{
name: "field with any other name is added to metric name",
args: args{
measurement: "foo",
fields: map[string]interface{}{"other": 42},
valueType: telegraf.Counter,
name: "prometheus counter",
output: &PrometheusClient{
Listen: ":0",
MetricVersion: 1,
CollectorsExclude: []string{"gocollector", "process"},
Path: "/metrics",
Log: testutil.Logger{},
},
metricName: "foo_other",
valueType: telegraf.Counter,
metrics: []telegraf.Metric{
testutil.MustMetric(
"cpu_time_idle",
map[string]string{
"host": "example.org",
},
map[string]interface{}{
"counter": 42.0,
},
time.Unix(0, 0),
telegraf.Counter,
),
},
expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle counter
cpu_time_idle{host="example.org"} 42
`),
},
{
name: "uint64 fields are output",
args: args{
measurement: "foo",
fields: map[string]interface{}{"value": uint64(42)},
valueType: telegraf.Counter,
name: "prometheus gauge",
output: &PrometheusClient{
Listen: ":0",
MetricVersion: 1,
CollectorsExclude: []string{"gocollector", "process"},
Path: "/metrics",
Log: testutil.Logger{},
},
metricName: "foo",
valueType: telegraf.Counter,
metrics: []telegraf.Metric{
testutil.MustMetric(
"cpu_time_idle",
map[string]string{
"host": "example.org",
},
map[string]interface{}{
"gauge": 42.0,
},
time.Unix(0, 0),
telegraf.Gauge,
),
},
expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle gauge
cpu_time_idle{host="example.org"} 42
`),
},
{
name: "prometheus histogram",
output: &PrometheusClient{
Listen: ":0",
MetricVersion: 1,
CollectorsExclude: []string{"gocollector", "process"},
Path: "/metrics",
Log: testutil.Logger{},
},
metrics: []telegraf.Metric{
testutil.MustMetric(
"http_request_duration_seconds",
map[string]string{},
map[string]interface{}{
"sum": 53423,
"0.05": 24054,
"0.1": 33444,
"0.2": 100392,
"0.5": 129389,
"1": 133988,
"+Inf": 144320,
"count": 144320,
},
time.Unix(0, 0),
telegraf.Histogram,
),
},
expected: []byte(`
# HELP http_request_duration_seconds Telegraf collected metric
# TYPE http_request_duration_seconds histogram
http_request_duration_seconds_bucket{le="0.05"} 24054
http_request_duration_seconds_bucket{le="0.1"} 33444
http_request_duration_seconds_bucket{le="0.2"} 100392
http_request_duration_seconds_bucket{le="0.5"} 129389
http_request_duration_seconds_bucket{le="1"} 133988
http_request_duration_seconds_bucket{le="+Inf"} 144320
http_request_duration_seconds_sum 53423
http_request_duration_seconds_count 144320
`),
},
{
name: "prometheus summary",
output: &PrometheusClient{
Listen: ":0",
MetricVersion: 1,
CollectorsExclude: []string{"gocollector", "process"},
Path: "/metrics",
Log: testutil.Logger{},
},
metrics: []telegraf.Metric{
testutil.MustMetric(
"rpc_duration_seconds",
map[string]string{},
map[string]interface{}{
"0.01": 3102,
"0.05": 3272,
"0.5": 4773,
"0.9": 9001,
"0.99": 76656,
"count": 2693,
"sum": 17560473,
},
time.Unix(0, 0),
telegraf.Summary,
),
},
expected: []byte(`
# HELP rpc_duration_seconds Telegraf collected metric
# TYPE rpc_duration_seconds summary
rpc_duration_seconds{quantile="0.01"} 3102
rpc_duration_seconds{quantile="0.05"} 3272
rpc_duration_seconds{quantile="0.5"} 4773
rpc_duration_seconds{quantile="0.9"} 9001
rpc_duration_seconds{quantile="0.99"} 76656
rpc_duration_seconds_sum 1.7560473e+07
rpc_duration_seconds_count 2693
`),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
m, err := metric.New(
tt.args.measurement,
tt.args.tags,
tt.args.fields,
time.Now(),
tt.args.valueType,
)
client := NewClient()
err = client.Write([]telegraf.Metric{m})
require.Equal(t, tt.err, err)
err := tt.output.Init()
require.NoError(t, err)
fam, ok := client.fam[tt.metricName]
require.True(t, ok)
require.Equal(t, tt.valueType, fam.TelegrafValueType)
err = tt.output.Connect()
require.NoError(t, err)
defer func() {
err := tt.output.Close()
require.NoError(t, err)
}()
err = tt.output.Write(tt.metrics)
require.NoError(t, err)
resp, err := http.Get(tt.output.URL())
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
require.NoError(t, err)
require.Equal(t,
strings.TrimSpace(string(tt.expected)),
strings.TrimSpace(string(body)))
})
}
}
func TestWrite_Sanitize(t *testing.T) {
client := NewClient()
p1, err := metric.New(
"foo.bar:colon",
map[string]string{"tag-with-dash": "localhost.local"},
map[string]interface{}{"field-with-dash-and:colon": 42},
time.Now(),
telegraf.Counter)
err = client.Write([]telegraf.Metric{p1})
require.NoError(t, err)
fam, ok := client.fam["foo_bar:colon_field_with_dash_and:colon"]
require.True(t, ok)
require.Equal(t, map[string]int{"tag_with_dash": 1}, fam.LabelSet)
sample1, ok := fam.Samples[CreateSampleID(p1.Tags())]
require.True(t, ok)
require.Equal(t, map[string]string{
"tag_with_dash": "localhost.local"}, sample1.Labels)
}
func TestWrite_Gauge(t *testing.T) {
type args struct {
measurement string
tags map[string]string
fields map[string]interface{}
valueType telegraf.ValueType
}
var tests = []struct {
name string
args args
err error
metricName string
valueType telegraf.ValueType
func TestMetricVersion2(t *testing.T) {
tests := []struct {
name string
output *PrometheusClient
metrics []telegraf.Metric
expected []byte
}{
{
name: "field named value is not added to metric name",
args: args{
measurement: "foo",
fields: map[string]interface{}{"value": 42},
valueType: telegraf.Gauge,
name: "simple",
output: &PrometheusClient{
Listen: ":0",
MetricVersion: 2,
CollectorsExclude: []string{"gocollector", "process"},
Path: "/metrics",
Log: testutil.Logger{},
},
metricName: "foo",
valueType: telegraf.Gauge,
},
{
name: "field named gauge is not added to metric name",
args: args{
measurement: "foo",
fields: map[string]interface{}{"gauge": 42},
valueType: telegraf.Gauge,
metrics: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"host": "example.org",
},
map[string]interface{}{
"time_idle": 42.0,
},
time.Unix(0, 0),
),
},
metricName: "foo",
valueType: telegraf.Gauge,
},
{
name: "field with any other name is added to metric name",
args: args{
measurement: "foo",
fields: map[string]interface{}{"other": 42},
valueType: telegraf.Gauge,
},
metricName: "foo_other",
valueType: telegraf.Gauge,
},
{
name: "uint64 fields are output",
args: args{
measurement: "foo",
fields: map[string]interface{}{"value": uint64(42)},
valueType: telegraf.Counter,
},
metricName: "foo",
valueType: telegraf.Counter,
expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle{host="example.org"} 42
`),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
m, err := metric.New(
tt.args.measurement,
tt.args.tags,
tt.args.fields,
time.Now(),
tt.args.valueType,
)
client := NewClient()
err = client.Write([]telegraf.Metric{m})
require.Equal(t, tt.err, err)
err := tt.output.Init()
require.NoError(t, err)
fam, ok := client.fam[tt.metricName]
require.True(t, ok)
require.Equal(t, tt.valueType, fam.TelegrafValueType)
err = tt.output.Connect()
require.NoError(t, err)
defer func() {
err := tt.output.Close()
require.NoError(t, err)
}()
err = tt.output.Write(tt.metrics)
require.NoError(t, err)
resp, err := http.Get(tt.output.URL())
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
require.NoError(t, err)
require.Equal(t,
strings.TrimSpace(string(tt.expected)),
strings.TrimSpace(string(body)))
})
}
}
func TestWrite_Summary(t *testing.T) {
client := NewClient()
p1, err := metric.New(
"foo",
make(map[string]string),
map[string]interface{}{"sum": 84, "count": 42, "0": 2, "0.5": 3, "1": 4},
time.Now(),
telegraf.Summary)
err = client.Write([]telegraf.Metric{p1})
require.NoError(t, err)
fam, ok := client.fam["foo"]
require.True(t, ok)
require.Equal(t, 1, len(fam.Samples))
sample1, ok := fam.Samples[CreateSampleID(p1.Tags())]
require.True(t, ok)
require.Equal(t, 84.0, sample1.Sum)
require.Equal(t, uint64(42), sample1.Count)
require.Equal(t, 3, len(sample1.SummaryValue))
}
func TestWrite_Histogram(t *testing.T) {
client := NewClient()
p1, err := metric.New(
"foo",
make(map[string]string),
map[string]interface{}{"sum": 84, "count": 42, "0": 2, "0.5": 3, "1": 4},
time.Now(),
telegraf.Histogram)
err = client.Write([]telegraf.Metric{p1})
require.NoError(t, err)
fam, ok := client.fam["foo"]
require.True(t, ok)
require.Equal(t, 1, len(fam.Samples))
sample1, ok := fam.Samples[CreateSampleID(p1.Tags())]
require.True(t, ok)
require.Equal(t, 84.0, sample1.Sum)
require.Equal(t, uint64(42), sample1.Count)
require.Equal(t, 3, len(sample1.HistogramValue))
}
func TestWrite_MixedValueType(t *testing.T) {
now := time.Now()
p1, err := metric.New(
"foo",
make(map[string]string),
map[string]interface{}{"value": 1.0},
now,
telegraf.Counter)
p2, err := metric.New(
"foo",
make(map[string]string),
map[string]interface{}{"value": 2.0},
now,
telegraf.Gauge)
var metrics = []telegraf.Metric{p1, p2}
client := NewClient()
err = client.Write(metrics)
require.NoError(t, err)
fam, ok := client.fam["foo"]
require.True(t, ok)
require.Equal(t, 1, len(fam.Samples))
}
func TestWrite_MixedValueTypeUpgrade(t *testing.T) {
now := time.Now()
p1, err := metric.New(
"foo",
map[string]string{"a": "x"},
map[string]interface{}{"value": 1.0},
now,
telegraf.Untyped)
p2, err := metric.New(
"foo",
map[string]string{"a": "y"},
map[string]interface{}{"value": 2.0},
now,
telegraf.Gauge)
var metrics = []telegraf.Metric{p1, p2}
client := NewClient()
err = client.Write(metrics)
require.NoError(t, err)
fam, ok := client.fam["foo"]
require.True(t, ok)
require.Equal(t, 2, len(fam.Samples))
}
func TestWrite_MixedValueTypeDowngrade(t *testing.T) {
now := time.Now()
p1, err := metric.New(
"foo",
map[string]string{"a": "x"},
map[string]interface{}{"value": 1.0},
now,
telegraf.Gauge)
p2, err := metric.New(
"foo",
map[string]string{"a": "y"},
map[string]interface{}{"value": 2.0},
now,
telegraf.Untyped)
var metrics = []telegraf.Metric{p1, p2}
client := NewClient()
err = client.Write(metrics)
require.NoError(t, err)
fam, ok := client.fam["foo"]
require.True(t, ok)
require.Equal(t, 2, len(fam.Samples))
}
func TestWrite_Tags(t *testing.T) {
now := time.Now()
p1, err := metric.New(
"foo",
make(map[string]string),
map[string]interface{}{"value": 1.0},
now)
p2, err := metric.New(
"foo",
map[string]string{"host": "localhost"},
map[string]interface{}{"value": 2.0},
now)
var metrics = []telegraf.Metric{p1, p2}
client := NewClient()
err = client.Write(metrics)
require.NoError(t, err)
fam, ok := client.fam["foo"]
require.True(t, ok)
require.Equal(t, telegraf.Untyped, fam.TelegrafValueType)
require.Equal(t, map[string]int{"host": 1}, fam.LabelSet)
sample1, ok := fam.Samples[CreateSampleID(p1.Tags())]
require.True(t, ok)
require.Equal(t, 1.0, sample1.Value)
require.True(t, now.Before(sample1.Expiration))
sample2, ok := fam.Samples[CreateSampleID(p2.Tags())]
require.True(t, ok)
require.Equal(t, 2.0, sample2.Value)
require.True(t, now.Before(sample2.Expiration))
}
func TestWrite_StringFields(t *testing.T) {
now := time.Now()
p1, err := metric.New(
"foo",
make(map[string]string),
map[string]interface{}{"value": 1.0, "status": "good"},
now,
telegraf.Counter)
p2, err := metric.New(
"bar",
make(map[string]string),
map[string]interface{}{"status": "needs numeric field"},
now,
telegraf.Gauge)
var metrics = []telegraf.Metric{p1, p2}
client := NewClient()
err = client.Write(metrics)
require.NoError(t, err)
fam, ok := client.fam["foo"]
require.True(t, ok)
require.Equal(t, 1, fam.LabelSet["status"])
fam, ok = client.fam["bar"]
require.False(t, ok)
}
func TestDoNotWrite_StringFields(t *testing.T) {
now := time.Now()
p1, err := metric.New(
"foo",
make(map[string]string),
map[string]interface{}{"value": 1.0, "status": "good"},
now,
telegraf.Counter)
p2, err := metric.New(
"bar",
make(map[string]string),
map[string]interface{}{"status": "needs numeric field"},
now,
telegraf.Gauge)
var metrics = []telegraf.Metric{p1, p2}
client := &PrometheusClient{
ExpirationInterval: internal.Duration{Duration: time.Second * 60},
StringAsLabel: false,
fam: make(map[string]*MetricFamily),
now: time.Now,
}
err = client.Write(metrics)
require.NoError(t, err)
fam, ok := client.fam["foo"]
require.True(t, ok)
require.Equal(t, 0, fam.LabelSet["status"])
fam, ok = client.fam["bar"]
require.False(t, ok)
}
func TestExpire(t *testing.T) {
client := NewClient()
p1, err := metric.New(
"foo",
make(map[string]string),
map[string]interface{}{"value": 1.0},
time.Now())
setUnixTime(client, 0)
err = client.Write([]telegraf.Metric{p1})
require.NoError(t, err)
p2, err := metric.New(
"bar",
make(map[string]string),
map[string]interface{}{"value": 2.0},
time.Now())
setUnixTime(client, 1)
err = client.Write([]telegraf.Metric{p2})
setUnixTime(client, 61)
require.Equal(t, 2, len(client.fam))
client.Expire()
require.Equal(t, 1, len(client.fam))
}
func TestExpire_TagsNoDecrement(t *testing.T) {
client := NewClient()
p1, err := metric.New(
"foo",
make(map[string]string),
map[string]interface{}{"value": 1.0},
time.Now())
setUnixTime(client, 0)
err = client.Write([]telegraf.Metric{p1})
require.NoError(t, err)
p2, err := metric.New(
"foo",
map[string]string{"host": "localhost"},
map[string]interface{}{"value": 2.0},
time.Now())
setUnixTime(client, 1)
err = client.Write([]telegraf.Metric{p2})
setUnixTime(client, 61)
fam, ok := client.fam["foo"]
require.True(t, ok)
require.Equal(t, 2, len(fam.Samples))
client.Expire()
require.Equal(t, 1, len(fam.Samples))
require.Equal(t, map[string]int{"host": 1}, fam.LabelSet)
}
func TestExpire_TagsWithDecrement(t *testing.T) {
client := NewClient()
p1, err := metric.New(
"foo",
map[string]string{"host": "localhost"},
map[string]interface{}{"value": 1.0},
time.Now())
setUnixTime(client, 0)
err = client.Write([]telegraf.Metric{p1})
require.NoError(t, err)
p2, err := metric.New(
"foo",
make(map[string]string),
map[string]interface{}{"value": 2.0},
time.Now())
setUnixTime(client, 1)
err = client.Write([]telegraf.Metric{p2})
setUnixTime(client, 61)
fam, ok := client.fam["foo"]
require.True(t, ok)
require.Equal(t, 2, len(fam.Samples))
client.Expire()
require.Equal(t, 1, len(fam.Samples))
require.Equal(t, map[string]int{"host": 0}, fam.LabelSet)
}
var pTesting *PrometheusClient
func TestPrometheusWritePointEmptyTag(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
pClient, p, err := setupPrometheus()
require.NoError(t, err)
defer pClient.Close()
now := time.Now()
tags := make(map[string]string)
pt1, _ := metric.New(
"test_point_1",
tags,
map[string]interface{}{"value": 0.0},
now)
pt2, _ := metric.New(
"test_point_2",
tags,
map[string]interface{}{"value": 1.0},
now)
var metrics = []telegraf.Metric{
pt1,
pt2,
}
require.NoError(t, pClient.Write(metrics))
expected := []struct {
name string
value float64
tags map[string]string
}{
{"test_point_1", 0.0, tags},
{"test_point_2", 1.0, tags},
}
var acc testutil.Accumulator
require.NoError(t, p.Gather(&acc))
for _, e := range expected {
acc.AssertContainsFields(t, e.name,
map[string]interface{}{"value": e.value})
}
tags = make(map[string]string)
tags["testtag"] = "testvalue"
pt3, _ := metric.New(
"test_point_3",
tags,
map[string]interface{}{"value": 0.0},
now)
pt4, _ := metric.New(
"test_point_4",
tags,
map[string]interface{}{"value": 1.0},
now)
metrics = []telegraf.Metric{
pt3,
pt4,
}
require.NoError(t, pClient.Write(metrics))
expected2 := []struct {
name string
value float64
}{
{"test_point_3", 0.0},
{"test_point_4", 1.0},
}
require.NoError(t, p.Gather(&acc))
for _, e := range expected2 {
acc.AssertContainsFields(t, e.name,
map[string]interface{}{"value": e.value})
}
}
func setupPrometheus() (*PrometheusClient, *prometheus_input.Prometheus, error) {
if pTesting == nil {
pTesting = NewClient()
pTesting.Listen = "localhost:9127"
pTesting.Path = "/metrics"
err := pTesting.Connect()
if err != nil {
return nil, nil, err
}
} else {
pTesting.fam = make(map[string]*MetricFamily)
}
time.Sleep(time.Millisecond * 200)
p := &prometheus_input.Prometheus{
URLs: []string{"http://localhost:9127/metrics"},
}
return pTesting, p, nil
}


@ -0,0 +1,391 @@
package v1
import (
"fmt"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/prometheus/client_golang/prometheus"
)
var (
invalidNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9_:]`)
validNameCharRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*`)
)
// SampleID uniquely identifies a Sample
type SampleID string
// Sample represents the current value of a series.
type Sample struct {
// Labels are the Prometheus labels.
Labels map[string]string
// Value is the value in the Prometheus output. Only one of these will be populated.
Value float64
HistogramValue map[float64]uint64
SummaryValue map[float64]float64
// Histograms and Summaries need a count and a sum
Count uint64
Sum float64
// Metric timestamp
Timestamp time.Time
// Expiration is the deadline that this Sample is valid until.
Expiration time.Time
}
// MetricFamily contains the data required to build valid prometheus Metrics.
type MetricFamily struct {
// Samples are the Sample belonging to this MetricFamily.
Samples map[SampleID]*Sample
// Need the telegraf ValueType because there isn't a Prometheus ValueType
// representing Histogram or Summary
TelegrafValueType telegraf.ValueType
// LabelSet is the label counts for all Samples.
LabelSet map[string]int
}
type Collector struct {
ExpirationInterval time.Duration
StringAsLabel bool
ExportTimestamp bool
Log telegraf.Logger
sync.Mutex
fam map[string]*MetricFamily
}
func NewCollector(expire time.Duration, stringsAsLabel bool, logger telegraf.Logger) *Collector {
return &Collector{
ExpirationInterval: expire,
StringAsLabel: stringsAsLabel,
Log: logger,
fam: make(map[string]*MetricFamily),
}
}
func (c *Collector) Describe(ch chan<- *prometheus.Desc) {
prometheus.NewGauge(prometheus.GaugeOpts{Name: "Dummy", Help: "Dummy"}).Describe(ch)
}
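// Collect implements prometheus.Collector. It first expires stale samples,
// then emits one metric per remaining sample using the label names currently
// referenced by the family.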
func (c *Collector) Collect(ch chan<- prometheus.Metric) {
c.Lock()
defer c.Unlock()
c.Expire(time.Now(), c.ExpirationInterval)
for name, family := range c.fam {
// Get list of all labels on MetricFamily
var labelNames []string
for k, v := range family.LabelSet {
if v > 0 {
labelNames = append(labelNames, k)
}
}
desc := prometheus.NewDesc(name, "Telegraf collected metric", labelNames, nil)
for _, sample := range family.Samples {
// Get labels for this sample; unset labels will be set to the
// empty string
var labels []string
for _, label := range labelNames {
v := sample.Labels[label]
labels = append(labels, v)
}
var metric prometheus.Metric
var err error
switch family.TelegrafValueType {
case telegraf.Summary:
metric, err = prometheus.NewConstSummary(desc, sample.Count, sample.Sum, sample.SummaryValue, labels...)
case telegraf.Histogram:
metric, err = prometheus.NewConstHistogram(desc, sample.Count, sample.Sum, sample.HistogramValue, labels...)
default:
metric, err = prometheus.NewConstMetric(desc, getPromValueType(family.TelegrafValueType), sample.Value, labels...)
}
if err != nil {
c.Log.Errorf("Error creating prometheus metric: "+
"key: %s, labels: %v, err: %v",
name, labels, err)
continue
}
if c.ExportTimestamp {
metric = prometheus.NewMetricWithTimestamp(sample.Timestamp, metric)
}
ch <- metric
}
}
}
func sanitize(value string) string {
return invalidNameCharRE.ReplaceAllString(value, "_")
}
func isValidTagName(tag string) bool {
return validNameCharRE.MatchString(tag)
}
func getPromValueType(tt telegraf.ValueType) prometheus.ValueType {
switch tt {
case telegraf.Counter:
return prometheus.CounterValue
case telegraf.Gauge:
return prometheus.GaugeValue
default:
return prometheus.UntypedValue
}
}
// CreateSampleID creates a SampleID based on the tags of a telegraf.Metric.
func CreateSampleID(tags map[string]string) SampleID {
pairs := make([]string, 0, len(tags))
for k, v := range tags {
pairs = append(pairs, fmt.Sprintf("%s=%s", k, v))
}
sort.Strings(pairs)
return SampleID(strings.Join(pairs, ","))
}
func addSample(fam *MetricFamily, sample *Sample, sampleID SampleID) {
for k := range sample.Labels {
fam.LabelSet[k]++
}
fam.Samples[sampleID] = sample
}
func (c *Collector) addMetricFamily(point telegraf.Metric, sample *Sample, mname string, sampleID SampleID) {
var fam *MetricFamily
var ok bool
if fam, ok = c.fam[mname]; !ok {
fam = &MetricFamily{
Samples: make(map[SampleID]*Sample),
TelegrafValueType: point.Type(),
LabelSet: make(map[string]int),
}
c.fam[mname] = fam
}
addSample(fam, sample, sampleID)
}
// Sorted returns a copy of the metrics in time ascending order. A copy is
// made to avoid modifying the input metric slice since doing so is not
// allowed.
func sorted(metrics []telegraf.Metric) []telegraf.Metric {
batch := make([]telegraf.Metric, 0, len(metrics))
for i := len(metrics) - 1; i >= 0; i-- {
batch = append(batch, metrics[i])
}
sort.Slice(batch, func(i, j int) bool {
return batch[i].Time().Before(batch[j].Time())
})
return batch
}
func (c *Collector) Add(metrics []telegraf.Metric) error {
c.Lock()
defer c.Unlock()
now := time.Now()
for _, point := range sorted(metrics) {
tags := point.Tags()
sampleID := CreateSampleID(tags)
labels := make(map[string]string)
for k, v := range tags {
tName := sanitize(k)
if !isValidTagName(tName) {
continue
}
labels[tName] = v
}
// Prometheus doesn't have a string value type, so convert string
// fields to labels if enabled.
if c.StringAsLabel {
for fn, fv := range point.Fields() {
switch fv := fv.(type) {
case string:
tName := sanitize(fn)
if !isValidTagName(tName) {
continue
}
labels[tName] = fv
}
}
}
switch point.Type() {
case telegraf.Summary:
var mname string
var sum float64
var count uint64
summaryvalue := make(map[float64]float64)
for fn, fv := range point.Fields() {
var value float64
switch fv := fv.(type) {
case int64:
value = float64(fv)
case uint64:
value = float64(fv)
case float64:
value = fv
default:
continue
}
switch fn {
case "sum":
sum = value
case "count":
count = uint64(value)
default:
limit, err := strconv.ParseFloat(fn, 64)
if err == nil {
summaryvalue[limit] = value
}
}
}
sample := &Sample{
Labels: labels,
SummaryValue: summaryvalue,
Count: count,
Sum: sum,
Timestamp: point.Time(),
Expiration: now.Add(c.ExpirationInterval),
}
mname = sanitize(point.Name())
if !isValidTagName(mname) {
continue
}
c.addMetricFamily(point, sample, mname, sampleID)
case telegraf.Histogram:
var mname string
var sum float64
var count uint64
histogramvalue := make(map[float64]uint64)
for fn, fv := range point.Fields() {
var value float64
switch fv := fv.(type) {
case int64:
value = float64(fv)
case uint64:
value = float64(fv)
case float64:
value = fv
default:
continue
}
switch fn {
case "sum":
sum = value
case "count":
count = uint64(value)
default:
limit, err := strconv.ParseFloat(fn, 64)
if err == nil {
histogramvalue[limit] = uint64(value)
}
}
}
sample := &Sample{
Labels: labels,
HistogramValue: histogramvalue,
Count: count,
Sum: sum,
Timestamp: point.Time(),
Expiration: now.Add(c.ExpirationInterval),
}
mname = sanitize(point.Name())
if !isValidTagName(mname) {
continue
}
c.addMetricFamily(point, sample, mname, sampleID)
default:
for fn, fv := range point.Fields() {
// Ignore string and bool fields.
var value float64
switch fv := fv.(type) {
case int64:
value = float64(fv)
case uint64:
value = float64(fv)
case float64:
value = fv
default:
continue
}
sample := &Sample{
Labels: labels,
Value: value,
Timestamp: point.Time(),
Expiration: now.Add(c.ExpirationInterval),
}
// Special handling of value field; supports passthrough from
// the prometheus input.
var mname string
switch point.Type() {
case telegraf.Counter:
if fn == "counter" {
mname = sanitize(point.Name())
}
case telegraf.Gauge:
if fn == "gauge" {
mname = sanitize(point.Name())
}
}
if mname == "" {
if fn == "value" {
mname = sanitize(point.Name())
} else {
mname = sanitize(fmt.Sprintf("%s_%s", point.Name(), fn))
}
}
if !isValidTagName(mname) {
continue
}
c.addMetricFamily(point, sample, mname, sampleID)
}
}
}
return nil
}
func (c *Collector) Expire(now time.Time, age time.Duration) {
if age == 0 {
return
}
for name, family := range c.fam {
for key, sample := range family.Samples {
if age != 0 && now.After(sample.Expiration) {
for k := range sample.Labels {
family.LabelSet[k]--
}
delete(family.Samples, key)
if len(family.Samples) == 0 {
delete(c.fam, name)
}
}
}
}
}
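
A minimal sketch of the v1 collector's expiration behavior, assuming only the `NewCollector`, `Add`, and `Expire` signatures shown above; the two-minute jump is simulated by passing a future `now`:

```go
package main

import (
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/plugins/outputs/prometheus_client/v1"
	"github.com/influxdata/telegraf/testutil"
)

func main() {
	// One-minute expiration, string fields exported as labels.
	coll := v1.NewCollector(time.Minute, true, testutil.Logger{})

	m, _ := metric.New(
		"cpu",
		map[string]string{"host": "example.org"},
		map[string]interface{}{"time_idle": 42.0},
		time.Now(),
	)
	_ = coll.Add([]telegraf.Metric{m})

	// Simulate the passage of two minutes: the sample's Expiration is now in
	// the past, so it is removed and a subsequent Collect would emit nothing
	// for this family.
	coll.Expire(time.Now().Add(2*time.Minute), time.Minute)
}
```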


@ -0,0 +1,87 @@
package v2
import (
"sync"
"time"
"github.com/influxdata/telegraf"
serializer "github.com/influxdata/telegraf/plugins/serializers/prometheus"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
)
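// Metric adapts a single dto.Metric together with its parent dto.MetricFamily
// to the prometheus.Metric interface so it can be sent on a Collect channel.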
type Metric struct {
family *dto.MetricFamily
metric *dto.Metric
}
func (m *Metric) Desc() *prometheus.Desc {
labelNames := make([]string, 0, len(m.metric.Label))
for _, label := range m.metric.Label {
labelNames = append(labelNames, *label.Name)
}
desc := prometheus.NewDesc(*m.family.Name, *m.family.Help, labelNames, nil)
return desc
}
func (m *Metric) Write(out *dto.Metric) error {
out.Label = m.metric.Label
out.Counter = m.metric.Counter
out.Untyped = m.metric.Untyped
out.Gauge = m.metric.Gauge
out.Histogram = m.metric.Histogram
out.Summary = m.metric.Summary
out.TimestampMs = m.metric.TimestampMs
return nil
}
type Collector struct {
sync.Mutex
expireDuration time.Duration
coll *serializer.Collection
}
func NewCollector(expire time.Duration, stringsAsLabel bool) *Collector {
config := serializer.FormatConfig{}
if stringsAsLabel {
config.StringHandling = serializer.StringAsLabel
}
return &Collector{
expireDuration: expire,
coll: serializer.NewCollection(config),
}
}
func (c *Collector) Describe(ch chan<- *prometheus.Desc) {
// Sending no descriptor at all marks the Collector as "unchecked",
// i.e. no checks will be performed at registration time, and the
// Collector may yield any Metric it sees fit in its Collect method.
return
}
func (c *Collector) Collect(ch chan<- prometheus.Metric) {
c.Lock()
defer c.Unlock()
for _, family := range c.coll.GetProto() {
for _, metric := range family.Metric {
ch <- &Metric{family: family, metric: metric}
}
}
}
func (c *Collector) Add(metrics []telegraf.Metric) error {
c.Lock()
defer c.Unlock()
for _, metric := range metrics {
c.coll.Add(metric)
}
if c.expireDuration != 0 {
c.coll.Expire(time.Now(), c.expireDuration)
}
return nil
}
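
To see how the v2 path fits together, here is a minimal sketch that drives the serializer's `Collection` directly and renders the resulting protobuf families with the `expfmt` text encoder. The `Collection` API is the one added in this commit; the `expfmt` usage is an assumption outside this diff:

```go
package main

import (
	"os"
	"time"

	"github.com/influxdata/telegraf/metric"
	serializer "github.com/influxdata/telegraf/plugins/serializers/prometheus"
	"github.com/prometheus/common/expfmt"
)

func main() {
	coll := serializer.NewCollection(serializer.FormatConfig{
		StringHandling: serializer.StringAsLabel,
	})

	m, _ := metric.New(
		"cpu",
		map[string]string{"host": "example.org"},
		map[string]interface{}{"time_idle": 42.0},
		time.Unix(0, 0),
	)
	coll.Add(m)

	// Render the accumulated families in the Prometheus text exposition format.
	enc := expfmt.NewEncoder(os.Stdout, expfmt.FmtText)
	for _, family := range coll.GetProto() {
		_ = enc.Encode(family)
	}
}
```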


@ -0,0 +1,68 @@
# Prometheus
The `prometheus` data format converts metrics into the Prometheus text
exposition format. When used with the `prometheus` input, the input should
use the `metric_version = 2` option in order to properly round-trip metrics.
**Warning**: When generating histogram and summary types, output may
not be correct if the metric spans multiple batches. This issue can be
somewhat, but not fully, mitigated by using outputs that support writing in
"batch format". When using histogram and summary types, it is recommended to
use only the `prometheus_client` output.
## Configuration
```toml
[[outputs.file]]
files = ["stdout"]
use_batch_format = true
## Include the metric timestamp on each sample.
prometheus_export_timestamp = false
## Sort prometheus metric families and metric samples. Useful for
## debugging.
prometheus_sort_metrics = false
## Output string fields as metric labels; when false string fields are
## discarded.
prometheus_string_as_label = false
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "prometheus"
```
### Example
**Example Input**
```
cpu,cpu=cpu0 time_guest=8022.6,time_system=26145.98,time_user=92512.89 1574317740000000000
cpu,cpu=cpu1 time_guest=8097.88,time_system=25223.35,time_user=96519.58 1574317740000000000
cpu,cpu=cpu2 time_guest=7386.28,time_system=24870.37,time_user=95631.59 1574317740000000000
cpu,cpu=cpu3 time_guest=7434.19,time_system=24843.71,time_user=93753.88 1574317740000000000
```
**Example Output**
```
# HELP cpu_time_guest Telegraf collected metric
# TYPE cpu_time_guest counter
cpu_time_guest{cpu="cpu0"} 9582.54
cpu_time_guest{cpu="cpu1"} 9660.88
cpu_time_guest{cpu="cpu2"} 8946.45
cpu_time_guest{cpu="cpu3"} 9002.31
# HELP cpu_time_system Telegraf collected metric
# TYPE cpu_time_system counter
cpu_time_system{cpu="cpu0"} 28675.47
cpu_time_system{cpu="cpu1"} 27779.34
cpu_time_system{cpu="cpu2"} 27406.18
cpu_time_system{cpu="cpu3"} 27404.97
# HELP cpu_time_user Telegraf collected metric
# TYPE cpu_time_user counter
cpu_time_user{cpu="cpu0"} 99551.84
cpu_time_user{cpu="cpu1"} 103468.52
cpu_time_user{cpu="cpu2"} 102591.45
cpu_time_user{cpu="cpu3"} 100717.05
```

View File

@ -0,0 +1,464 @@
package prometheus
import (
"hash/fnv"
"sort"
"strconv"
"strings"
"time"
"github.com/gogo/protobuf/proto"
"github.com/influxdata/telegraf"
dto "github.com/prometheus/client_model/go"
)
const helpString = "Telegraf collected metric"
type MetricFamily struct {
Name string
Type telegraf.ValueType
}
type Metric struct {
Labels []LabelPair
Time time.Time
Scaler *Scaler
Histogram *Histogram
Summary *Summary
}
type LabelPair struct {
Name string
Value string
}
type Scaler struct {
Value float64
}
type Bucket struct {
Bound float64
Count uint64
}
type Quantile struct {
Quantile float64
Value float64
}
type Histogram struct {
Buckets []Bucket
Count uint64
Sum float64
}
type Summary struct {
Quantiles []Quantile
Count uint64
Sum float64
}
type MetricKey uint64
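// MakeMetricKey hashes a label set into the key that identifies a series.
// Label sets with the same names and values, in the same order, produce the
// same key and therefore refer to the same sample within a Collection.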
func MakeMetricKey(labels []LabelPair) MetricKey {
h := fnv.New64a()
for _, label := range labels {
h.Write([]byte(label.Name))
h.Write([]byte("\x00"))
h.Write([]byte(label.Value))
h.Write([]byte("\x00"))
}
return MetricKey(h.Sum64())
}
type Entry struct {
Family MetricFamily
Metrics map[MetricKey]*Metric
}
type Collection struct {
config FormatConfig
Entries map[MetricFamily]Entry
}
func NewCollection(config FormatConfig) *Collection {
cache := &Collection{
config: config,
Entries: make(map[MetricFamily]Entry),
}
return cache
}
func hasLabel(name string, labels []LabelPair) bool {
for _, label := range labels {
if name == label.Name {
return true
}
}
return false
}
func (c *Collection) createLabels(metric telegraf.Metric) []LabelPair {
labels := make([]LabelPair, 0, len(metric.TagList()))
for _, tag := range metric.TagList() {
// Ignore special tags for histogram and summary types.
switch metric.Type() {
case telegraf.Histogram:
if tag.Key == "le" {
continue
}
case telegraf.Summary:
if tag.Key == "quantile" {
continue
}
}
name, ok := SanitizeName(tag.Key)
if !ok {
continue
}
labels = append(labels, LabelPair{Name: name, Value: tag.Value})
}
if c.config.StringHandling != StringAsLabel {
return labels
}
addedFieldLabel := false
for _, field := range metric.FieldList() {
value, ok := field.Value.(string)
if !ok {
continue
}
name, ok := SanitizeName(field.Key)
if !ok {
continue
}
// If there is a tag with the same name as the string field, discard
// the field and use the tag instead.
if hasLabel(name, labels) {
continue
}
labels = append(labels, LabelPair{Name: name, Value: value})
addedFieldLabel = true
}
if addedFieldLabel {
sort.Slice(labels, func(i, j int) bool {
return labels[i].Name < labels[j].Name
})
}
return labels
}
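// Add folds a telegraf metric into the collection. Each field becomes a
// sample in the family named by MetricName(measurement, field key, type),
// keyed by the metric's labels. For histogram and summary types, the
// _bucket, _sum, and _count fields (and quantile samples) are merged into a
// single sample per label set.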
func (c *Collection) Add(metric telegraf.Metric) {
labels := c.createLabels(metric)
for _, field := range metric.FieldList() {
metricName := MetricName(metric.Name(), field.Key, metric.Type())
metricName, ok := SanitizeName(metricName)
if !ok {
continue
}
family := MetricFamily{
Name: metricName,
Type: metric.Type(),
}
entry, ok := c.Entries[family]
if !ok {
entry = Entry{
Family: family,
Metrics: make(map[MetricKey]*Metric),
}
c.Entries[family] = entry
}
metricKey := MakeMetricKey(labels)
m, ok := entry.Metrics[metricKey]
if ok {
// A batch of metrics can contain multiple values for a single
// Prometheus sample. If this metric is older than the existing
// sample then we can skip over it.
if metric.Time().Before(m.Time) {
continue
}
}
switch metric.Type() {
case telegraf.Counter:
fallthrough
case telegraf.Gauge:
fallthrough
case telegraf.Untyped:
value, ok := SampleValue(field.Value)
if !ok {
continue
}
m = &Metric{
Labels: labels,
Time: metric.Time(),
Scaler: &Scaler{Value: value},
}
// Simple scalar samples always take the newest value; the timestamp
// check above ensures an older sample never replaces a newer one.
entry.Metrics[metricKey] = m
case telegraf.Histogram:
if m == nil {
m = &Metric{
Labels: labels,
Time: metric.Time(),
Histogram: &Histogram{},
}
}
switch {
case strings.HasSuffix(field.Key, "_bucket"):
le, ok := metric.GetTag("le")
if !ok {
continue
}
bound, err := strconv.ParseFloat(le, 64)
if err != nil {
continue
}
count, ok := SampleCount(field.Value)
if !ok {
continue
}
m.Histogram.Buckets = append(m.Histogram.Buckets, Bucket{
Bound: bound,
Count: count,
})
case strings.HasSuffix(field.Key, "_sum"):
sum, ok := SampleSum(field.Value)
if !ok {
continue
}
m.Histogram.Sum = sum
case strings.HasSuffix(field.Key, "_count"):
count, ok := SampleCount(field.Value)
if !ok {
continue
}
m.Histogram.Count = count
default:
continue
}
entry.Metrics[metricKey] = m
case telegraf.Summary:
if m == nil {
m = &Metric{
Labels: labels,
Time: metric.Time(),
Summary: &Summary{},
}
}
switch {
case strings.HasSuffix(field.Key, "_sum"):
sum, ok := SampleSum(field.Value)
if !ok {
continue
}
m.Summary.Sum = sum
case strings.HasSuffix(field.Key, "_count"):
count, ok := SampleCount(field.Value)
if !ok {
continue
}
m.Summary.Count = count
default:
quantileTag, ok := metric.GetTag("quantile")
if !ok {
continue
}
quantile, err := strconv.ParseFloat(quantileTag, 64)
if err != nil {
continue
}
value, ok := SampleValue(field.Value)
if !ok {
continue
}
m.Summary.Quantiles = append(m.Summary.Quantiles, Quantile{
Quantile: quantile,
Value: value,
})
}
entry.Metrics[metricKey] = m
}
}
}
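// Expire drops samples older than now minus age and removes metric families
// that no longer contain any samples.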
func (c *Collection) Expire(now time.Time, age time.Duration) {
expireTime := now.Add(-age)
for _, entry := range c.Entries {
for key, metric := range entry.Metrics {
if metric.Time.Before(expireTime) {
delete(entry.Metrics, key)
if len(entry.Metrics) == 0 {
delete(c.Entries, entry.Family)
}
}
}
}
}
func (c *Collection) GetEntries(order MetricSortOrder) []Entry {
entries := make([]Entry, 0, len(c.Entries))
for _, entry := range c.Entries {
entries = append(entries, entry)
}
switch order {
case SortMetrics:
sort.Slice(entries, func(i, j int) bool {
lhs := entries[i].Family
rhs := entries[j].Family
if lhs.Name != rhs.Name {
return lhs.Name < rhs.Name
}
return lhs.Type < rhs.Type
})
}
return entries
}
func (c *Collection) GetMetrics(entry Entry, order MetricSortOrder) []*Metric {
metrics := make([]*Metric, 0, len(entry.Metrics))
for _, metric := range entry.Metrics {
metrics = append(metrics, metric)
}
switch order {
case SortMetrics:
sort.Slice(metrics, func(i, j int) bool {
lhs := metrics[i].Labels
rhs := metrics[j].Labels
if len(lhs) != len(rhs) {
return len(lhs) < len(rhs)
}
for index := range lhs {
l := lhs[index]
r := rhs[index]
if l.Name != r.Name {
return l.Name < r.Name
}
if l.Value != r.Value {
return l.Value < r.Value
}
}
return false
})
}
return metrics
}
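// GetProto renders the collection as dto.MetricFamily values, applying the
// configured sort order and, when enabled, per-sample timestamps. Samples
// that are incomplete (histograms without buckets, summaries without
// quantiles) and families without any samples are omitted.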
func (c *Collection) GetProto() []*dto.MetricFamily {
result := make([]*dto.MetricFamily, 0, len(c.Entries))
for _, entry := range c.GetEntries(c.config.MetricSortOrder) {
mf := &dto.MetricFamily{
Name: proto.String(entry.Family.Name),
Help: proto.String(helpString),
Type: MetricType(entry.Family.Type),
}
for _, metric := range c.GetMetrics(entry, c.config.MetricSortOrder) {
l := make([]*dto.LabelPair, 0, len(metric.Labels))
for _, label := range metric.Labels {
l = append(l, &dto.LabelPair{
Name: proto.String(label.Name),
Value: proto.String(label.Value),
})
}
m := &dto.Metric{
Label: l,
}
if c.config.TimestampExport == ExportTimestamp {
m.TimestampMs = proto.Int64(metric.Time.UnixNano() / int64(time.Millisecond))
}
switch entry.Family.Type {
case telegraf.Gauge:
m.Gauge = &dto.Gauge{Value: proto.Float64(metric.Scaler.Value)}
case telegraf.Counter:
m.Counter = &dto.Counter{Value: proto.Float64(metric.Scaler.Value)}
case telegraf.Untyped:
m.Untyped = &dto.Untyped{Value: proto.Float64(metric.Scaler.Value)}
case telegraf.Histogram:
buckets := make([]*dto.Bucket, 0, len(metric.Histogram.Buckets))
for _, bucket := range metric.Histogram.Buckets {
buckets = append(buckets, &dto.Bucket{
UpperBound: proto.Float64(bucket.Bound),
CumulativeCount: proto.Uint64(bucket.Count),
})
}
if len(buckets) == 0 {
continue
}
m.Histogram = &dto.Histogram{
Bucket: buckets,
SampleCount: proto.Uint64(metric.Histogram.Count),
SampleSum: proto.Float64(metric.Histogram.Sum),
}
case telegraf.Summary:
quantiles := make([]*dto.Quantile, 0, len(metric.Summary.Quantiles))
for _, quantile := range metric.Summary.Quantiles {
quantiles = append(quantiles, &dto.Quantile{
Quantile: proto.Float64(quantile.Quantile),
Value: proto.Float64(quantile.Value),
})
}
if len(quantiles) == 0 {
continue
}
m.Summary = &dto.Summary{
Quantile: quantiles,
SampleCount: proto.Uint64(metric.Summary.Count),
SampleSum: proto.Float64(metric.Summary.Sum),
}
default:
panic("unknown telegraf.ValueType")
}
mf.Metric = append(mf.Metric, m)
}
if len(mf.Metric) != 0 {
result = append(result, mf)
}
}
return result
}

View File

@ -0,0 +1,116 @@
package prometheus
import (
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
dto "github.com/prometheus/client_model/go"
"github.com/stretchr/testify/require"
)
func TestCollectionExpire(t *testing.T) {
tests := []struct {
name string
now time.Time
age time.Duration
metrics []telegraf.Metric
expected []*dto.MetricFamily
}{
{
name: "not expired",
now: time.Unix(1, 0),
age: 10 * time.Second,
metrics: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"time_idle": 42.0,
},
time.Unix(0, 0),
),
},
expected: []*dto.MetricFamily{
{
Name: proto.String("cpu_time_idle"),
Help: proto.String(helpString),
Type: dto.MetricType_UNTYPED.Enum(),
Metric: []*dto.Metric{
{
Label: []*dto.LabelPair{},
Untyped: &dto.Untyped{Value: proto.Float64(42.0)},
},
},
},
},
},
{
name: "expired single metric in metric family",
now: time.Unix(20, 0),
age: 10 * time.Second,
metrics: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"time_idle": 42.0,
},
time.Unix(0, 0),
),
},
expected: []*dto.MetricFamily{},
},
{
name: "expired one metric in metric family",
now: time.Unix(20, 0),
age: 10 * time.Second,
metrics: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"time_idle": 42.0,
},
time.Unix(0, 0),
),
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"time_guest": 42.0,
},
time.Unix(15, 0),
),
},
expected: []*dto.MetricFamily{
{
Name: proto.String("cpu_time_guest"),
Help: proto.String(helpString),
Type: dto.MetricType_UNTYPED.Enum(),
Metric: []*dto.Metric{
{
Label: []*dto.LabelPair{},
Untyped: &dto.Untyped{Value: proto.Float64(42.0)},
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewCollection(FormatConfig{})
for _, metric := range tt.metrics {
c.Add(metric)
}
c.Expire(tt.now, tt.age)
actual := c.GetProto()
require.Equal(t, tt.expected, actual)
})
}
}

View File

@ -0,0 +1,175 @@
package prometheus
import (
"strings"
"unicode"
"github.com/influxdata/telegraf"
dto "github.com/prometheus/client_model/go"
)
var FirstTable = &unicode.RangeTable{
R16: []unicode.Range16{
{0x0041, 0x005A, 1}, // A-Z
{0x005F, 0x005F, 1}, // _
{0x0061, 0x007A, 1}, // a-z
},
LatinOffset: 3,
}
var RestTable = &unicode.RangeTable{
R16: []unicode.Range16{
{0x0030, 0x0039, 1}, // 0-9
{0x0041, 0x005A, 1}, // A-Z
{0x005F, 0x005F, 1}, // _
{0x0061, 0x007A, 1}, // a-z
},
LatinOffset: 4,
}
func isValid(name string) bool {
if name == "" {
return false
}
for i, r := range name {
switch {
case i == 0:
if !unicode.In(r, FirstTable) {
return false
}
default:
if !unicode.In(r, RestTable) {
return false
}
}
}
return true
}
// SanitizeName checks if the name is a valid Prometheus metric or label
// name. If not, it attempts to replace invalid runes with an underscore to
// create a valid name. It returns the sanitized name and true if the name
// is valid to use.
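//
// Examples:
//   SanitizeName("host-name") // "host_name", true
//   SanitizeName("1cpu")      // "cpu", true  (invalid leading rune dropped)
//   SanitizeName("!!")        // "", false    (nothing valid remains)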
func SanitizeName(name string) (string, bool) {
if isValid(name) {
return name, true
}
var b strings.Builder
for i, r := range name {
switch {
case i == 0:
if unicode.In(r, FirstTable) {
b.WriteRune(r)
}
default:
if unicode.In(r, RestTable) {
b.WriteRune(r)
} else {
b.WriteString("_")
}
}
}
name = strings.Trim(b.String(), "_")
if name == "" {
return "", false
}
return name, true
}
// MetricName returns the Prometheus metric name.
func MetricName(measurement, fieldKey string, valueType telegraf.ValueType) string {
switch valueType {
case telegraf.Histogram, telegraf.Summary:
switch {
case strings.HasSuffix(fieldKey, "_bucket"):
fieldKey = strings.TrimSuffix(fieldKey, "_bucket")
case strings.HasSuffix(fieldKey, "_sum"):
fieldKey = strings.TrimSuffix(fieldKey, "_sum")
case strings.HasSuffix(fieldKey, "_count"):
fieldKey = strings.TrimSuffix(fieldKey, "_count")
}
}
if measurement == "prometheus" {
return fieldKey
}
return measurement + "_" + fieldKey
}
func MetricType(valueType telegraf.ValueType) *dto.MetricType {
switch valueType {
case telegraf.Counter:
return dto.MetricType_COUNTER.Enum()
case telegraf.Gauge:
return dto.MetricType_GAUGE.Enum()
case telegraf.Summary:
return dto.MetricType_SUMMARY.Enum()
case telegraf.Untyped:
return dto.MetricType_UNTYPED.Enum()
case telegraf.Histogram:
return dto.MetricType_HISTOGRAM.Enum()
default:
panic("unknown telegraf.ValueType")
}
}
// SampleValue converts a field value into a float64 suitable for a simple
// (counter, gauge, or untyped) sample.
func SampleValue(value interface{}) (float64, bool) {
switch v := value.(type) {
case float64:
return v, true
case int64:
return float64(v), true
case uint64:
return float64(v), true
case bool:
if v {
return 1.0, true
}
return 0.0, true
default:
return 0, false
}
}
// SampleCount converts a field value into a count suitable for a metric family
// of the Histogram or Summary type.
func SampleCount(value interface{}) (uint64, bool) {
switch v := value.(type) {
case float64:
if v < 0 {
return 0, false
}
return uint64(v), true
case int64:
if v < 0 {
return 0, false
}
return uint64(v), true
case uint64:
return v, true
default:
return 0, false
}
}
// SampleSum converts a field value into a sum suitable for a metric family
// of the Histogram or Summary type.
func SampleSum(value interface{}) (float64, bool) {
switch v := value.(type) {
case float64:
return v, true
case int64:
return float64(v), true
case uint64:
return float64(v), true
default:
return 0, false
}
}

View File

@ -0,0 +1,69 @@
package prometheus
import (
"bytes"
"github.com/influxdata/telegraf"
"github.com/prometheus/common/expfmt"
)
// TimestampExport controls if the output contains timestamps.
type TimestampExport int
const (
NoExportTimestamp TimestampExport = iota
ExportTimestamp
)
// MetricSortOrder controls if the output is sorted.
type MetricSortOrder int
const (
NoSortMetrics MetricSortOrder = iota
SortMetrics
)
// StringHandling defines how to process string fields.
type StringHandling int
const (
DiscardStrings StringHandling = iota
StringAsLabel
)
type FormatConfig struct {
TimestampExport TimestampExport
MetricSortOrder MetricSortOrder
StringHandling StringHandling
}
type Serializer struct {
config FormatConfig
}
func NewSerializer(config FormatConfig) (*Serializer, error) {
s := &Serializer{config: config}
return s, nil
}
func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) {
return s.SerializeBatch([]telegraf.Metric{metric})
}
func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
coll := NewCollection(s.config)
for _, metric := range metrics {
coll.Add(metric)
}
var buf bytes.Buffer
for _, mf := range coll.GetProto() {
enc := expfmt.NewEncoder(&buf, expfmt.FmtText)
err := enc.Encode(mf)
if err != nil {
return nil, err
}
}
return buf.Bytes(), nil
}
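
As a rough usage sketch, the serializer can be driven directly; the import paths follow this commit's layout, the metric is built with the same `testutil.MustMetric` helper the tests use, and the printed output matches the "simple" case in the tests below:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/serializers/prometheus"
	"github.com/influxdata/telegraf/testutil"
)

func main() {
	s, err := prometheus.NewSerializer(prometheus.FormatConfig{
		MetricSortOrder: prometheus.SortMetrics,
		StringHandling:  prometheus.StringAsLabel,
	})
	if err != nil {
		panic(err)
	}

	m := testutil.MustMetric(
		"cpu",
		map[string]string{"host": "example.org"},
		map[string]interface{}{"time_idle": 42.0},
		time.Unix(0, 0),
	)

	out, err := s.SerializeBatch([]telegraf.Metric{m})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// # HELP cpu_time_idle Telegraf collected metric
	// # TYPE cpu_time_idle untyped
	// cpu_time_idle{host="example.org"} 42
}
```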

View File

@ -0,0 +1,589 @@
package prometheus
import (
"strings"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
func TestSerialize(t *testing.T) {
tests := []struct {
name string
config FormatConfig
metric telegraf.Metric
expected []byte
}{
{
name: "simple",
metric: testutil.MustMetric(
"cpu",
map[string]string{
"host": "example.org",
},
map[string]interface{}{
"time_idle": 42.0,
},
time.Unix(0, 0),
),
expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle{host="example.org"} 42
`),
},
{
name: "prometheus input untyped",
metric: testutil.MustMetric(
"prometheus",
map[string]string{
"code": "400",
"method": "post",
},
map[string]interface{}{
"http_requests_total": 3.0,
},
time.Unix(0, 0),
telegraf.Untyped,
),
expected: []byte(`
# HELP http_requests_total Telegraf collected metric
# TYPE http_requests_total untyped
http_requests_total{code="400",method="post"} 3
`),
},
{
name: "prometheus input counter",
metric: testutil.MustMetric(
"prometheus",
map[string]string{
"code": "400",
"method": "post",
},
map[string]interface{}{
"http_requests_total": 3.0,
},
time.Unix(0, 0),
telegraf.Counter,
),
expected: []byte(`
# HELP http_requests_total Telegraf collected metric
# TYPE http_requests_total counter
http_requests_total{code="400",method="post"} 3
`),
},
{
name: "prometheus input gauge",
metric: testutil.MustMetric(
"prometheus",
map[string]string{
"code": "400",
"method": "post",
},
map[string]interface{}{
"http_requests_total": 3.0,
},
time.Unix(0, 0),
telegraf.Gauge,
),
expected: []byte(`
# HELP http_requests_total Telegraf collected metric
# TYPE http_requests_total gauge
http_requests_total{code="400",method="post"} 3
`),
},
{
name: "prometheus input histogram no buckets",
metric: testutil.MustMetric(
"prometheus",
map[string]string{},
map[string]interface{}{
"http_request_duration_seconds_sum": 53423,
"http_request_duration_seconds_count": 144320,
},
time.Unix(0, 0),
telegraf.Histogram,
),
expected: []byte(`
`),
},
{
name: "prometheus input histogram only bucket",
metric: testutil.MustMetric(
"prometheus",
map[string]string{
"le": "0.5",
},
map[string]interface{}{
"http_request_duration_seconds_bucket": 129389.0,
},
time.Unix(0, 0),
telegraf.Histogram,
),
expected: []byte(`
# HELP http_request_duration_seconds Telegraf collected metric
# TYPE http_request_duration_seconds histogram
http_request_duration_seconds_bucket{le="0.5"} 129389
http_request_duration_seconds_bucket{le="+Inf"} 0
http_request_duration_seconds_sum 0
http_request_duration_seconds_count 0
`),
},
{
name: "simple with timestamp",
config: FormatConfig{
TimestampExport: ExportTimestamp,
},
metric: testutil.MustMetric(
"cpu",
map[string]string{
"host": "example.org",
},
map[string]interface{}{
"time_idle": 42.0,
},
time.Unix(1574279268, 0),
),
expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle{host="example.org"} 42 1574279268000
`),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s, err := NewSerializer(FormatConfig{
MetricSortOrder: SortMetrics,
TimestampExport: tt.config.TimestampExport,
StringHandling: tt.config.StringHandling,
})
require.NoError(t, err)
actual, err := s.Serialize(tt.metric)
require.NoError(t, err)
require.Equal(t, strings.TrimSpace(string(tt.expected)),
strings.TrimSpace(string(actual)))
})
}
}
func TestSerializeBatch(t *testing.T) {
tests := []struct {
name string
config FormatConfig
metrics []telegraf.Metric
expected []byte
}{
{
name: "simple",
metrics: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"host": "one.example.org",
},
map[string]interface{}{
"time_idle": 42.0,
},
time.Unix(0, 0),
),
testutil.MustMetric(
"cpu",
map[string]string{
"host": "two.example.org",
},
map[string]interface{}{
"time_idle": 42.0,
},
time.Unix(0, 0),
),
},
expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle{host="one.example.org"} 42
cpu_time_idle{host="two.example.org"} 42
`),
},
{
name: "multiple metric families",
metrics: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"host": "one.example.org",
},
map[string]interface{}{
"time_idle": 42.0,
"time_guest": 42.0,
},
time.Unix(0, 0),
),
},
expected: []byte(`
# HELP cpu_time_guest Telegraf collected metric
# TYPE cpu_time_guest untyped
cpu_time_guest{host="one.example.org"} 42
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle{host="one.example.org"} 42
`),
},
{
name: "histogram",
metrics: []telegraf.Metric{
testutil.MustMetric(
"prometheus",
map[string]string{},
map[string]interface{}{
"http_request_duration_seconds_sum": 53423,
"http_request_duration_seconds_count": 144320,
},
time.Unix(0, 0),
telegraf.Histogram,
),
testutil.MustMetric(
"prometheus",
map[string]string{"le": "0.05"},
map[string]interface{}{
"http_request_duration_seconds_bucket": 24054.0,
},
time.Unix(0, 0),
telegraf.Histogram,
),
testutil.MustMetric(
"prometheus",
map[string]string{"le": "0.1"},
map[string]interface{}{
"http_request_duration_seconds_bucket": 33444.0,
},
time.Unix(0, 0),
telegraf.Histogram,
),
testutil.MustMetric(
"prometheus",
map[string]string{"le": "0.2"},
map[string]interface{}{
"http_request_duration_seconds_bucket": 100392.0,
},
time.Unix(0, 0),
telegraf.Histogram,
),
testutil.MustMetric(
"prometheus",
map[string]string{"le": "0.5"},
map[string]interface{}{
"http_request_duration_seconds_bucket": 129389.0,
},
time.Unix(0, 0),
telegraf.Histogram,
),
testutil.MustMetric(
"prometheus",
map[string]string{"le": "1.0"},
map[string]interface{}{
"http_request_duration_seconds_bucket": 133988.0,
},
time.Unix(0, 0),
telegraf.Histogram,
),
testutil.MustMetric(
"prometheus",
map[string]string{"le": "+Inf"},
map[string]interface{}{
"http_request_duration_seconds_bucket": 144320.0,
},
time.Unix(0, 0),
telegraf.Histogram,
),
},
expected: []byte(`
# HELP http_request_duration_seconds Telegraf collected metric
# TYPE http_request_duration_seconds histogram
http_request_duration_seconds_bucket{le="0.05"} 24054
http_request_duration_seconds_bucket{le="0.1"} 33444
http_request_duration_seconds_bucket{le="0.2"} 100392
http_request_duration_seconds_bucket{le="0.5"} 129389
http_request_duration_seconds_bucket{le="1"} 133988
http_request_duration_seconds_bucket{le="+Inf"} 144320
http_request_duration_seconds_sum 53423
http_request_duration_seconds_count 144320
`),
},
{
name: "summary",
metrics: []telegraf.Metric{
testutil.MustMetric(
"prometheus",
map[string]string{},
map[string]interface{}{
"rpc_duration_seconds_sum": 1.7560473e+07,
"rpc_duration_seconds_count": 2693,
},
time.Unix(0, 0),
telegraf.Summary,
),
testutil.MustMetric(
"prometheus",
map[string]string{"quantile": "0.01"},
map[string]interface{}{
"rpc_duration_seconds": 3102.0,
},
time.Unix(0, 0),
telegraf.Summary,
),
testutil.MustMetric(
"prometheus",
map[string]string{"quantile": "0.05"},
map[string]interface{}{
"rpc_duration_seconds": 3272.0,
},
time.Unix(0, 0),
telegraf.Summary,
),
testutil.MustMetric(
"prometheus",
map[string]string{"quantile": "0.5"},
map[string]interface{}{
"rpc_duration_seconds": 4773.0,
},
time.Unix(0, 0),
telegraf.Summary,
),
testutil.MustMetric(
"prometheus",
map[string]string{"quantile": "0.9"},
map[string]interface{}{
"rpc_duration_seconds": 9001.0,
},
time.Unix(0, 0),
telegraf.Summary,
),
testutil.MustMetric(
"prometheus",
map[string]string{"quantile": "0.99"},
map[string]interface{}{
"rpc_duration_seconds": 76656.0,
},
time.Unix(0, 0),
telegraf.Summary,
),
},
expected: []byte(`
# HELP rpc_duration_seconds Telegraf collected metric
# TYPE rpc_duration_seconds summary
rpc_duration_seconds{quantile="0.01"} 3102
rpc_duration_seconds{quantile="0.05"} 3272
rpc_duration_seconds{quantile="0.5"} 4773
rpc_duration_seconds{quantile="0.9"} 9001
rpc_duration_seconds{quantile="0.99"} 76656
rpc_duration_seconds_sum 1.7560473e+07
rpc_duration_seconds_count 2693
`),
},
{
name: "newer sample",
metrics: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"time_idle": 43.0,
},
time.Unix(1, 0),
),
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"time_idle": 42.0,
},
time.Unix(0, 0),
),
},
expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle 43
`),
},
{
name: "invalid label",
metrics: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"host-name": "example.org",
},
map[string]interface{}{
"time_idle": 42.0,
},
time.Unix(0, 0),
),
},
expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle{host_name="example.org"} 42
`),
},
{
name: "discard strings",
metrics: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"time_idle": 42.0,
"cpu": "cpu0",
},
time.Unix(0, 0),
),
},
expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle 42
`),
},
{
name: "string as label",
config: FormatConfig{
StringHandling: StringAsLabel,
},
metrics: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"time_idle": 42.0,
"cpu": "cpu0",
},
time.Unix(0, 0),
),
},
expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle{cpu="cpu0"} 42
`),
},
{
name: "string as label duplicate tag",
config: FormatConfig{
StringHandling: StringAsLabel,
},
metrics: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_idle": 42.0,
"cpu": "cpu1",
},
time.Unix(0, 0),
),
},
expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle{cpu="cpu0"} 42
`),
},
{
name: "multiple fields grouping",
metrics: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_guest": 8106.04,
"time_system": 26271.4,
"time_user": 92904.33,
},
time.Unix(0, 0),
),
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu1",
},
map[string]interface{}{
"time_guest": 8181.63,
"time_system": 25351.49,
"time_user": 96912.57,
},
time.Unix(0, 0),
),
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu2",
},
map[string]interface{}{
"time_guest": 7470.04,
"time_system": 24998.43,
"time_user": 96034.08,
},
time.Unix(0, 0),
),
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu3",
},
map[string]interface{}{
"time_guest": 7517.95,
"time_system": 24970.82,
"time_user": 94148,
},
time.Unix(0, 0),
),
},
expected: []byte(`
# HELP cpu_time_guest Telegraf collected metric
# TYPE cpu_time_guest untyped
cpu_time_guest{cpu="cpu0"} 8106.04
cpu_time_guest{cpu="cpu1"} 8181.63
cpu_time_guest{cpu="cpu2"} 7470.04
cpu_time_guest{cpu="cpu3"} 7517.95
# HELP cpu_time_system Telegraf collected metric
# TYPE cpu_time_system untyped
cpu_time_system{cpu="cpu0"} 26271.4
cpu_time_system{cpu="cpu1"} 25351.49
cpu_time_system{cpu="cpu2"} 24998.43
cpu_time_system{cpu="cpu3"} 24970.82
# HELP cpu_time_user Telegraf collected metric
# TYPE cpu_time_user untyped
cpu_time_user{cpu="cpu0"} 92904.33
cpu_time_user{cpu="cpu1"} 96912.57
cpu_time_user{cpu="cpu2"} 96034.08
cpu_time_user{cpu="cpu3"} 94148
`),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s, err := NewSerializer(FormatConfig{
MetricSortOrder: SortMetrics,
TimestampExport: tt.config.TimestampExport,
StringHandling: tt.config.StringHandling,
})
require.NoError(t, err)
actual, err := s.SerializeBatch(tt.metrics)
require.NoError(t, err)
require.Equal(t,
strings.TrimSpace(string(tt.expected)),
strings.TrimSpace(string(actual)))
})
}
}

View File

@ -10,6 +10,7 @@ import (
"github.com/influxdata/telegraf/plugins/serializers/influx"
"github.com/influxdata/telegraf/plugins/serializers/json"
"github.com/influxdata/telegraf/plugins/serializers/nowmetric"
"github.com/influxdata/telegraf/plugins/serializers/prometheus"
"github.com/influxdata/telegraf/plugins/serializers/splunkmetric"
"github.com/influxdata/telegraf/plugins/serializers/wavefront"
)
@ -45,43 +46,54 @@ type Serializer interface {
// and can be used to instantiate _any_ of the serializers.
type Config struct {
// Dataformat can be one of the serializer types listed in NewSerializer.
DataFormat string
DataFormat string `toml:"data_format"`
// Support tags in graphite protocol
GraphiteTagSupport bool
GraphiteTagSupport bool `toml:"graphite_tag_support"`
// Maximum line length in bytes; influx format only
InfluxMaxLineBytes int
InfluxMaxLineBytes int `toml:"influx_max_line_bytes"`
// Sort field keys, set to true only when debugging as it is less performant
// than unsorted fields; influx format only
InfluxSortFields bool
InfluxSortFields bool `toml:"influx_sort_fields"`
// Support unsigned integer output; influx format only
InfluxUintSupport bool
InfluxUintSupport bool `toml:"influx_uint_support"`
// Prefix to add to all measurements, only supports Graphite
Prefix string
Prefix string `toml:"prefix"`
// Template for converting telegraf metrics into Graphite
// only supports Graphite
Template string
Template string `toml:"template"`
// Timestamp units to use for JSON formatted output
TimestampUnits time.Duration
TimestampUnits time.Duration `toml:"timestamp_units"`
// Include HEC routing fields for splunkmetric output
HecRouting bool
HecRouting bool `toml:"hec_routing"`
// Enable Splunk MultiMetric output (Splunk 8.0+)
SplunkmetricMultiMetric bool
SplunkmetricMultiMetric bool `toml:"splunkmetric_multi_metric"`
// Point tags to use as the source name for Wavefront (if none found, host will be used).
WavefrontSourceOverride []string
WavefrontSourceOverride []string `toml:"wavefront_source_override"`
// Use Strict rules to sanitize metric and tag names from invalid characters for Wavefront
// When enabled forward slash (/) and comma (,) will be accepted
WavefrontUseStrict bool
WavefrontUseStrict bool `toml:"wavefront_use_strict"`
// Include the metric timestamp on each sample.
PrometheusExportTimestamp bool `toml:"prometheus_export_timestamp"`
// Sort prometheus metric families and metric samples. Useful for
// debugging.
PrometheusSortMetrics bool `toml:"prometheus_sort_metrics"`
// Output string fields as metric labels; when false string fields are
// discarded.
PrometheusStringAsLabel bool `toml:"prometheus_string_as_label"`
}
// NewSerializer a Serializer interface based on the given config.
@ -103,12 +115,37 @@ func NewSerializer(config *Config) (Serializer, error) {
serializer, err = NewCarbon2Serializer()
case "wavefront":
serializer, err = NewWavefrontSerializer(config.Prefix, config.WavefrontUseStrict, config.WavefrontSourceOverride)
case "prometheus":
serializer, err = NewPrometheusSerializer(config)
default:
err = fmt.Errorf("Invalid data format: %s", config.DataFormat)
}
return serializer, err
}
func NewPrometheusSerializer(config *Config) (Serializer, error) {
exportTimestamp := prometheus.NoExportTimestamp
if config.PrometheusExportTimestamp {
exportTimestamp = prometheus.ExportTimestamp
}
sortMetrics := prometheus.NoSortMetrics
if config.PrometheusSortMetrics {
sortMetrics = prometheus.SortMetrics
}
stringAsLabels := prometheus.DiscardStrings
if config.PrometheusStringAsLabel {
stringAsLabels = prometheus.StringAsLabel
}
return prometheus.NewSerializer(prometheus.FormatConfig{
TimestampExport: exportTimestamp,
MetricSortOrder: sortMetrics,
StringHandling: stringAsLabels,
})
}
func NewWavefrontSerializer(prefix string, useStrict bool, sourceOverride []string) (Serializer, error) {
return wavefront.NewSerializer(prefix, useStrict, sourceOverride)
}