Add new line protocol parser and serializer, influxdb output (#3924)

This commit is contained in:
Daniel Nelson
2018-03-27 17:30:51 -07:00
committed by GitHub
parent 720c27559c
commit 222a68d72e
70 changed files with 26827 additions and 6533 deletions

View File

@@ -144,83 +144,6 @@ func TestCommandError(t *testing.T) {
assert.Equal(t, acc.NFields(), 0, "No new points should have been added")
}
// TestLineProtocolParse verifies that output produced by the mocked
// command is parsed as influx line protocol into the expected metric.
func TestLineProtocolParse(t *testing.T) {
	parser, _ := parsers.NewInfluxParser()
	e := &Exec{
		runner:   newRunnerMock([]byte(lineProtocol), nil),
		Commands: []string{"line-protocol"},
		parser:   parser,
	}

	var acc testutil.Accumulator
	require.NoError(t, acc.GatherError(e.Gather))

	expectedFields := map[string]interface{}{
		"usage_busy": float64(1),
		"usage_idle": float64(99),
	}
	expectedTags := map[string]string{
		"datacenter": "us-east",
		"host":       "foo",
	}
	acc.AssertContainsTaggedFields(t, "cpu", expectedFields, expectedTags)
}
// TestLineProtocolEmptyParse verifies that empty line-protocol input
// gathers cleanly without producing an error.
func TestLineProtocolEmptyParse(t *testing.T) {
	parser, _ := parsers.NewInfluxParser()
	e := &Exec{
		runner:   newRunnerMock([]byte(lineProtocolEmpty), nil),
		Commands: []string{"line-protocol"},
		parser:   parser,
	}

	var acc testutil.Accumulator
	// Use acc.GatherError (as the sibling line-protocol tests do) so that
	// errors reported through the accumulator are also surfaced, not just
	// the direct return value of Gather.
	require.NoError(t, acc.GatherError(e.Gather))
}
// TestLineProtocolShortParse verifies that truncated line-protocol input
// surfaces a "buffer too short" parse error through the accumulator.
func TestLineProtocolShortParse(t *testing.T) {
	parser, _ := parsers.NewInfluxParser()
	e := &Exec{
		runner:   newRunnerMock([]byte(lineProtocolShort), nil),
		Commands: []string{"line-protocol"},
		parser:   parser,
	}

	var acc testutil.Accumulator
	gatherErr := acc.GatherError(e.Gather)
	require.Error(t, gatherErr)
	assert.Contains(t, gatherErr.Error(), "buffer too short", "A buffer too short error was expected")
}
// TestLineProtocolParseMultiple verifies that a multi-line payload is
// parsed into one metric per cpu tag value.
func TestLineProtocolParseMultiple(t *testing.T) {
	parser, _ := parsers.NewInfluxParser()
	e := &Exec{
		runner:   newRunnerMock([]byte(lineProtocolMulti), nil),
		Commands: []string{"line-protocol"},
		parser:   parser,
	}

	var acc testutil.Accumulator
	require.NoError(t, acc.GatherError(e.Gather))

	expectedFields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
	}
	// One metric is expected for every cpu tag value in the payload.
	for _, cpuName := range []string{"cpu0", "cpu1", "cpu2", "cpu3", "cpu4", "cpu5", "cpu6"} {
		expectedTags := map[string]string{
			"host":       "foo",
			"datacenter": "us-east",
			"cpu":        cpuName,
		}
		acc.AssertContainsTaggedFields(t, "cpu", expectedFields, expectedTags)
	}
}
func TestExecCommandWithGlob(t *testing.T) {
parser, _ := parsers.NewValueParser("metric", "string", nil)
e := NewExec()

View File

@@ -53,9 +53,10 @@ type HTTPListener struct {
listener net.Listener
parser influx.InfluxParser
acc telegraf.Accumulator
pool *pool
handler *influx.MetricHandler
parser *influx.Parser
acc telegraf.Accumulator
pool *pool
BytesRecv selfstat.Stat
RequestsServed selfstat.Stat
@@ -176,6 +177,9 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
h.listener = listener
h.Port = listener.Addr().(*net.TCPAddr).Port
h.handler = influx.NewMetricHandler()
h.parser = influx.NewParser(h.handler)
h.wg.Add(1)
go func() {
defer h.wg.Done()
@@ -336,7 +340,11 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
}
func (h *HTTPListener) parse(b []byte, t time.Time, precision string) error {
metrics, err := h.parser.ParseWithDefaultTimePrecision(b, t, precision)
h.handler.SetPrecision(getPrecisionMultiplier(precision))
metrics, err := h.parser.Parse(b)
if err != nil {
return err
}
for _, m := range metrics {
h.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
@@ -408,6 +416,23 @@ func (h *HTTPListener) AuthenticateIfSet(handler http.HandlerFunc, res http.Resp
}
}
func getPrecisionMultiplier(precision string) time.Duration {
d := time.Nanosecond
switch precision {
case "u":
d = time.Microsecond
case "ms":
d = time.Millisecond
case "s":
d = time.Second
case "m":
d = time.Minute
case "h":
d = time.Hour
}
return d
}
func init() {
inputs.Add("http_listener", func() telegraf.Input {
return &HTTPListener{

View File

@@ -326,6 +326,10 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
}
}
if len(fields) == 0 {
return nil, fmt.Errorf("logparser_grok: must have one or more fields")
}
return metric.New(p.Measurement, tags, fields, p.tsModder.tsMod(timestamp))
}

View File

@@ -799,7 +799,7 @@ func TestTimezoneEmptyCompileFileAndParse(t *testing.T) {
},
metricA.Fields())
assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
assert.Equal(t, int64(1465040505000000000), metricA.UnixNano())
assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())
metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`)
require.NotNil(t, metricB)
@@ -812,7 +812,7 @@ func TestTimezoneEmptyCompileFileAndParse(t *testing.T) {
},
metricB.Fields())
assert.Equal(t, map[string]string{}, metricB.Tags())
assert.Equal(t, int64(1465044105000000000), metricB.UnixNano())
assert.Equal(t, int64(1465044105000000000), metricB.Time().UnixNano())
}
func TestTimezoneMalformedCompileFileAndParse(t *testing.T) {
@@ -835,7 +835,7 @@ func TestTimezoneMalformedCompileFileAndParse(t *testing.T) {
},
metricA.Fields())
assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
assert.Equal(t, int64(1465040505000000000), metricA.UnixNano())
assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())
metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`)
require.NotNil(t, metricB)
@@ -848,7 +848,7 @@ func TestTimezoneMalformedCompileFileAndParse(t *testing.T) {
},
metricB.Fields())
assert.Equal(t, map[string]string{}, metricB.Tags())
assert.Equal(t, int64(1465044105000000000), metricB.UnixNano())
assert.Equal(t, int64(1465044105000000000), metricB.Time().UnixNano())
}
func TestTimezoneEuropeCompileFileAndParse(t *testing.T) {
@@ -871,7 +871,7 @@ func TestTimezoneEuropeCompileFileAndParse(t *testing.T) {
},
metricA.Fields())
assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
assert.Equal(t, int64(1465040505000000000), metricA.UnixNano())
assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())
metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`)
require.NotNil(t, metricB)
@@ -884,7 +884,7 @@ func TestTimezoneEuropeCompileFileAndParse(t *testing.T) {
},
metricB.Fields())
assert.Equal(t, map[string]string{}, metricB.Tags())
assert.Equal(t, int64(1465036905000000000), metricB.UnixNano())
assert.Equal(t, int64(1465036905000000000), metricB.Time().UnixNano())
}
func TestTimezoneAmericasCompileFileAndParse(t *testing.T) {
@@ -907,7 +907,7 @@ func TestTimezoneAmericasCompileFileAndParse(t *testing.T) {
},
metricA.Fields())
assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
assert.Equal(t, int64(1465040505000000000), metricA.UnixNano())
assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())
metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`)
require.NotNil(t, metricB)
@@ -920,7 +920,7 @@ func TestTimezoneAmericasCompileFileAndParse(t *testing.T) {
},
metricB.Fields())
assert.Equal(t, map[string]string{}, metricB.Tags())
assert.Equal(t, int64(1465058505000000000), metricB.UnixNano())
assert.Equal(t, int64(1465058505000000000), metricB.Time().UnixNano())
}
func TestTimezoneLocalCompileFileAndParse(t *testing.T) {
@@ -943,7 +943,7 @@ func TestTimezoneLocalCompileFileAndParse(t *testing.T) {
},
metricA.Fields())
assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
assert.Equal(t, int64(1465040505000000000), metricA.UnixNano())
assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())
metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`)
require.NotNil(t, metricB)
@@ -956,5 +956,5 @@ func TestTimezoneLocalCompileFileAndParse(t *testing.T) {
},
metricB.Fields())
assert.Equal(t, map[string]string{}, metricB.Tags())
assert.Equal(t, time.Date(2016, time.June, 4, 12, 41, 45, 0, time.Local).UnixNano(), metricB.UnixNano())
assert.Equal(t, time.Date(2016, time.June, 4, 12, 41, 45, 0, time.Local).UnixNano(), metricB.Time().UnixNano())
}

View File

@@ -59,7 +59,7 @@ func TestRunParserInvalidMsg(t *testing.T) {
in <- natsMsg(invalidMsg)
acc.WaitError(1)
assert.Contains(t, acc.Errors[0].Error(), "E! subject: telegraf, error: metric parsing error")
assert.Contains(t, acc.Errors[0].Error(), "E! subject: telegraf, error: metric parse error")
assert.EqualValues(t, 0, acc.NMetrics())
}

View File

@@ -111,9 +111,11 @@ func TestParseValidPrometheus(t *testing.T) {
"gauge": float64(1),
}, metrics[0].Fields())
assert.Equal(t, map[string]string{
"osVersion": "CentOS Linux 7 (Core)",
"dockerVersion": "1.8.2",
"kernelVersion": "3.10.0-229.20.1.el7.x86_64",
"osVersion": "CentOS Linux 7 (Core)",
"cadvisorRevision": "",
"cadvisorVersion": "",
"dockerVersion": "1.8.2",
"kernelVersion": "3.10.0-229.20.1.el7.x86_64",
}, metrics[0].Tags())
// Counter value

View File

@@ -47,11 +47,13 @@ func TestHighTrafficUDP(t *testing.T) {
ServiceAddress: ":8126",
AllowedPendingMessages: 100000,
}
listener.parser, _ = parsers.NewInfluxParser()
var err error
listener.parser, err = parsers.NewInfluxParser()
require.NoError(t, err)
acc := &testutil.Accumulator{}
// send multiple messages to socket
err := listener.Start(acc)
err = listener.Start(acc)
require.NoError(t, err)
conn, err := net.Dial("udp", "127.0.0.1:8126")

View File

@@ -97,7 +97,7 @@ func (f *File) Write(metrics []telegraf.Metric) error {
}
_, err = f.writer.Write(b)
if err != nil {
return fmt.Errorf("failed to write message: %s, %s", metric.Serialize(), err)
return fmt.Errorf("failed to write message: %s, %s", b, err)
}
}
return nil

View File

@@ -7,12 +7,13 @@ import (
"encoding/binary"
ejson "encoding/json"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs"
"io"
"math"
"net"
"os"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs"
)
const (
@@ -212,7 +213,7 @@ func serialize(metric telegraf.Metric) ([]string, error) {
m := make(map[string]interface{})
m["version"] = "1.1"
m["timestamp"] = metric.UnixNano() / 1000000000
m["timestamp"] = metric.Time().UnixNano() / 1000000000
m["short_message"] = "telegraf"
m["name"] = metric.Name()

View File

@@ -1,35 +1,40 @@
# InfluxDB Output Plugin
This plugin writes to [InfluxDB](https://www.influxdb.com) via HTTP or UDP.
This InfluxDB output plugin writes metrics to the [InfluxDB](https://github.com/influxdata/influxdb) HTTP or UDP service.
### Configuration:
```toml
# Configuration for influxdb server to send metrics to
# Configuration for sending metrics to InfluxDB
[[outputs.influxdb]]
## The full HTTP or UDP URL for your InfluxDB instance.
##
## Multiple urls can be specified as part of the same cluster,
## this means that only ONE of the urls will be written to each interval.
# urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
urls = ["http://127.0.0.1:8086"] # required
## The target database for metrics (telegraf will create it if not exists).
database = "telegraf" # required
## Multiple URLs can be specified for a single cluster, only ONE of the
## urls will be written to each interval.
# urls = ["udp://127.0.0.1:8089"]
# urls = ["http://127.0.0.1:8086"]
## The target database for metrics; will be created as needed.
# database = "telegraf"
## Name of existing retention policy to write to. Empty string writes to
## the default retention policy.
retention_policy = ""
## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
write_consistency = "any"
# retention_policy = ""
## Write timeout (for the InfluxDB client), formatted as a string.
## If not provided, will default to 5s. 0s means no timeout (not recommended).
timeout = "5s"
## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
# write_consistency = "any"
## Timeout for HTTP messages.
# timeout = "5s"
## HTTP Basic Auth
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
## Set the user agent for HTTP POSTs (can be useful for log differentiation)
## HTTP User-Agent
# user_agent = "telegraf"
## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
## UDP payload size is the maximum packet size to send.
# udp_payload = 512
## Optional SSL Config
@@ -39,37 +44,14 @@ This plugin writes to [InfluxDB](https://www.influxdb.com) via HTTP or UDP.
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## HTTP Proxy Config
## HTTP Proxy override, if unset values the standard proxy environment
## variables are consulted to determine which proxy, if any, should be used.
# http_proxy = "http://corporate.proxy:3128"
## Optional HTTP headers
## Additional HTTP headers
# http_headers = {"X-Special-Header" = "Special-Value"}
## Compress each HTTP request payload using GZIP.
# content_encoding = "gzip"
## HTTP Content-Encoding for write request body, can be set to "gzip" to
## compress body or "identity" to apply no encoding.
# content_encoding = "identity"
```
### Required parameters:
* `urls`: List of strings, this is for InfluxDB clustering
support. On each flush interval, Telegraf will randomly choose one of the urls
to write to. Each URL should start with either `http://` or `udp://`
* `database`: The name of the database to write to.
### Optional parameters:
* `write_consistency`: Write consistency (clusters only), can be: "any", "one", "quorum", "all".
* `retention_policy`: Name of existing retention policy to write to. Empty string writes to the default retention policy.
* `timeout`: Write timeout (for the InfluxDB client), formatted as a string. If not provided, will default to 5s. 0s means no timeout (not recommended).
* `username`: Username for influxdb
* `password`: Password for influxdb
* `user_agent`: Set the user agent for HTTP POSTs (can be useful for log differentiation)
* `udp_payload`: Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
* `ssl_ca`: SSL CA
* `ssl_cert`: SSL CERT
* `ssl_key`: SSL key
* `insecure_skip_verify`: Use SSL but skip chain & host verification (default: false)
* `http_proxy`: HTTP Proxy URI
* `http_headers`: HTTP headers to add to each HTTP request
* `content_encoding`: Compress each HTTP request payload using gzip if set to: "gzip"

View File

@@ -1,16 +0,0 @@
package client
import "io"
// Client is the interface shared by the HTTP and UDP InfluxDB clients:
// run an InfluxQL command, stream line-protocol writes, and release
// underlying resources.
type Client interface {
	// Query executes an InfluxQL command (no-op for transports that do
	// not support queries).
	Query(command string) error
	// WriteStream sends line-protocol data read from b.
	WriteStream(b io.Reader) error
	// Close releases any resources held by the client.
	Close() error
}

// WriteParams carries the per-write settings that are encoded into the
// write request (database, retention policy, precision, consistency).
type WriteParams struct {
	Database string
	// RetentionPolicy selects a retention policy; empty uses the server default.
	RetentionPolicy string
	// Precision is the timestamp precision; "n" is treated as the default.
	Precision string
	// Consistency is the cluster write consistency; "one" is treated as the default.
	Consistency string
}

View File

@@ -1,277 +0,0 @@
package client
import (
"bytes"
"compress/gzip"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
"time"
)
var (
defaultRequestTimeout = time.Second * 5
)
// NewHTTP validates config, applies defaults, and returns a Client that
// writes to an InfluxDB HTTP(S) endpoint. A URL and a default database
// are required; the request timeout defaults to defaultRequestTimeout.
func NewHTTP(config HTTPConfig, defaultWP WriteParams) (Client, error) {
	// Both a URL and a default database are mandatory.
	if config.URL == "" {
		return nil, fmt.Errorf("config.URL is required to create an HTTP client")
	}
	if defaultWP.Database == "" {
		return nil, fmt.Errorf("A default database is required to create an HTTP client")
	}

	if config.Timeout == 0 {
		config.Timeout = defaultRequestTimeout
	}

	u, err := url.Parse(config.URL)
	if err != nil {
		return nil, fmt.Errorf("error parsing config.URL: %s", err)
	}
	if u.Scheme != "http" && u.Scheme != "https" {
		return nil, fmt.Errorf("config.URL scheme must be http(s), got %s", u.Scheme)
	}

	// Default to the environment's proxy settings; an explicit
	// config.HTTPProxy overrides them.
	transport := http.Transport{
		Proxy:           http.ProxyFromEnvironment,
		TLSClientConfig: config.TLSConfig,
	}
	if len(config.HTTPProxy) > 0 {
		proxyURL, err := url.Parse(config.HTTPProxy)
		if err != nil {
			return nil, fmt.Errorf("error parsing config.HTTPProxy: %s", err)
		}
		transport.Proxy = http.ProxyURL(proxyURL)
	}

	return &httpClient{
		writeURL: writeURL(u, defaultWP),
		config:   config,
		url:      u,
		client: &http.Client{
			Timeout:   config.Timeout,
			Transport: &transport,
		},
	}, nil
}
// HTTPHeaders is a set of extra header name/value pairs applied to every
// request sent by the HTTP client.
type HTTPHeaders map[string]string

// HTTPConfig is the configuration used to construct an HTTP Client via
// NewHTTP.
type HTTPConfig struct {
	// URL should be of the form "http://host:port" (REQUIRED)
	URL string
	// UserAgent sets the User-Agent header.
	UserAgent string
	// Timeout specifies a time limit for requests made by this
	// Client. The timeout includes connection time, any
	// redirects, and reading the response body. The timer remains
	// running after Get, Head, Post, or Do return and will
	// interrupt reading of the Response.Body.
	//
	// A Timeout of zero means no timeout.
	Timeout time.Duration
	// Username is the basic auth username for the server.
	Username string
	// Password is the basic auth password for the server.
	Password string
	// TLSConfig is the tls auth settings to use for each request.
	TLSConfig *tls.Config
	// Proxy URL should be of the form "http://host:port"
	HTTPProxy string
	// HTTP headers to append to HTTP requests.
	HTTPHeaders HTTPHeaders
	// The content encoding mechanism to use for each request.
	// "gzip" compresses request bodies; anything else sends them as-is.
	ContentEncoding string
}
// Response represents a list of statement results.
type Response struct {
	// ignore Results:
	Results []interface{} `json:"-"`
	Err     string        `json:"error,omitempty"`
}

// Error returns the first error from any statement.
// Returns nil if no errors occurred on any statements.
func (r *Response) Error() error {
	if r.Err != "" {
		// Use a fixed format string so a '%' in the server-provided
		// message is not interpreted as a formatting verb
		// (fmt.Errorf(r.Err) is a non-constant format string; go vet printf).
		return fmt.Errorf("%s", r.Err)
	}
	return nil
}
// httpClient implements Client over HTTP(S).
type httpClient struct {
	// writeURL is precomputed once in NewHTTP from the default WriteParams.
	writeURL string
	config HTTPConfig
	client *http.Client
	url *url.URL
}
// Query posts the given InfluxQL command to the /query endpoint and
// expects a 200 response.
func (c *httpClient) Query(command string) error {
	target := queryURL(c.url, command)
	req, err := c.makeRequest(target, bytes.NewReader([]byte("")))
	if err != nil {
		return err
	}
	return c.doRequest(req, http.StatusOK)
}
// WriteStream posts the line-protocol stream r to the precomputed write
// URL and expects a 204 response.
func (c *httpClient) WriteStream(r io.Reader) error {
	req, reqErr := c.makeWriteRequest(r, c.writeURL)
	if reqErr != nil {
		return reqErr
	}
	return c.doRequest(req, http.StatusNoContent)
}
// doRequest executes req and checks the response against expectedCode.
// A 204 "no content" status is always treated as success; otherwise the
// body is decoded as a JSON Response and any embedded server error or
// status-code mismatch is reported.
func (c *httpClient) doRequest(
	req *http.Request,
	expectedCode int,
) error {
	resp, err := c.client.Do(req)
	if err != nil {
		return err
	}
	// Always close the body so the transport can reuse the connection;
	// the previous code leaked it on every path, including the 204
	// early return.
	defer resp.Body.Close()
	code := resp.StatusCode
	// If it's a "no content" response, then release and return nil
	if code == http.StatusNoContent {
		return nil
	}
	// not a "no content" response, so parse the result:
	var response Response
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("Fatal error reading body: %s", err)
	}
	decErr := json.Unmarshal(body, &response)
	// If we got a JSON decode error, send that back
	if decErr != nil {
		err = fmt.Errorf("Unable to decode json: received status code %d err: %s", code, decErr)
	}
	// Unexpected response code OR error in JSON response body overrides
	// a JSON decode error:
	if code != expectedCode || response.Error() != nil {
		err = fmt.Errorf("Response Error: Status Code [%d], expected [%d], [%v]",
			code, expectedCode, response.Error())
	}
	return err
}
// makeWriteRequest builds the POST for the write endpoint and marks the
// body's Content-Encoding when gzip is configured. The actual
// compression of the body happens inside makeRequest.
func (c *httpClient) makeWriteRequest(
	body io.Reader,
	writeURL string,
) (*http.Request, error) {
	req, err := c.makeRequest(writeURL, body)
	if err != nil {
		return nil, err
	}
	if c.config.ContentEncoding == "gzip" {
		req.Header.Set("Content-Encoding", "gzip")
	}
	return req, nil
}
// makeRequest builds a POST request for uri, gzip-compressing the body
// first when ContentEncoding is "gzip", then applying content type, the
// configured extra headers, user agent, and basic auth.
func (c *httpClient) makeRequest(uri string, body io.Reader) (*http.Request, error) {
	var req *http.Request
	var err error
	if c.config.ContentEncoding == "gzip" {
		body, err = compressWithGzip(body)
		if err != nil {
			return nil, err
		}
	}
	req, err = http.NewRequest("POST", uri, body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "text/plain; charset=utf-8")
	// User-supplied headers are applied before the fixed User-Agent below,
	// so a User-Agent entry in HTTPHeaders would be overwritten.
	for header, value := range c.config.HTTPHeaders {
		req.Header.Set(header, value)
	}
	req.Header.Set("User-Agent", c.config.UserAgent)
	// Basic auth is only attached when both credentials are present.
	if c.config.Username != "" && c.config.Password != "" {
		req.SetBasicAuth(c.config.Username, c.config.Password)
	}
	return req, nil
}
// compressWithGzip returns a reader that yields the gzip-compressed
// contents of data. Compression runs in a background goroutine streaming
// through an io.Pipe, so the payload is never fully buffered in memory.
// Any copy error is propagated to the returned reader via
// CloseWithError; the previous code raced on a shared err variable and
// returned it before the goroutine had run, so copy errors were lost.
func compressWithGzip(data io.Reader) (io.Reader, error) {
	pr, pw := io.Pipe()
	gw := gzip.NewWriter(pw)
	go func() {
		_, err := io.Copy(gw, data)
		gw.Close()
		// Surface the copy error (nil on success) to the pipe's reader.
		pw.CloseWithError(err)
	}()
	return pr, nil
}
// Close implements Client. The HTTP client holds no resources that need
// explicit release (connections are managed by the http.Transport).
func (c *httpClient) Close() error {
	// Nothing to do.
	return nil
}
// writeURL renders the /write endpoint URL for u with the query
// parameters implied by wp. Default values ("n" precision, "one"
// consistency, empty retention policy) are omitted from the query.
// u.Path is restored before returning, but u.RawQuery is left set.
func writeURL(u *url.URL, wp WriteParams) string {
	vals := url.Values{}
	vals.Set("db", wp.Database)
	if wp.RetentionPolicy != "" {
		vals.Set("rp", wp.RetentionPolicy)
	}
	if wp.Precision != "" && wp.Precision != "n" {
		vals.Set("precision", wp.Precision)
	}
	if wp.Consistency != "" && wp.Consistency != "one" {
		vals.Set("consistency", wp.Consistency)
	}
	u.RawQuery = vals.Encode()

	origPath := u.Path
	u.Path = path.Join(origPath, "write")
	full := u.String()
	u.Path = origPath
	return full
}
func queryURL(u *url.URL, command string) string {
params := url.Values{}
params.Set("q", command)
u.RawQuery = params.Encode()
p := u.Path
u.Path = path.Join(p, "query")
s := u.String()
u.Path = p
return s
}

View File

@@ -1,335 +0,0 @@
package client
import (
"bytes"
"compress/gzip"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/assert"
)
func TestHTTPClient_Write(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/write":
// test form values:
if r.FormValue("db") != "test" {
w.WriteHeader(http.StatusTeapot)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, `{"results":[{}],"error":"wrong db name"}`)
}
if r.FormValue("rp") != "policy" {
w.WriteHeader(http.StatusTeapot)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, `{"results":[{}],"error":"wrong rp name"}`)
}
if r.FormValue("precision") != "ns" {
w.WriteHeader(http.StatusTeapot)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, `{"results":[{}],"error":"wrong precision"}`)
}
if r.FormValue("consistency") != "all" {
w.WriteHeader(http.StatusTeapot)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, `{"results":[{}],"error":"wrong consistency"}`)
}
// test that user agent is set properly
if r.UserAgent() != "test-agent" {
w.WriteHeader(http.StatusTeapot)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, `{"results":[{}],"error":"wrong agent name"}`)
}
// test basic auth params
user, pass, ok := r.BasicAuth()
if !ok {
w.WriteHeader(http.StatusTeapot)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, `{"results":[{}],"error":"basic auth not set"}`)
}
if user != "test-user" || pass != "test-password" {
w.WriteHeader(http.StatusTeapot)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, `{"results":[{}],"error":"basic auth incorrect"}`)
}
// test that user-specified http header is set properly
if r.Header.Get("X-Test-Header") != "Test-Value" {
w.WriteHeader(http.StatusTeapot)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, `{"results":[{}],"error":"wrong http header value"}`)
}
// Validate Content-Length Header
if r.ContentLength != 13 {
w.WriteHeader(http.StatusTeapot)
w.Header().Set("Content-Type", "application/json")
msg := fmt.Sprintf(`{"results":[{}],"error":"Content-Length: expected [13], got [%d]"}`, r.ContentLength)
fmt.Fprintln(w, msg)
}
// Validate the request body:
buf := make([]byte, 100)
n, _ := r.Body.Read(buf)
expected := "cpu value=99"
got := string(buf[0 : n-1])
if expected != got {
w.WriteHeader(http.StatusTeapot)
w.Header().Set("Content-Type", "application/json")
msg := fmt.Sprintf(`{"results":[{}],"error":"expected [%s], got [%s]"}`, expected, got)
fmt.Fprintln(w, msg)
}
w.WriteHeader(http.StatusNoContent)
w.Header().Set("Content-Type", "application/json")
case "/query":
w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, `{"results":[{}]}`)
}
}))
defer ts.Close()
config := HTTPConfig{
URL: ts.URL,
UserAgent: "test-agent",
Username: "test-user",
Password: "test-password",
HTTPHeaders: HTTPHeaders{
"X-Test-Header": "Test-Value",
},
}
wp := WriteParams{
Database: "test",
RetentionPolicy: "policy",
Precision: "ns",
Consistency: "all",
}
client, err := NewHTTP(config, wp)
defer client.Close()
assert.NoError(t, err)
err = client.WriteStream(bytes.NewReader([]byte("cpu value=99\n")))
assert.NoError(t, err)
}
// TestHTTPClient_Write_Errors verifies that a non-2xx /write response is
// surfaced as an error from WriteStream.
func TestHTTPClient_Write_Errors(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/write":
			w.WriteHeader(http.StatusTeapot)
		case "/query":
			w.WriteHeader(http.StatusOK)
			w.Header().Set("Content-Type", "application/json")
			fmt.Fprintln(w, `{"results":[{}]}`)
		}
	}))
	defer ts.Close()

	config := HTTPConfig{
		URL: ts.URL,
	}
	defaultWP := WriteParams{
		Database: "test",
	}
	client, err := NewHTTP(config, defaultWP)
	// Check the constructor error before deferring Close: deferring on a
	// nil client would panic and mask the real failure.
	assert.NoError(t, err)
	defer client.Close()

	lp := []byte("cpu value=99\n")
	err = client.WriteStream(bytes.NewReader(lp))
	assert.Error(t, err)
}
func TestNewHTTPErrors(t *testing.T) {
// No URL:
config := HTTPConfig{}
defaultWP := WriteParams{
Database: "test",
}
client, err := NewHTTP(config, defaultWP)
assert.Error(t, err)
assert.Nil(t, client)
// No Database:
config = HTTPConfig{
URL: "http://localhost:8086",
}
defaultWP = WriteParams{}
client, err = NewHTTP(config, defaultWP)
assert.Nil(t, client)
assert.Error(t, err)
// Invalid URL:
config = HTTPConfig{
URL: "http://192.168.0.%31:8080/",
}
defaultWP = WriteParams{
Database: "test",
}
client, err = NewHTTP(config, defaultWP)
assert.Nil(t, client)
assert.Error(t, err)
// Invalid URL scheme:
config = HTTPConfig{
URL: "mailto://localhost:8086",
}
defaultWP = WriteParams{
Database: "test",
}
client, err = NewHTTP(config, defaultWP)
assert.Nil(t, client)
assert.Error(t, err)
}
func TestHTTPClient_Query(t *testing.T) {
command := "CREATE DATABASE test"
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/write":
w.WriteHeader(http.StatusNoContent)
case "/query":
// validate the create database command is correct
got := r.FormValue("q")
if got != command {
w.WriteHeader(http.StatusTeapot)
w.Header().Set("Content-Type", "application/json")
msg := fmt.Sprintf(`{"results":[{}],"error":"got %s, expected %s"}`, got, command)
fmt.Fprintln(w, msg)
}
w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, `{"results":[{}]}`)
}
}))
defer ts.Close()
config := HTTPConfig{
URL: ts.URL,
}
defaultWP := WriteParams{
Database: "test",
}
client, err := NewHTTP(config, defaultWP)
defer client.Close()
assert.NoError(t, err)
err = client.Query(command)
assert.NoError(t, err)
}
// TestHTTPClient_Query_ResponseError verifies that a server error payload
// returned from /query is surfaced as an error from Query.
func TestHTTPClient_Query_ResponseError(t *testing.T) {
	command := "CREATE DATABASE test"
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/write":
			w.WriteHeader(http.StatusNoContent)
		case "/query":
			w.WriteHeader(http.StatusTeapot)
			w.Header().Set("Content-Type", "application/json")
			// Constant string: no fmt.Sprintf needed (staticcheck S1039).
			fmt.Fprintln(w, `{"results":[{}],"error":"couldnt create database"}`)
		}
	}))
	defer ts.Close()

	config := HTTPConfig{
		URL: ts.URL,
	}
	defaultWP := WriteParams{
		Database: "test",
	}
	client, err := NewHTTP(config, defaultWP)
	// Check the constructor error before deferring Close: deferring on a
	// nil client would panic and mask the real failure.
	assert.NoError(t, err)
	defer client.Close()

	err = client.Query(command)
	assert.Error(t, err)
}
func TestHTTPClient_Query_JSONDecodeError(t *testing.T) {
command := "CREATE DATABASE test"
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/write":
w.WriteHeader(http.StatusNoContent)
case "/query":
w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "application/json")
// write JSON missing a ']'
msg := fmt.Sprintf(`{"results":[{}}`)
fmt.Fprintln(w, msg)
}
}))
defer ts.Close()
config := HTTPConfig{
URL: ts.URL,
}
defaultWP := WriteParams{
Database: "test",
}
client, err := NewHTTP(config, defaultWP)
defer client.Close()
assert.NoError(t, err)
err = client.Query(command)
assert.Error(t, err)
assert.Contains(t, err.Error(), "json")
}
// TestGzipCompression round-trips a payload through compressWithGzip and
// a gzip.Reader and checks the content is unchanged.
func TestGzipCompression(t *testing.T) {
	influxLine := "cpu value=99\n"

	// Compress the payload using GZIP.
	payload := bytes.NewReader([]byte(influxLine))
	compressed, err := compressWithGzip(payload)
	assert.Nil(t, err)

	// Decompress the compressed payload and make sure
	// that its original value has not changed.
	gr, err := gzip.NewReader(compressed)
	assert.Nil(t, err)
	// Close the reader after reading, not before: the original called
	// gr.Close() prior to ReadFrom, which only worked by accident.
	defer gr.Close()

	var uncompressed bytes.Buffer
	_, err = uncompressed.ReadFrom(gr)
	assert.Nil(t, err)
	assert.Equal(t, []byte(influxLine), uncompressed.Bytes())
}
func TestHTTPClient_PathPrefix(t *testing.T) {
prefix := "/some/random/prefix"
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case prefix + "/write":
w.WriteHeader(http.StatusNoContent)
w.Header().Set("Content-Type", "application/json")
case prefix + "/query":
w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, `{"results":[{}]}`)
default:
w.WriteHeader(http.StatusNotFound)
msg := fmt.Sprintf("Path not found: %s", r.URL.Path)
fmt.Fprintln(w, msg)
}
}))
defer ts.Close()
config := HTTPConfig{
URL: ts.URL + prefix,
}
wp := WriteParams{
Database: "test",
}
client, err := NewHTTP(config, wp)
defer client.Close()
assert.NoError(t, err)
err = client.Query("CREATE DATABASE test")
assert.NoError(t, err)
err = client.WriteStream(bytes.NewReader([]byte("cpu value=99\n")))
assert.NoError(t, err)
}

View File

@@ -1,105 +0,0 @@
package client
import (
"fmt"
"io"
"log"
"net"
"net/url"
)
const (
	// UDPPayloadSize is a reasonable default payload size for UDP packets that
	// could be travelling over the internet.
	UDPPayloadSize = 512
)

// UDPConfig is the config data needed to create a UDP Client
type UDPConfig struct {
	// URL should be of the form "udp://host:port"
	// or "udp://[ipv6-host%zone]:port".
	URL string
	// PayloadSize is the maximum size of a UDP client message, optional
	// Tune this based on your network. Defaults to UDPPayloadSize.
	// Points longer than this are dropped by WriteStream.
	PayloadSize int
}
// NewUDP will return an instance of the telegraf UDP output plugin for influxdb
func NewUDP(config UDPConfig) (Client, error) {
p, err := url.Parse(config.URL)
if err != nil {
return nil, fmt.Errorf("Error parsing UDP url [%s]: %s", config.URL, err)
}
udpAddr, err := net.ResolveUDPAddr("udp", p.Host)
if err != nil {
return nil, fmt.Errorf("Error resolving UDP Address [%s]: %s", p.Host, err)
}
conn, err := net.DialUDP("udp", nil, udpAddr)
if err != nil {
return nil, fmt.Errorf("Error dialing UDP address [%s]: %s",
udpAddr.String(), err)
}
size := config.PayloadSize
if size == 0 {
size = UDPPayloadSize
}
buf := make([]byte, size)
return &udpClient{conn: conn, buffer: buf}, nil
}
// udpClient implements Client over a single UDP connection.
// NOTE(review): buffer is shared scratch space reused by WriteStream, so
// concurrent WriteStream calls on one client would race — confirm callers
// serialize writes.
type udpClient struct {
	conn *net.UDPConn
	buffer []byte
}
// Query will send the provided query command to the client, returning an error if any issues arise
func (c *udpClient) Query(command string) error {
return nil
}
// WriteStream will send the provided data through to the client, contentLength is ignored by the UDP client
func (c *udpClient) WriteStream(r io.Reader) error {
var totaln int
for {
nR, err := r.Read(c.buffer)
if nR == 0 {
break
}
if err != io.EOF && err != nil {
return err
}
if c.buffer[nR-1] == uint8('\n') {
nW, err := c.conn.Write(c.buffer[0:nR])
totaln += nW
if err != nil {
return err
}
} else {
log.Printf("E! Could not fit point into UDP payload; dropping")
// Scan forward until next line break to realign.
for {
nR, err := r.Read(c.buffer)
if nR == 0 {
break
}
if err != io.EOF && err != nil {
return err
}
if c.buffer[nR-1] == uint8('\n') {
break
}
}
}
}
return nil
}
// Close will terminate the provided client connection
func (c *udpClient) Close() error {
return c.conn.Close()
}

View File

@@ -1,89 +0,0 @@
package client
import (
"net"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/stretchr/testify/assert"
)
func TestUDPClient(t *testing.T) {
config := UDPConfig{
URL: "udp://localhost:8089",
}
client, err := NewUDP(config)
assert.NoError(t, err)
err = client.Query("ANY QUERY RETURNS NIL")
assert.NoError(t, err)
assert.NoError(t, client.Close())
}
func TestNewUDPClient_Errors(t *testing.T) {
// url.Parse Error
config := UDPConfig{
URL: "udp://localhost%35:8089",
}
_, err := NewUDP(config)
assert.Error(t, err)
// ResolveUDPAddr Error
config = UDPConfig{
URL: "udp://localhost:999999",
}
_, err = NewUDP(config)
assert.Error(t, err)
}
// TestUDPClient_Write verifies that a metric too large for the configured
// payload size is dropped while a metric that fits is delivered intact.
func TestUDPClient_Write(t *testing.T) {
	config := UDPConfig{
		URL: "udp://localhost:8199",
	}
	client, err := NewUDP(config)
	assert.NoError(t, err)

	packets := make(chan string, 100)
	// Local UDP listener that forwards each received datagram (or read
	// error) onto the packets channel.
	address, err := net.ResolveUDPAddr("udp", "localhost:8199")
	assert.NoError(t, err)
	listener, err := net.ListenUDP("udp", address)
	defer listener.Close()
	assert.NoError(t, err)
	go func() {
		buf := make([]byte, 200)
		for {
			n, _, err := listener.ReadFromUDP(buf)
			if err != nil {
				packets <- err.Error()
			}
			packets <- string(buf[0:n])
		}
	}()

	// The first client is unused beyond construction; close it and build a
	// second one with a 40-byte payload so the long metric cannot fit.
	assert.NoError(t, client.Close())
	config = UDPConfig{
		URL:         "udp://localhost:8199",
		PayloadSize: 40,
	}
	client4, err := NewUDP(config)
	assert.NoError(t, err)
	ts := time.Unix(1484142943, 0)
	// m1 serializes longer than 40 bytes and should be dropped; m2 fits.
	m1, _ := metric.New("test", map[string]string{},
		map[string]interface{}{"this_is_a_very_long_field_name": 1.1}, ts)
	m2, _ := metric.New("test", map[string]string{},
		map[string]interface{}{"value": 1.1}, ts)
	ms := []telegraf.Metric{m1, m2}
	reader := metric.NewReader(ms)
	err = client4.WriteStream(reader)
	assert.NoError(t, err)

	// Only the short metric should arrive.
	pkt := <-packets
	assert.Equal(t, "test value=1.1 1484142943000000000\n", pkt)

	assert.NoError(t, client4.Close())
}

View File

@@ -0,0 +1,404 @@
package influxdb
import (
"compress/gzip"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"net/url"
"path"
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/serializers/influx"
)
// APIErrorType categorizes server errors that callers handle specially.
type APIErrorType int

const (
	_ APIErrorType = iota
	// DatabaseNotFound indicates the target database does not exist.
	DatabaseNotFound
)

const (
	// Defaults applied by NewHTTPClient when config fields are zero.
	defaultRequestTimeout = time.Second * 5
	defaultDatabase       = "telegraf"
	defaultUserAgent      = "telegraf"

	// Substrings matched against server error messages to classify write
	// failures (see httpClient.Write).
	errStringDatabaseNotFound      = "database not found"
	errStringHintedHandoffNotEmpty = "hinted handoff queue not empty"
	errStringPartialWrite          = "partial write"
	errStringPointsBeyondRP        = "points beyond retention policy"
	errStringUnableToParse         = "unable to parse"
)
var (
	// escapeIdentifier escapes a name for interpolation into a double-quoted
	// InfluxQL identifier (used when building CREATE DATABASE statements).
	escapeIdentifier = strings.NewReplacer(
		"\n", `\n`,
		`\`, `\\`,
		`"`, `\"`,
	)
)
// APIError is an error reported by the InfluxDB server
type APIError struct {
	StatusCode  int
	Title       string
	Description string
	Type        APIErrorType
}

// Error renders the error as "Title" alone, or "Title: Description" when a
// description is present.
func (e APIError) Error() string {
	if e.Description == "" {
		return e.Title
	}
	return fmt.Sprintf("%s: %s", e.Title, e.Description)
}
// QueryResponse is the response body from the /query endpoint
type QueryResponse struct {
	Results []QueryResult `json:"results"`
}

// QueryResult holds the per-statement error, if any.
type QueryResult struct {
	Err string `json:"error,omitempty"`
}

// Error returns the error message of the first result, or the empty string
// when the response carries no results.
func (r QueryResponse) Error() string {
	if len(r.Results) == 0 {
		return ""
	}
	return r.Results[0].Err
}
// WriteResponse is the response body from the /write endpoint
type WriteResponse struct {
	Err string `json:"error,omitempty"`
}

// Error exposes the server-reported error message; empty when none was set.
func (r WriteResponse) Error() string {
	return r.Err
}
// HTTPConfig holds the options used to construct an httpClient via
// NewHTTPClient. Zero values for UserAgent, Timeout, and Database are
// replaced with package defaults.
type HTTPConfig struct {
	URL             *url.URL          // server origin; required
	UserAgent       string            // User-Agent header; defaults to "telegraf"
	Timeout         time.Duration     // request timeout; defaults to 5s
	Username        string            // basic auth user; auth sent if user or password set
	Password        string            // basic auth password
	TLSConfig       *tls.Config       // optional TLS settings for the transport
	Proxy           *url.URL          // explicit proxy; nil falls back to environment
	Headers         map[string]string // extra headers applied to every request
	ContentEncoding string            // "gzip" compresses write bodies
	Database        string            // target database; defaults to "telegraf"
	RetentionPolicy string            // optional retention policy for writes
	Consistency     string            // optional write consistency level
	Serializer      *influx.Serializer // metric serializer; nil gets a default
}
// httpClient writes metrics to an InfluxDB server over HTTP. Construct it
// with NewHTTPClient; the write/query URLs are precomputed from the config.
type httpClient struct {
	WriteURL        string // full /write endpoint URL with query params
	QueryURL        string // full /query endpoint URL
	ContentEncoding string // "gzip" enables request body compression
	Timeout         time.Duration
	Username        string
	Password        string
	Headers         map[string]string

	client     *http.Client
	serializer *influx.Serializer
	url        *url.URL // origin URL, for logging/identification
	database   string   // resolved database name (never empty)
}
// NewHTTPClient builds an httpClient from config, applying defaults for the
// database name, timeout, user agent, proxy, and serializer. It returns
// ErrMissingURL when config.URL is nil.
func NewHTTPClient(config *HTTPConfig) (*httpClient, error) {
	if config.URL == nil {
		return nil, ErrMissingURL
	}

	database := config.Database
	if database == "" {
		database = defaultDatabase
	}

	timeout := config.Timeout
	if timeout == 0 {
		timeout = defaultRequestTimeout
	}

	userAgent := config.UserAgent
	if userAgent == "" {
		userAgent = defaultUserAgent
	}

	// Copy configured headers and force the User-Agent entry.
	var headers = make(map[string]string, len(config.Headers)+1)
	headers["User-Agent"] = userAgent
	for k, v := range config.Headers {
		headers[k] = v
	}

	// An explicit proxy wins; otherwise defer to the environment variables.
	var proxy func(*http.Request) (*url.URL, error)
	if config.Proxy != nil {
		proxy = http.ProxyURL(config.Proxy)
	} else {
		proxy = http.ProxyFromEnvironment
	}

	serializer := config.Serializer
	if serializer == nil {
		serializer = influx.NewSerializer()
	}

	// Precompute endpoint URLs once so Write/CreateDatabase don't rebuild them.
	writeURL := makeWriteURL(
		config.URL,
		database,
		config.RetentionPolicy,
		config.Consistency)
	queryURL := makeQueryURL(config.URL)

	client := &httpClient{
		serializer: serializer,
		client: &http.Client{
			Timeout: timeout,
			Transport: &http.Transport{
				Proxy:           proxy,
				TLSClientConfig: config.TLSConfig,
			},
		},
		database:        database,
		url:             config.URL,
		WriteURL:        writeURL,
		QueryURL:        queryURL,
		ContentEncoding: config.ContentEncoding,
		Timeout:         timeout,
		Username:        config.Username,
		Password:        config.Password,
		Headers:         headers,
	}
	return client, nil
}
// URL returns the origin URL that this client connects to.
func (c *httpClient) URL() string {
	return c.url.String()
}
// Database returns the database name that this client writes to.
func (c *httpClient) Database() string {
	return c.database
}
// CreateDatabase attempts to create a new database in the InfluxDB server.
// Note that some names are not allowed by the server, notably those with
// non-printable characters or slashes.
//
// A 200 response with an unparseable or missing body is treated as success;
// any other status, or an in-body error, is returned as an *APIError.
func (c *httpClient) CreateDatabase(ctx context.Context) error {
	query := fmt.Sprintf(`CREATE DATABASE "%s"`,
		escapeIdentifier.Replace(c.database))

	req, err := c.makeQueryRequest(query)
	// Fix: the original ignored this error and immediately reassigned err
	// from client.Do, which would dereference a nil request on failure.
	if err != nil {
		return err
	}

	resp, err := c.client.Do(req.WithContext(ctx))
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	queryResp := &QueryResponse{}
	dec := json.NewDecoder(resp.Body)
	err = dec.Decode(queryResp)

	if err != nil {
		// Body wasn't valid JSON; trust the status code alone.
		if resp.StatusCode == 200 {
			return nil
		}

		return &APIError{
			StatusCode: resp.StatusCode,
			Title:      resp.Status,
		}
	}

	// Even with a 200 response there can be an error
	if resp.StatusCode == http.StatusOK && queryResp.Error() == "" {
		return nil
	}

	return &APIError{
		StatusCode:  resp.StatusCode,
		Title:       resp.Status,
		Description: queryResp.Error(),
	}
}
// Write sends the metrics to InfluxDB
//
// A 204 response is success. For error responses the body is decoded and the
// server's message is matched against known error strings: "database not
// found" is returned as a typed APIError so the caller can recreate the
// database; hinted-handoff, beyond-retention-policy, partial-write, and
// parse errors are logged (or ignored) and swallowed because retrying them
// cannot succeed; anything else becomes a generic *APIError.
func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error {
	var err error

	reader := influx.NewReader(metrics, c.serializer)
	req, err := c.makeWriteRequest(reader)
	if err != nil {
		return err
	}

	resp, err := c.client.Do(req.WithContext(ctx))
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNoContent {
		return nil
	}

	// Best effort decode of the error body; desc stays empty on failure.
	writeResp := &WriteResponse{}
	dec := json.NewDecoder(resp.Body)

	var desc string
	err = dec.Decode(writeResp)
	if err == nil {
		desc = writeResp.Err
	}

	if strings.Contains(desc, errStringDatabaseNotFound) {
		return &APIError{
			StatusCode:  resp.StatusCode,
			Title:       resp.Status,
			Description: desc,
			Type:        DatabaseNotFound,
		}
	}

	// This "error" is an informational message about the state of the
	// InfluxDB cluster.
	if strings.Contains(desc, errStringHintedHandoffNotEmpty) {
		return nil
	}

	// Points beyond retention policy is returned when points are immediately
	// discarded for being older than the retention policy.  Usually this not
	// a cause for concern and we don't want to retry.
	if strings.Contains(desc, errStringPointsBeyondRP) {
		log.Printf("W! [outputs.influxdb]: when writing to [%s]: received error %v",
			c.URL(), desc)
		return nil
	}

	// Other partial write errors, such as "field type conflict", are not
	// correctable at this point and so the point is dropped instead of
	// retrying.
	if strings.Contains(desc, errStringPartialWrite) {
		log.Printf("E! [outputs.influxdb]: when writing to [%s]: received error %v; discarding points",
			c.URL(), desc)
		return nil
	}

	// This error indicates a bug in either Telegraf line protocol
	// serialization, retries would not be successful.
	if strings.Contains(desc, errStringUnableToParse) {
		log.Printf("E! [outputs.influxdb]: when writing to [%s]: received error %v; discarding points",
			c.URL(), desc)
		return nil
	}

	return &APIError{
		StatusCode:  resp.StatusCode,
		Title:       resp.Status,
		Description: desc,
	}
}
// makeQueryRequest builds a form-encoded POST to the /query endpoint
// carrying the given InfluxQL statement, with auth and headers applied.
func (c *httpClient) makeQueryRequest(query string) (*http.Request, error) {
	values := url.Values{}
	values.Set("q", query)

	req, err := http.NewRequest("POST", c.QueryURL, strings.NewReader(values.Encode()))
	if err != nil {
		return nil, err
	}

	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	c.addHeaders(req)

	return req, nil
}
// makeWriteRequest builds a POST to the /write endpoint for the given line
// protocol body, gzip-wrapping it (and labeling it so) when the client's
// content encoding is "gzip".
func (c *httpClient) makeWriteRequest(body io.Reader) (*http.Request, error) {
	gzipped := c.ContentEncoding == "gzip"
	if gzipped {
		var err error
		body, err = compressWithGzip(body)
		if err != nil {
			return nil, err
		}
	}

	req, err := http.NewRequest("POST", c.WriteURL, body)
	if err != nil {
		return nil, err
	}

	req.Header.Set("Content-Type", "text/plain; charset=utf-8")
	c.addHeaders(req)
	if gzipped {
		req.Header.Set("Content-Encoding", "gzip")
	}

	return req, nil
}
func compressWithGzip(data io.Reader) (io.Reader, error) {
pr, pw := io.Pipe()
gw := gzip.NewWriter(pw)
var err error
go func() {
_, err = io.Copy(gw, data)
gw.Close()
pw.Close()
}()
return pr, err
}
// addHeaders applies basic auth (when a username or password is configured)
// and every configured header, including User-Agent, to req.
func (c *httpClient) addHeaders(req *http.Request) {
	if c.Username != "" || c.Password != "" {
		req.SetBasicAuth(c.Username, c.Password)
	}

	for name, value := range c.Headers {
		req.Header.Set(name, value)
	}
}
func makeWriteURL(loc *url.URL, db, rp, consistency string) string {
params := url.Values{}
params.Set("db", db)
if rp != "" {
params.Set("rp", rp)
}
if consistency != "one" && consistency != "" {
params.Set("consistency", consistency)
}
u := *loc
u.Path = path.Join(u.Path, "write")
u.RawQuery = params.Encode()
return u.String()
}
func makeQueryURL(loc *url.URL) string {
u := *loc
u.Path = path.Join(u.Path, "query")
return u.String()
}

View File

@@ -0,0 +1,558 @@
package influxdb_test
import (
"bytes"
"compress/gzip"
"context"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/outputs/influxdb"
"github.com/stretchr/testify/require"
)
// getHTTPURL returns a parsed http://localhost URL for building test
// configurations; it panics if the constant URL fails to parse.
func getHTTPURL() *url.URL {
	parsed, err := url.Parse("http://localhost")
	if err != nil {
		panic(err)
	}
	return parsed
}
// TestHTTP_EmptyConfig verifies that constructing a client without a URL
// fails with ErrMissingURL.
func TestHTTP_EmptyConfig(t *testing.T) {
	config := &influxdb.HTTPConfig{}
	_, err := influxdb.NewHTTPClient(config)
	require.Error(t, err)
	require.Contains(t, err.Error(), influxdb.ErrMissingURL.Error())
}
// TestHTTP_MinimalConfig verifies that a URL alone is sufficient to
// construct a client (all other fields take defaults).
func TestHTTP_MinimalConfig(t *testing.T) {
	config := &influxdb.HTTPConfig{
		URL: getHTTPURL(),
	}
	_, err := influxdb.NewHTTPClient(config)
	require.NoError(t, err)
}
// TestHTTP_CreateDatabase runs table-driven cases against a local test
// server: each case installs a /query handler that inspects the request and
// stubs a response, then asserts CreateDatabase's resulting error (if any).
//
// NOTE(review): the `database` field of the case struct is assigned in some
// cases but never read, and the "send user agent" case checks the A/C
// headers rather than the User-Agent header — both look like copy/paste
// leftovers; confirm intent.
func TestHTTP_CreateDatabase(t *testing.T) {
	ts := httptest.NewServer(http.NotFoundHandler())
	defer ts.Close()

	u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
	require.NoError(t, err)

	successResponse := []byte(`{"results": [{"statement_id": 0}]}`)

	tests := []struct {
		name             string
		config           *influxdb.HTTPConfig
		database         string
		queryHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request)
		errFunc          func(t *testing.T, err error)
	}{
		{
			name: "success",
			config: &influxdb.HTTPConfig{
				URL:      u,
				Database: "xyzzy",
			},
			queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				require.Equal(t, `CREATE DATABASE "xyzzy"`, r.FormValue("q"))
				w.WriteHeader(http.StatusOK)
				w.Write(successResponse)
			},
		},
		{
			name: "send basic auth",
			config: &influxdb.HTTPConfig{
				URL:      u,
				Username: "guy",
				Password: "smiley",
				Database: "telegraf",
			},
			database: "telegraf",
			queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				username, password, ok := r.BasicAuth()
				require.True(t, ok)
				require.Equal(t, "guy", username)
				require.Equal(t, "smiley", password)
				w.WriteHeader(http.StatusOK)
				w.Write(successResponse)
			},
		},
		{
			name: "send user agent",
			config: &influxdb.HTTPConfig{
				URL: u,
				Headers: map[string]string{
					"A": "B",
					"C": "D",
				},
				Database: "telegraf",
			},
			database: `a " b`,
			queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				require.Equal(t, r.Header.Get("A"), "B")
				require.Equal(t, r.Header.Get("C"), "D")
				w.WriteHeader(http.StatusOK)
				w.Write(successResponse)
			},
		},
		{
			name: "send headers",
			config: &influxdb.HTTPConfig{
				URL: u,
				Headers: map[string]string{
					"A": "B",
					"C": "D",
				},
				Database: "telegraf",
			},
			queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				require.Equal(t, r.Header.Get("A"), "B")
				require.Equal(t, r.Header.Get("C"), "D")
				w.WriteHeader(http.StatusOK)
				w.Write(successResponse)
			},
		},
		{
			name: "database default",
			config: &influxdb.HTTPConfig{
				URL: u,
			},
			queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				require.Equal(t, `CREATE DATABASE "telegraf"`, r.FormValue("q"))
				w.WriteHeader(http.StatusOK)
				w.Write(successResponse)
			},
		},
		{
			name: "database name is escaped",
			config: &influxdb.HTTPConfig{
				URL:      u,
				Database: `a " b`,
			},
			queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				require.Equal(t, `CREATE DATABASE "a \" b"`, r.FormValue("q"))
				w.WriteHeader(http.StatusOK)
				w.Write(successResponse)
			},
		},
		{
			name: "invalid database name creates api error",
			config: &influxdb.HTTPConfig{
				URL:      u,
				Database: `a \\ b`,
			},
			queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				// Yes, 200 OK is the correct response...
				w.WriteHeader(http.StatusOK)
				w.Write([]byte(`{"results": [{"error": "invalid name", "statement_id": 0}]}`))
			},
			errFunc: func(t *testing.T, err error) {
				expected := &influxdb.APIError{
					StatusCode:  200,
					Title:       "200 OK",
					Description: "invalid name",
				}
				require.Equal(t, expected, err)
			},
		},
		{
			name: "error with no response body",
			config: &influxdb.HTTPConfig{
				URL:      u,
				Database: "telegraf",
			},
			queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusNotFound)
			},
			errFunc: func(t *testing.T, err error) {
				expected := &influxdb.APIError{
					StatusCode: 404,
					Title:      "404 Not Found",
				}
				require.Equal(t, expected, err)
			},
		},
		{
			name: "ok with no response body",
			config: &influxdb.HTTPConfig{
				URL:      u,
				Database: "telegraf",
			},
			queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusOK)
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Swap in the per-case handler; any non-/query path 404s.
			ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				switch r.URL.Path {
				case "/query":
					tt.queryHandlerFunc(t, w, r)
					return
				default:
					w.WriteHeader(http.StatusNotFound)
					return
				}
			})

			ctx := context.Background()

			client, err := influxdb.NewHTTPClient(tt.config)
			require.NoError(t, err)
			err = client.CreateDatabase(ctx)
			if tt.errFunc != nil {
				tt.errFunc(t, err)
			} else {
				require.NoError(t, err)
			}
		})
	}
}
// TestHTTP_Write runs table-driven cases against a local test server: each
// case installs a /write handler, calls Write with one metric, then asserts
// the returned error (errFunc) and/or captured log output (logFunc).
//
// NOTE(review): cases with a logFunc redirect the global logger via
// log.SetOutput(&b) and never restore it — later tests in the package see
// the redirected output; confirm this is acceptable.
func TestHTTP_Write(t *testing.T) {
	ts := httptest.NewServer(http.NotFoundHandler())
	defer ts.Close()

	u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
	require.NoError(t, err)

	tests := []struct {
		name             string
		config           *influxdb.HTTPConfig
		queryHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request)
		errFunc          func(t *testing.T, err error)
		logFunc          func(t *testing.T, str string)
	}{
		{
			name: "success",
			config: &influxdb.HTTPConfig{
				URL:      u,
				Database: "telegraf",
			},
			queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				require.Equal(t, r.FormValue("db"), "telegraf")
				body, err := ioutil.ReadAll(r.Body)
				require.NoError(t, err)
				require.Contains(t, string(body), "cpu value=42")
				w.WriteHeader(http.StatusNoContent)
			},
		},
		{
			name: "send basic auth",
			config: &influxdb.HTTPConfig{
				URL:      u,
				Database: "telegraf",
				Username: "guy",
				Password: "smiley",
			},
			queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				username, password, ok := r.BasicAuth()
				require.True(t, ok)
				require.Equal(t, "guy", username)
				require.Equal(t, "smiley", password)
				w.WriteHeader(http.StatusNoContent)
			},
		},
		{
			name: "send user agent",
			config: &influxdb.HTTPConfig{
				URL:       u,
				Database:  "telegraf",
				UserAgent: "telegraf",
			},
			queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				require.Equal(t, r.Header.Get("User-Agent"), "telegraf")
				w.WriteHeader(http.StatusNoContent)
			},
		},
		{
			name: "default database",
			config: &influxdb.HTTPConfig{
				URL: u,
			},
			queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				require.Equal(t, "telegraf", r.FormValue("db"))
				w.WriteHeader(http.StatusNoContent)
			},
		},
		{
			name: "send headers",
			config: &influxdb.HTTPConfig{
				URL: u,
				Headers: map[string]string{
					"A": "B",
					"C": "D",
				},
			},
			queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				require.Equal(t, r.Header.Get("A"), "B")
				require.Equal(t, r.Header.Get("C"), "D")
				w.WriteHeader(http.StatusNoContent)
			},
		},
		{
			name: "send retention policy",
			config: &influxdb.HTTPConfig{
				URL:             u,
				Database:        "telegraf",
				RetentionPolicy: "foo",
			},
			queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				require.Equal(t, "foo", r.FormValue("rp"))
				w.WriteHeader(http.StatusNoContent)
			},
		},
		{
			name: "send consistency",
			config: &influxdb.HTTPConfig{
				URL:         u,
				Database:    "telegraf",
				Consistency: "all",
			},
			queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				require.Equal(t, "all", r.FormValue("consistency"))
				w.WriteHeader(http.StatusNoContent)
			},
		},
		{
			name: "hinted handoff not empty no log no error",
			config: &influxdb.HTTPConfig{
				URL:      u,
				Database: "telegraf",
			},
			queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusBadRequest)
				w.Write([]byte(`{"error": "write failed: hinted handoff queue not empty"}`))
			},
			logFunc: func(t *testing.T, str string) {
				require.False(t, strings.Contains(str, "hinted handoff queue not empty"))
			},
		},
		{
			name: "partial write errors are logged no error",
			config: &influxdb.HTTPConfig{
				URL:      u,
				Database: "telegraf",
			},
			queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusBadRequest)
				w.Write([]byte(`{"error": "partial write: field type conflict:"}`))
			},
			logFunc: func(t *testing.T, str string) {
				require.Contains(t, str, "partial write")
			},
		},
		{
			name: "parse errors are logged no error",
			config: &influxdb.HTTPConfig{
				URL:      u,
				Database: "telegraf",
			},
			queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusBadRequest)
				w.Write([]byte(`{"error": "unable to parse 'cpu value': invalid field format"}`))
			},
			logFunc: func(t *testing.T, str string) {
				require.Contains(t, str, "unable to parse")
			},
		},
		{
			name: "http error",
			config: &influxdb.HTTPConfig{
				URL:      u,
				Database: "telegraf",
			},
			queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusBadGateway)
			},
			errFunc: func(t *testing.T, err error) {
				expected := &influxdb.APIError{
					StatusCode: 502,
					Title:      "502 Bad Gateway",
				}
				require.Equal(t, expected, err)
			},
		},
		{
			name: "http error with desc",
			config: &influxdb.HTTPConfig{
				URL:      u,
				Database: "telegraf",
			},
			queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusServiceUnavailable)
				w.Write([]byte(`{"error": "unknown error"}`))
			},
			errFunc: func(t *testing.T, err error) {
				expected := &influxdb.APIError{
					StatusCode:  503,
					Title:       "503 Service Unavailable",
					Description: "unknown error",
				}
				require.Equal(t, expected, err)
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Swap in the per-case handler; any non-/write path 404s.
			ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				switch r.URL.Path {
				case "/write":
					tt.queryHandlerFunc(t, w, r)
					return
				default:
					w.WriteHeader(http.StatusNotFound)
					return
				}
			})

			// Capture log output only for cases that assert on it.
			var b bytes.Buffer
			if tt.logFunc != nil {
				log.SetOutput(&b)
			}

			ctx := context.Background()

			m, err := metric.New(
				"cpu",
				map[string]string{},
				map[string]interface{}{
					"value": 42.0,
				},
				time.Unix(0, 0),
			)
			require.NoError(t, err)
			metrics := []telegraf.Metric{m}

			client, err := influxdb.NewHTTPClient(tt.config)
			require.NoError(t, err)
			err = client.Write(ctx, metrics)
			if tt.errFunc != nil {
				tt.errFunc(t, err)
			} else {
				require.NoError(t, err)
			}
			if tt.logFunc != nil {
				tt.logFunc(t, b.String())
			}
		})
	}
}
// TestHTTP_WritePathPrefix verifies that a path prefix on the origin URL
// (here /x/y/z) is preserved when deriving the /query and /write endpoints.
func TestHTTP_WritePathPrefix(t *testing.T) {
	ts := httptest.NewServer(
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			switch r.URL.Path {
			case "/x/y/z/query":
				w.WriteHeader(http.StatusOK)
				return
			case "/x/y/z/write":
				w.WriteHeader(http.StatusNoContent)
				return
			default:
				// Any request missing the prefix fails the test via 404.
				w.WriteHeader(http.StatusNotFound)
				return
			}
		},
		),
	)
	defer ts.Close()

	u, err := url.Parse(fmt.Sprintf("http://%s/x/y/z", ts.Listener.Addr().String()))
	require.NoError(t, err)

	ctx := context.Background()

	m, err := metric.New(
		"cpu",
		map[string]string{},
		map[string]interface{}{
			"value": 42.0,
		},
		time.Unix(0, 0),
	)
	require.NoError(t, err)
	metrics := []telegraf.Metric{m}

	config := &influxdb.HTTPConfig{
		URL:      u,
		Database: "telegraf",
	}

	client, err := influxdb.NewHTTPClient(config)
	require.NoError(t, err)
	err = client.CreateDatabase(ctx)
	require.NoError(t, err)
	err = client.Write(ctx, metrics)
	require.NoError(t, err)
}
// TestHTTP_WriteContentEncodingGzip verifies that with ContentEncoding set
// to "gzip" the write request carries the Content-Encoding header and a body
// that gunzips back to the serialized metric.
func TestHTTP_WriteContentEncodingGzip(t *testing.T) {
	ts := httptest.NewServer(
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			switch r.URL.Path {
			case "/write":
				require.Equal(t, r.Header.Get("Content-Encoding"), "gzip")

				// Decompress the body and check the line protocol inside.
				gr, err := gzip.NewReader(r.Body)
				require.NoError(t, err)

				body, err := ioutil.ReadAll(gr)
				require.NoError(t, err)

				require.Contains(t, string(body), "cpu value=42")
				w.WriteHeader(http.StatusNoContent)
				return
			default:
				w.WriteHeader(http.StatusNotFound)
				return
			}
		},
		),
	)
	defer ts.Close()

	u, err := url.Parse(fmt.Sprintf("http://%s/", ts.Listener.Addr().String()))
	require.NoError(t, err)

	ctx := context.Background()

	m, err := metric.New(
		"cpu",
		map[string]string{},
		map[string]interface{}{
			"value": 42.0,
		},
		time.Unix(0, 0),
	)
	require.NoError(t, err)
	metrics := []telegraf.Metric{m}

	config := &influxdb.HTTPConfig{
		URL:             u,
		Database:        "telegraf",
		ContentEncoding: "gzip",
	}

	client, err := influxdb.NewHTTPClient(config)
	require.NoError(t, err)
	err = client.Write(ctx, metrics)
	require.NoError(t, err)
}

View File

@@ -1,29 +1,37 @@
package influxdb
import (
"context"
"errors"
"fmt"
"log"
"math/rand"
"strings"
"net/url"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/outputs/influxdb/client"
"github.com/influxdata/telegraf/plugins/serializers/influx"
)
var (
// Quote Ident replacer.
qiReplacer = strings.NewReplacer("\n", `\n`, `\`, `\\`, `"`, `\"`)
defaultURL = "http://localhost:8086"
ErrMissingURL = errors.New("missing URL")
)
type Client interface {
Write(context.Context, []telegraf.Metric) error
CreateDatabase(ctx context.Context) error
URL() string
Database() string
}
// InfluxDB struct is the primary data structure for the plugin
type InfluxDB struct {
// URL is only for backwards compatibility
URL string
URL string // url deprecated in 0.1.9; use urls
URLs []string `toml:"urls"`
Username string
Password string
@@ -46,36 +54,45 @@ type InfluxDB struct {
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
// Precision is only here for legacy support. It will be ignored.
Precision string
Precision string // precision deprecated in 1.0; value is ignored
clients []client.Client
clients []Client
CreateHTTPClientF func(config *HTTPConfig) (Client, error)
CreateUDPClientF func(config *UDPConfig) (Client, error)
serializer *influx.Serializer
}
var sampleConfig = `
## The full HTTP or UDP URL for your InfluxDB instance.
##
## Multiple urls can be specified as part of the same cluster,
## this means that only ONE of the urls will be written to each interval.
# urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
urls = ["http://127.0.0.1:8086"] # required
## The target database for metrics (telegraf will create it if not exists).
database = "telegraf" # required
## Multiple URLs can be specified for a single cluster, only ONE of the
## urls will be written to each interval.
# urls = ["udp://127.0.0.1:8089"]
# urls = ["http://127.0.0.1:8086"]
## The target database for metrics; will be created as needed.
# database = "telegraf"
## Name of existing retention policy to write to. Empty string writes to
## the default retention policy.
retention_policy = ""
## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
write_consistency = "any"
# retention_policy = ""
## Write timeout (for the InfluxDB client), formatted as a string.
## If not provided, will default to 5s. 0s means no timeout (not recommended).
timeout = "5s"
## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
# write_consistency = "any"
## Timeout for HTTP messages.
# timeout = "5s"
## HTTP Basic Auth
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
## Set the user agent for HTTP POSTs (can be useful for log differentiation)
## HTTP User-Agent
# user_agent = "telegraf"
## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
## UDP payload size is the maximum packet size to send.
# udp_payload = 512
## Optional SSL Config
@@ -85,170 +102,181 @@ var sampleConfig = `
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## HTTP Proxy Config
## HTTP Proxy override, if unset values the standard proxy environment
## variables are consulted to determine which proxy, if any, should be used.
# http_proxy = "http://corporate.proxy:3128"
## Optional HTTP headers
## Additional HTTP headers
# http_headers = {"X-Special-Header" = "Special-Value"}
## Compress each HTTP request payload using GZIP.
# content_encoding = "gzip"
## HTTP Content-Encoding for write request body, can be set to "gzip" to
## compress body or "identity" to apply no encoding.
# content_encoding = "identity"
`
// Connect initiates the primary connection to the range of provided URLs
func (i *InfluxDB) Connect() error {
var urls []string
urls = append(urls, i.URLs...)
ctx := context.Background()
// Backward-compatibility with single Influx URL config files
// This could eventually be removed in favor of specifying the urls as a list
urls := make([]string, 0, len(i.URLs))
urls = append(urls, i.URLs...)
if i.URL != "" {
urls = append(urls, i.URL)
}
tlsConfig, err := internal.GetTLSConfig(
i.SSLCert, i.SSLKey, i.SSLCA, i.InsecureSkipVerify)
if err != nil {
return err
if len(urls) == 0 {
urls = append(urls, defaultURL)
}
i.serializer = influx.NewSerializer()
for _, u := range urls {
switch {
case strings.HasPrefix(u, "udp"):
config := client.UDPConfig{
URL: u,
PayloadSize: i.UDPPayload,
}
c, err := client.NewUDP(config)
u, err := url.Parse(u)
if err != nil {
return fmt.Errorf("error parsing url [%s]: %v", u, err)
}
var proxy *url.URL
if len(i.HTTPProxy) > 0 {
proxy, err = url.Parse(i.HTTPProxy)
if err != nil {
return fmt.Errorf("Error creating UDP Client [%s]: %s", u, err)
return fmt.Errorf("error parsing proxy_url [%s]: %v", proxy, err)
}
}
switch u.Scheme {
case "udp", "udp4", "udp6":
c, err := i.udpClient(u)
if err != nil {
return err
}
i.clients = append(i.clients, c)
case "http", "https":
c, err := i.httpClient(ctx, u, proxy)
if err != nil {
return err
}
i.clients = append(i.clients, c)
default:
// If URL doesn't start with "udp", assume HTTP client
config := client.HTTPConfig{
URL: u,
Timeout: i.Timeout.Duration,
TLSConfig: tlsConfig,
UserAgent: i.UserAgent,
Username: i.Username,
Password: i.Password,
HTTPProxy: i.HTTPProxy,
HTTPHeaders: client.HTTPHeaders{},
ContentEncoding: i.ContentEncoding,
}
for header, value := range i.HTTPHeaders {
config.HTTPHeaders[header] = value
}
wp := client.WriteParams{
Database: i.Database,
RetentionPolicy: i.RetentionPolicy,
Consistency: i.WriteConsistency,
}
c, err := client.NewHTTP(config, wp)
if err != nil {
return fmt.Errorf("Error creating HTTP Client [%s]: %s", u, err)
}
i.clients = append(i.clients, c)
err = c.Query(fmt.Sprintf(`CREATE DATABASE "%s"`, qiReplacer.Replace(i.Database)))
if err != nil {
if !strings.Contains(err.Error(), "Status Code [403]") {
log.Println("I! Database creation failed: " + err.Error())
}
continue
}
return fmt.Errorf("unsupported scheme [%s]: %q", u, u.Scheme)
}
}
rand.Seed(time.Now().UnixNano())
return nil
}
// Close will terminate the session to the backend, returning error if an issue arises
func (i *InfluxDB) Close() error {
return nil
}
// SampleConfig returns the formatted sample configuration for the plugin
func (i *InfluxDB) Description() string {
return "Configuration for sending metrics to InfluxDB"
}
func (i *InfluxDB) SampleConfig() string {
return sampleConfig
}
// Description returns the human-readable function definition of the plugin
func (i *InfluxDB) Description() string {
return "Configuration for influxdb server to send metrics to"
}
// Write will choose a random server in the cluster to write to until a successful write
// occurs, logging each unsuccessful. If all servers fail, return error.
// Write sends metrics to one of the configured servers, logging each
// unsuccessful. If all servers fail, return an error.
func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
r := metric.NewReader(metrics)
// This will get set to nil if a successful write occurs
err := fmt.Errorf("Could not write to any InfluxDB server in cluster")
ctx := context.Background()
var err error
p := rand.Perm(len(i.clients))
for _, n := range p {
if e := i.clients[n].WriteStream(r); e != nil {
// If the database was not found, try to recreate it:
if strings.Contains(e.Error(), "database not found") {
errc := i.clients[n].Query(fmt.Sprintf(`CREATE DATABASE "%s"`, qiReplacer.Replace(i.Database)))
if errc != nil {
log.Printf("E! Error: Database %s not found and failed to recreate\n",
i.Database)
client := i.clients[n]
err = client.Write(ctx, metrics)
if err == nil {
return nil
}
switch apiError := err.(type) {
case APIError:
if apiError.Type == DatabaseNotFound {
err := client.CreateDatabase(ctx)
if err != nil {
log.Printf("E! [outputs.influxdb] when writing to [%s]: database %q not found and failed to recreate",
client.URL(), client.Database())
}
}
if strings.Contains(e.Error(), "field type conflict") {
log.Printf("E! Field type conflict, dropping conflicted points: %s", e)
// setting err to nil, otherwise we will keep retrying and points
// w/ conflicting types will get stuck in the buffer forever.
err = nil
break
}
if strings.Contains(e.Error(), "points beyond retention policy") {
log.Printf("W! Points beyond retention policy: %s", e)
// This error is indicates the point is older than the
// retention policy permits, and is probably not a cause for
// concern. Retrying will not help unless the retention
// policy is modified.
err = nil
break
}
if strings.Contains(e.Error(), "unable to parse") {
log.Printf("E! Parse error; dropping points: %s", e)
// This error indicates a bug in Telegraf or InfluxDB parsing
// of line protocol. Retries will not be successful.
err = nil
break
}
if strings.Contains(e.Error(), "hinted handoff queue not empty") {
// This is an informational message
err = nil
break
}
// Log write failure
log.Printf("E! InfluxDB Output Error: %s", e)
} else {
err = nil
break
}
log.Printf("E! [outputs.influxdb]: when writing to [%s]: %v", client.URL(), err)
}
return err
return errors.New("could not write any address")
}
func newInflux() *InfluxDB {
return &InfluxDB{
Timeout: internal.Duration{Duration: time.Second * 5},
func (i *InfluxDB) udpClient(url *url.URL) (Client, error) {
config := &UDPConfig{
URL: url,
MaxPayloadSize: i.UDPPayload,
Serializer: i.serializer,
}
c, err := i.CreateUDPClientF(config)
if err != nil {
return nil, fmt.Errorf("error creating UDP client [%s]: %v", url, err)
}
return c, nil
}
func (i *InfluxDB) httpClient(ctx context.Context, url *url.URL, proxy *url.URL) (Client, error) {
tlsConfig, err := internal.GetTLSConfig(
i.SSLCert, i.SSLKey, i.SSLCA, i.InsecureSkipVerify)
if err != nil {
return nil, err
}
config := &HTTPConfig{
URL: url,
Timeout: i.Timeout.Duration,
TLSConfig: tlsConfig,
UserAgent: i.UserAgent,
Username: i.Username,
Password: i.Password,
Proxy: proxy,
ContentEncoding: i.ContentEncoding,
Headers: i.HTTPHeaders,
Database: i.Database,
RetentionPolicy: i.RetentionPolicy,
Consistency: i.WriteConsistency,
Serializer: i.serializer,
}
c, err := i.CreateHTTPClientF(config)
if err != nil {
return nil, fmt.Errorf("error creating HTTP client [%s]: %v", url, err)
}
err = c.CreateDatabase(ctx)
if err != nil {
if err, ok := err.(APIError); ok {
if err.StatusCode == 503 {
return c, nil
}
}
log.Printf("W! [outputs.influxdb] when writing to [%s]: database %q creation failed: %v",
c.URL(), c.Database(), err)
}
return c, nil
}
// init registers the influxdb output plugin with its default timeout and
// the real HTTP/UDP client constructors.
//
// The plugin was previously registered twice under the same name (once via
// newInflux and once via this closure); a single registration is kept.
func init() {
	outputs.Add("influxdb", func() telegraf.Output {
		return &InfluxDB{
			Timeout: internal.Duration{Duration: time.Second * 5},
			CreateHTTPClientF: func(config *HTTPConfig) (Client, error) {
				return NewHTTPClient(config)
			},
			CreateUDPClientF: func(config *UDPConfig) (Client, error) {
				return NewUDPClient(config)
			},
		}
	})
}

View File

@@ -1,313 +1,135 @@
package influxdb
package influxdb_test
import (
"fmt"
"io"
"net/http"
"net/http/httptest"
"context"
"testing"
"time"
"github.com/influxdata/telegraf/plugins/outputs/influxdb/client"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/outputs/influxdb"
"github.com/stretchr/testify/require"
)
// TestIdentQuoting verifies that database names are quoted and escaped
// correctly in the CREATE DATABASE statement issued on Connect.
func TestIdentQuoting(t *testing.T) {
	var testCases = []struct {
		database string
		expected string
	}{
		{"x-y", `CREATE DATABASE "x-y"`},
		{`x"y`, `CREATE DATABASE "x\"y"`},
		{"x\ny", `CREATE DATABASE "x\ny"`},
		{`x\y`, `CREATE DATABASE "x\\y"`},
	}

	for _, tc := range testCases {
		ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			r.ParseForm()
			q := r.Form.Get("q")
			assert.Equal(t, tc.expected, q)

			// Headers must be set before WriteHeader; anything set
			// afterwards is silently ignored.
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(http.StatusOK)
			fmt.Fprintln(w, `{"results":[{}]}`)
		}))

		i := InfluxDB{
			URLs:     []string{ts.URL},
			Database: tc.database,
		}

		err := i.Connect()
		require.NoError(t, err)
		require.NoError(t, i.Close())

		// Close at the end of each iteration; a defer here would keep every
		// server alive until the whole test returns.
		ts.Close()
	}
}
func TestUDPInflux(t *testing.T) {
i := InfluxDB{
URLs: []string{"udp://localhost:8089"},
}
err := i.Connect()
require.NoError(t, err)
err = i.Write(testutil.MockMetrics())
require.NoError(t, err)
require.NoError(t, i.Close())
}
func TestHTTPInflux(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/write":
// test that database is set properly
if r.FormValue("db") != "test" {
w.WriteHeader(http.StatusTeapot)
w.Header().Set("Content-Type", "application/json")
}
// test that user agent is set properly
if r.UserAgent() != "telegraf" {
w.WriteHeader(http.StatusTeapot)
w.Header().Set("Content-Type", "application/json")
}
w.WriteHeader(http.StatusNoContent)
w.Header().Set("Content-Type", "application/json")
case "/query":
w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, `{"results":[{}]}`)
}
}))
defer ts.Close()
i := newInflux()
i.URLs = []string{ts.URL}
i.Database = "test"
i.UserAgent = "telegraf"
err := i.Connect()
require.NoError(t, err)
err = i.Write(testutil.MockMetrics())
require.NoError(t, err)
require.NoError(t, i.Close())
}
func TestUDPConnectError(t *testing.T) {
i := InfluxDB{
URLs: []string{"udp://foobar:8089"},
}
err := i.Connect()
require.Error(t, err)
i = InfluxDB{
URLs: []string{"udp://localhost:9999999"},
}
err = i.Connect()
require.Error(t, err)
}
func TestHTTPConnectError_InvalidURL(t *testing.T) {
i := InfluxDB{
URLs: []string{"http://foobar:8089"},
}
err := i.Connect()
require.Error(t, err)
i = InfluxDB{
URLs: []string{"http://localhost:9999999"},
}
err = i.Connect()
require.Error(t, err)
}
func TestHTTPConnectError_DatabaseCreateFail(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/query":
w.WriteHeader(http.StatusNotFound)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, `{"results":[{}],"error":"test error"}`)
}
}))
defer ts.Close()
i := InfluxDB{
URLs: []string{ts.URL},
Database: "test",
}
// database creation errors do not return an error from Connect
// they are only logged.
err := i.Connect()
require.NoError(t, err)
require.NoError(t, i.Close())
}
func TestHTTPError_DatabaseNotFound(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/write":
w.WriteHeader(http.StatusNotFound)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, `{"results":[{}],"error":"database not found"}`)
case "/query":
w.WriteHeader(http.StatusNotFound)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, `{"results":[{}],"error":"database not found"}`)
}
}))
defer ts.Close()
i := InfluxDB{
URLs: []string{ts.URL},
Database: "test",
}
err := i.Connect()
require.NoError(t, err)
err = i.Write(testutil.MockMetrics())
require.Error(t, err)
require.NoError(t, i.Close())
}
func TestHTTPError_WriteErrors(t *testing.T) {
var testCases = []struct {
name string
status int
contentType string
body string
err error
}{
{
// HTTP/1.1 400 Bad Request
// Content-Type: application/json
// X-Influxdb-Version: 1.3.3
//
// {
// "error": "partial write: points beyond retention policy dropped=1"
// }
name: "beyond retention policy is not an error",
status: http.StatusBadRequest,
contentType: "application/json",
body: `{"error":"partial write: points beyond retention policy dropped=1"}`,
err: nil,
},
{
// HTTP/1.1 400 Bad Request
// Content-Type: application/json
// X-Influxdb-Version: 1.3.3
//
// {
// "error": "unable to parse 'foo bar=': missing field value"
// }
name: "unable to parse is not an error",
status: http.StatusBadRequest,
contentType: "application/json",
body: `{"error":"unable to parse 'foo bar=': missing field value"}`,
err: nil,
},
{
// HTTP/1.1 400 Bad Request
// Content-Type: application/json
// X-Influxdb-Version: 1.3.3
//
// {
// "error": "partial write: field type conflict: input field \"bar\" on measurement \"foo\" is type float, already exists as type integer dropped=1"
// }
name: "field type conflict is not an error",
status: http.StatusBadRequest,
contentType: "application/json",
body: `{"error": "partial write: field type conflict: input field \"bar\" on measurement \"foo\" is type float, already exists as type integer dropped=1"}`,
err: nil,
},
{
// HTTP/1.1 500 Internal Server Error
// Content-Type: application/json
// X-Influxdb-Version: 1.3.3-c1.3.3
//
// {
// "error": "write failed: hinted handoff queue not empty"
// }
name: "hinted handoff queue not empty is not an error",
status: http.StatusInternalServerError,
contentType: "application/json",
body: `{"error":"write failed: hinted handoff queue not empty"}`,
err: nil,
},
{
// HTTP/1.1 500 Internal Server Error
// Content-Type: application/json
// X-Influxdb-Version: 1.3.3-c1.3.3
//
// {
// "error": "partial write"
// }
name: "plain partial write is an error",
status: http.StatusInternalServerError,
contentType: "application/json",
body: `{"error":"partial write"}`,
err: fmt.Errorf("Could not write to any InfluxDB server in cluster"),
},
}
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
rw.WriteHeader(tt.status)
rw.Header().Set("Content-Type", tt.contentType)
fmt.Fprintln(rw, tt.body)
}))
defer ts.Close()
influx := InfluxDB{
URLs: []string{ts.URL},
Database: "test",
}
err := influx.Connect()
require.NoError(t, err)
err = influx.Write(testutil.MockMetrics())
require.Equal(t, tt.err, err)
require.NoError(t, influx.Close())
})
}
}
type MockClient struct {
writeStreamCalled int
contentLength int
URLF func() string
DatabaseF func() string
WriteF func(context.Context, []telegraf.Metric) error
CreateDatabaseF func(ctx context.Context) error
}
func (m *MockClient) Query(command string) error {
panic("not implemented")
func (c *MockClient) URL() string {
return c.URLF()
}
func (m *MockClient) Write(b []byte) (int, error) {
panic("not implemented")
func (c *MockClient) Database() string {
return c.DatabaseF()
}
func (m *MockClient) WriteWithParams(b []byte, params client.WriteParams) (int, error) {
panic("not implemented")
func (c *MockClient) Write(ctx context.Context, metrics []telegraf.Metric) error {
return c.WriteF(ctx, metrics)
}
func (m *MockClient) WriteStream(b io.Reader, contentLength int) (int, error) {
m.writeStreamCalled++
m.contentLength = contentLength
return 0, nil
func (c *MockClient) CreateDatabase(ctx context.Context) error {
return c.CreateDatabaseF(ctx)
}
func (m *MockClient) WriteStreamWithParams(b io.Reader, contentLength int, params client.WriteParams) (int, error) {
panic("not implemented")
func TestDeprecatedURLSupport(t *testing.T) {
var actual *influxdb.UDPConfig
output := influxdb.InfluxDB{
URL: "udp://localhost:8086",
CreateUDPClientF: func(config *influxdb.UDPConfig) (influxdb.Client, error) {
actual = config
return &MockClient{}, nil
},
}
err := output.Connect()
require.NoError(t, err)
require.Equal(t, "udp://localhost:8086", actual.URL.String())
}
func (m *MockClient) Close() error {
panic("not implemented")
func TestDefaultURL(t *testing.T) {
var actual *influxdb.HTTPConfig
output := influxdb.InfluxDB{
CreateHTTPClientF: func(config *influxdb.HTTPConfig) (influxdb.Client, error) {
actual = config
return &MockClient{
CreateDatabaseF: func(ctx context.Context) error {
return nil
},
}, nil
},
}
err := output.Connect()
require.NoError(t, err)
require.Equal(t, "http://localhost:8086", actual.URL.String())
}
func TestConnectUDPConfig(t *testing.T) {
var actual *influxdb.UDPConfig
output := influxdb.InfluxDB{
URLs: []string{"udp://localhost:8086"},
UDPPayload: 42,
CreateUDPClientF: func(config *influxdb.UDPConfig) (influxdb.Client, error) {
actual = config
return &MockClient{}, nil
},
}
err := output.Connect()
require.NoError(t, err)
require.Equal(t, "udp://localhost:8086", actual.URL.String())
require.Equal(t, 42, actual.MaxPayloadSize)
require.NotNil(t, actual.Serializer)
}
func TestConnectHTTPConfig(t *testing.T) {
var actual *influxdb.HTTPConfig
output := influxdb.InfluxDB{
URLs: []string{"http://localhost:8089"},
Database: "telegraf",
RetentionPolicy: "default",
WriteConsistency: "any",
Timeout: internal.Duration{Duration: 5 * time.Second},
Username: "guy",
Password: "smiley",
UserAgent: "telegraf",
HTTPProxy: "http://localhost:8089",
HTTPHeaders: map[string]string{
"x": "y",
},
ContentEncoding: "gzip",
InsecureSkipVerify: true,
CreateHTTPClientF: func(config *influxdb.HTTPConfig) (influxdb.Client, error) {
actual = config
return &MockClient{
CreateDatabaseF: func(ctx context.Context) error {
return nil
},
}, nil
},
}
err := output.Connect()
require.NoError(t, err)
require.Equal(t, output.URLs[0], actual.URL.String())
require.Equal(t, output.UserAgent, actual.UserAgent)
require.Equal(t, output.Timeout.Duration, actual.Timeout)
require.Equal(t, output.Username, actual.Username)
require.Equal(t, output.Password, actual.Password)
require.Equal(t, output.HTTPProxy, actual.Proxy.String())
require.Equal(t, output.HTTPHeaders, actual.Headers)
require.Equal(t, output.ContentEncoding, actual.ContentEncoding)
require.Equal(t, output.Database, actual.Database)
require.Equal(t, output.RetentionPolicy, actual.RetentionPolicy)
require.Equal(t, output.WriteConsistency, actual.Consistency)
require.NotNil(t, actual.TLSConfig)
require.NotNil(t, actual.Serializer)
require.Equal(t, output.Database, actual.Database)
}

View File

@@ -0,0 +1,116 @@
package influxdb
import (
"context"
"fmt"
"net"
"net/url"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/serializers"
"github.com/influxdata/telegraf/plugins/serializers/influx"
)
const (
// DefaultMaxPayloadSize is the maximum length of the UDP data payload
DefaultMaxPayloadSize = 512
)
// Dialer creates connections; it abstracts net.Dialer so tests can
// substitute a mock.
type Dialer interface {
	DialContext(ctx context.Context, network, address string) (Conn, error)
}

// Conn is the minimal connection surface the UDP client needs: write a
// payload and close.
type Conn interface {
	Write(b []byte) (int, error)
	Close() error
}

// UDPConfig holds the settings for a UDP line-protocol client.
type UDPConfig struct {
	// MaxPayloadSize caps the datagram payload; 0 selects
	// DefaultMaxPayloadSize.
	MaxPayloadSize int

	// URL of the listener; its Scheme and Host are passed to the dialer.
	URL *url.URL

	// Serializer converts metrics to line protocol; nil gets a default
	// influx serializer (see NewUDPClient).
	Serializer serializers.Serializer

	// Dialer used to open the connection; nil falls back to net.Dialer.
	Dialer Dialer
}
// NewUDPClient creates a Client that writes metrics as influx line
// protocol over UDP.  Zero-value config fields fall back to defaults:
// MaxPayloadSize to DefaultMaxPayloadSize, Serializer to an influx
// serializer capped at the resolved payload size, and Dialer to a
// net.Dialer.  A nil URL is rejected with ErrMissingURL.
func NewUDPClient(config *UDPConfig) (*udpClient, error) {
	if config.URL == nil {
		return nil, ErrMissingURL
	}

	size := config.MaxPayloadSize
	if size == 0 {
		size = DefaultMaxPayloadSize
	}

	serializer := config.Serializer
	if serializer == nil {
		s := influx.NewSerializer()
		// Use the resolved size so the default cap applies even when
		// MaxPayloadSize was left unset (previously the raw 0 was passed,
		// leaving the serializer without the intended 512-byte limit).
		s.SetMaxLineBytes(size)
		serializer = s
	}

	dialer := config.Dialer
	if dialer == nil {
		dialer = &netDialer{net.Dialer{}}
	}

	return &udpClient{
		url:        config.URL,
		serializer: serializer,
		dialer:     dialer,
	}, nil
}
// udpClient writes serialized metrics over a lazily-dialed UDP connection.
type udpClient struct {
	conn       Conn                   // nil until the first Write dials
	dialer     Dialer                 // used to (re)establish conn
	serializer serializers.Serializer // metric -> line protocol bytes
	url        *url.URL               // endpoint address
}

// URL returns the client's endpoint address as a string.
func (c *udpClient) URL() string {
	return c.url.String()
}

// Database returns the empty string; the UDP write path carries no
// database name.
func (c *udpClient) Database() string {
	return ""
}
// Write serializes each metric and sends it over the UDP connection,
// dialing lazily on first use.  On a send error the connection is torn
// down so the next call re-dials.
func (c *udpClient) Write(ctx context.Context, metrics []telegraf.Metric) error {
	if c.conn == nil {
		conn, err := c.dialer.DialContext(ctx, c.url.Scheme, c.url.Host)
		if err != nil {
			return fmt.Errorf("error dialing address [%s]: %s", c.url, err)
		}
		c.conn = conn
	}

	for _, m := range metrics {
		octets, err := c.serializer.Serialize(m)
		if err != nil {
			return fmt.Errorf("could not serialize metric: %v", err)
		}

		if _, err := c.conn.Write(octets); err != nil {
			c.conn.Close()
			c.conn = nil
			return err
		}
	}

	return nil
}
// CreateDatabase is a no-op for UDP; there is no query channel to issue
// a CREATE DATABASE statement on.
func (c *udpClient) CreateDatabase(ctx context.Context) error {
	return nil
}
// netDialer adapts net.Dialer to this package's Dialer interface.
type netDialer struct {
	net.Dialer
}

// DialContext narrows net.Dialer's return type from net.Conn to Conn.
func (d *netDialer) DialContext(ctx context.Context, network, address string) (Conn, error) {
	return d.Dialer.DialContext(ctx, network, address)
}

View File

@@ -0,0 +1,241 @@
package influxdb_test
import (
"bytes"
"context"
"fmt"
"net"
"net/url"
"sync"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/outputs/influxdb"
"github.com/influxdata/telegraf/plugins/serializers/influx"
"github.com/stretchr/testify/require"
)
var (
metricString = "cpu value=42 0\n"
)
// getMetric returns a fixed cpu metric that serializes to metricString
// ("cpu value=42 0\n"); it panics on construction failure since tests
// depend on it unconditionally.
func getMetric() telegraf.Metric {
	m, err := metric.New(
		"cpu",
		map[string]string{},
		map[string]interface{}{"value": 42.0},
		time.Unix(0, 0),
	)
	if err != nil {
		panic(err)
	}
	return m
}
// getURL returns a parsed udp://localhost:0 URL, panicking on failure.
func getURL() *url.URL {
	parsed, err := url.Parse("udp://localhost:0")
	if err != nil {
		panic(err)
	}
	return parsed
}
// MockConn implements influxdb.Conn with injectable Write/Close behavior.
type MockConn struct {
	WriteF func(b []byte) (n int, err error)
	CloseF func() error
}

// Write delegates to WriteF (panics if WriteF is nil).
func (c *MockConn) Write(b []byte) (n int, err error) {
	return c.WriteF(b)
}

// Close delegates to CloseF (panics if CloseF is nil).
func (c *MockConn) Close() error {
	return c.CloseF()
}
// MockDialer implements influxdb.Dialer, forwarding to DialContextF.
type MockDialer struct {
	DialContextF func(network, address string) (influxdb.Conn, error)
}

// DialContext delegates to DialContextF, dropping the context.
func (d *MockDialer) DialContext(ctx context.Context, network string, address string) (influxdb.Conn, error) {
	return d.DialContextF(network, address)
}
// MockSerializer satisfies the UDPConfig.Serializer field via SerializeF.
type MockSerializer struct {
	SerializeF func(metric telegraf.Metric) ([]byte, error)
}

// Serialize delegates to SerializeF.
func (s *MockSerializer) Serialize(metric telegraf.Metric) ([]byte, error) {
	return s.SerializeF(metric)
}
// TestUDP_NewUDPClientNoURL verifies that a config without a URL is
// rejected with ErrMissingURL.
func TestUDP_NewUDPClientNoURL(t *testing.T) {
	config := &influxdb.UDPConfig{}
	_, err := influxdb.NewUDPClient(config)
	require.Equal(t, err, influxdb.ErrMissingURL)
}
// TestUDP_URL verifies the client reports back the URL it was built with.
func TestUDP_URL(t *testing.T) {
	u := getURL()
	config := &influxdb.UDPConfig{
		URL: u,
	}

	client, err := influxdb.NewUDPClient(config)
	require.NoError(t, err)

	require.Equal(t, u.String(), client.URL())
}
// TestUDP_Simple writes two metrics through a mocked connection and
// checks the serialized line protocol handed to the conn.
func TestUDP_Simple(t *testing.T) {
	var sink bytes.Buffer

	config := &influxdb.UDPConfig{
		URL: getURL(),
		Dialer: &MockDialer{
			DialContextF: func(network, address string) (influxdb.Conn, error) {
				return &MockConn{
					WriteF: func(b []byte) (n int, err error) {
						sink.Write(b)
						return 0, nil
					},
				}, nil
			},
		},
	}

	client, err := influxdb.NewUDPClient(config)
	require.NoError(t, err)

	err = client.Write(context.Background(), []telegraf.Metric{
		getMetric(),
		getMetric(),
	})
	require.NoError(t, err)

	require.Equal(t, metricString+metricString, sink.String())
}
// TestUDP_DialError verifies that a dialer failure surfaces as an error
// from Write (the client dials lazily, so the error appears here and not
// in NewUDPClient).
func TestUDP_DialError(t *testing.T) {
	u, err := url.Parse("invalid://127.0.0.1:9999")
	require.NoError(t, err)

	config := &influxdb.UDPConfig{
		URL: u,
		Dialer: &MockDialer{
			DialContextF: func(network, address string) (influxdb.Conn, error) {
				return nil, fmt.Errorf(
					`unsupported scheme [invalid://localhost:9999]: "invalid"`)
			},
		},
	}
	client, err := influxdb.NewUDPClient(config)
	require.NoError(t, err)

	ctx := context.Background()
	err = client.Write(ctx, []telegraf.Metric{getMetric()})
	require.Error(t, err)
}
// TestUDP_WriteError verifies that a failed send returns the error and
// closes the connection (so a later Write would re-dial).
func TestUDP_WriteError(t *testing.T) {
	closed := false
	config := &influxdb.UDPConfig{
		URL: getURL(),
		Dialer: &MockDialer{
			DialContextF: func(network, address string) (influxdb.Conn, error) {
				conn := &MockConn{
					WriteF: func(b []byte) (n int, err error) {
						return 0, fmt.Errorf(
							"write udp 127.0.0.1:52190->127.0.0.1:9999: write: connection refused")
					},
					CloseF: func() error {
						closed = true
						return nil
					},
				}
				return conn, nil
			},
		},
	}
	client, err := influxdb.NewUDPClient(config)
	require.NoError(t, err)

	ctx := context.Background()
	err = client.Write(ctx, []telegraf.Metric{getMetric()})
	require.Error(t, err)
	// The conn must have been closed as part of the error handling.
	require.True(t, closed)
}
// TestUDP_SerializeError verifies that a serializer failure is wrapped
// and returned from Write, preserving the underlying error text.
func TestUDP_SerializeError(t *testing.T) {
	config := &influxdb.UDPConfig{
		URL: getURL(),
		Dialer: &MockDialer{
			DialContextF: func(network, address string) (influxdb.Conn, error) {
				conn := &MockConn{}
				return conn, nil
			},
		},
		Serializer: &MockSerializer{
			SerializeF: func(metric telegraf.Metric) ([]byte, error) {
				return nil, influx.ErrNeedMoreSpace
			},
		},
	}
	client, err := influxdb.NewUDPClient(config)
	require.NoError(t, err)

	ctx := context.Background()
	err = client.Write(ctx, []telegraf.Metric{getMetric()})
	require.Error(t, err)
	require.Contains(t, err.Error(), influx.ErrNeedMoreSpace.Error())
}
// TestUDP_WriteWithRealConn writes metrics through a real UDP socket and
// verifies the line protocol received on the other end.
func TestUDP_WriteWithRealConn(t *testing.T) {
	conn, err := net.ListenPacket("udp", ":0")
	require.NoError(t, err)

	metrics := []telegraf.Metric{
		getMetric(),
		getMetric(),
	}

	buf := make([]byte, 200)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		var total int
		// One ReadFrom per expected datagram; neither index nor value is
		// used, so plain "for range" replaces "for _, _ = range".
		for range metrics {
			n, _, err := conn.ReadFrom(buf[total:])
			if err != nil {
				break
			}
			total += n
		}
		buf = buf[:total]
	}()

	addr := conn.LocalAddr()
	u, err := url.Parse(fmt.Sprintf("%s://%s", addr.Network(), addr))
	require.NoError(t, err)

	config := &influxdb.UDPConfig{
		URL: u,
	}
	client, err := influxdb.NewUDPClient(config)
	require.NoError(t, err)

	ctx := context.Background()
	err = client.Write(ctx, metrics)
	require.NoError(t, err)

	wg.Wait()
	require.Equal(t, metricString+metricString, string(buf))
}

View File

@@ -11,7 +11,6 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/serializers"
"github.com/influxdata/telegraf/plugins/serializers/graphite"
@@ -93,8 +92,6 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error {
var points []string
var metricType string
var toSerialize telegraf.Metric
var newTags map[string]string
for _, m := range metrics {
// Pull the metric_type out of the metric's tags. We don't want the type
@@ -108,18 +105,10 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error {
//
// increment some_prefix.host.tag1.tag2.tag3.counter.field value timestamp
//
newTags = m.Tags()
metricType = newTags["metric_type"]
delete(newTags, "metric_type")
metricType = m.Tags()["metric_type"]
m.RemoveTag("metric_type")
toSerialize, _ = metric.New(
m.Name(),
newTags,
m.Fields(),
m.Time(),
)
buf, err := s.Serialize(toSerialize)
buf, err := s.Serialize(m)
if err != nil {
log.Printf("E! Error serializing a metric to Instrumental: %s", err)
}

View File

@@ -187,9 +187,7 @@ func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) {
gauges := []*Gauge{}
if m.Time().Unix() == 0 {
return gauges, fmt.Errorf(
"Measure time must not be zero\n <%s> \n",
m.String())
return gauges, fmt.Errorf("time was zero %s", m.Name())
}
metricSource := graphite.InsertField(
graphite.SerializeBucketName("", m.Tags(), l.Template, ""),

View File

@@ -138,8 +138,7 @@ func (m *MQTT) Write(metrics []telegraf.Metric) error {
buf, err := m.serializer.Serialize(metric)
if err != nil {
return fmt.Errorf("MQTT Could not serialize metric: %s",
metric.String())
return err
}
err = m.publish(topic, buf)

View File

@@ -125,7 +125,7 @@ func (o *OpenTSDB) WriteHttp(metrics []telegraf.Metric, u *url.URL) error {
}
for _, m := range metrics {
now := m.UnixNano() / 1000000000
now := m.Time().UnixNano() / 1000000000
tags := cleanTags(m.Tags())
for fieldName, value := range m.Fields() {
@@ -170,7 +170,7 @@ func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error {
defer connection.Close()
for _, m := range metrics {
now := m.UnixNano() / 1000000000
now := m.Time().UnixNano() / 1000000000
tags := ToLineFormat(cleanTags(m.Tags()))
for fieldName, value := range m.Fields() {

View File

@@ -9,9 +9,10 @@ import (
"strconv"
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs"
"time"
)
type Wavefront struct {
@@ -159,7 +160,7 @@ func buildMetrics(m telegraf.Metric, w *Wavefront) []*MetricPoint {
metric := &MetricPoint{
Metric: name,
Timestamp: m.UnixNano() / 1000000000,
Timestamp: m.Time().Unix(),
}
metricValue, buildError := buildValue(value, metric.Metric, w)

View File

@@ -10,7 +10,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/templating"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/parsers/influx"
"github.com/tidwall/gjson"
)
@@ -195,7 +195,7 @@ func (p *Parser) unmarshalMetrics(buf []byte) (map[string]interface{}, error) {
return jsonOut, nil
}
func (p *Parser) readDWMetrics(metricType string, dwms interface{}, metrics []telegraf.Metric, time time.Time) []telegraf.Metric {
func (p *Parser) readDWMetrics(metricType string, dwms interface{}, metrics []telegraf.Metric, tm time.Time) []telegraf.Metric {
switch dwmsTyped := dwms.(type) {
case map[string]interface{}:
@@ -240,10 +240,15 @@ func (p *Parser) readDWMetrics(metricType string, dwms interface{}, metrics []te
metricsBuffer.WriteString(strings.Join(fields, ","))
metricsBuffer.WriteString("\n")
}
newMetrics, err := metric.ParseWithDefaultTime(metricsBuffer.Bytes(), time)
handler := influx.NewMetricHandler()
handler.SetTimeFunc(func() time.Time { return tm })
parser := influx.NewParser(handler)
newMetrics, err := parser.Parse(metricsBuffer.Bytes())
if err != nil {
log.Printf("W! failed to create metric of type '%s': %s\n", metricType, err)
}
return append(metrics, newMetrics...)
default:
return metrics

View File

@@ -89,7 +89,6 @@ func (p *GraphiteParser) Parse(buf []byte) ([]telegraf.Metric, error) {
if line == "" {
continue
}
metric, err := p.ParseLine(line)
if err == nil {
metrics = append(metrics, metric)

View File

@@ -10,6 +10,7 @@ import (
"github.com/influxdata/telegraf/metric"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func BenchmarkParse(b *testing.B) {
@@ -379,7 +380,7 @@ func TestFilterMatchDefault(t *testing.T) {
m, err := p.ParseLine("miss.servers.localhost.cpu_load 11 1435077219")
assert.NoError(t, err)
assert.Equal(t, exp.String(), m.String())
assert.Equal(t, exp, m)
}
func TestFilterMatchMultipleMeasurement(t *testing.T) {
@@ -397,7 +398,7 @@ func TestFilterMatchMultipleMeasurement(t *testing.T) {
m, err := p.ParseLine("servers.localhost.cpu.cpu_load.10 11 1435077219")
assert.NoError(t, err)
assert.Equal(t, exp.String(), m.String())
assert.Equal(t, exp, m)
}
func TestFilterMatchMultipleMeasurementSeparator(t *testing.T) {
@@ -416,7 +417,7 @@ func TestFilterMatchMultipleMeasurementSeparator(t *testing.T) {
m, err := p.ParseLine("servers.localhost.cpu.cpu_load.10 11 1435077219")
assert.NoError(t, err)
assert.Equal(t, exp.String(), m.String())
assert.Equal(t, exp, m)
}
func TestFilterMatchSingle(t *testing.T) {
@@ -433,7 +434,7 @@ func TestFilterMatchSingle(t *testing.T) {
m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219")
assert.NoError(t, err)
assert.Equal(t, exp.String(), m.String())
assert.Equal(t, exp, m)
}
func TestParseNoMatch(t *testing.T) {
@@ -451,7 +452,7 @@ func TestParseNoMatch(t *testing.T) {
m, err := p.ParseLine("servers.localhost.memory.VmallocChunk 11 1435077219")
assert.NoError(t, err)
assert.Equal(t, exp.String(), m.String())
assert.Equal(t, exp, m)
}
func TestFilterMatchWildcard(t *testing.T) {
@@ -469,7 +470,7 @@ func TestFilterMatchWildcard(t *testing.T) {
m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219")
assert.NoError(t, err)
assert.Equal(t, exp.String(), m.String())
assert.Equal(t, exp, m)
}
func TestFilterMatchExactBeforeWildcard(t *testing.T) {
@@ -489,7 +490,7 @@ func TestFilterMatchExactBeforeWildcard(t *testing.T) {
m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219")
assert.NoError(t, err)
assert.Equal(t, exp.String(), m.String())
assert.Equal(t, exp, m)
}
func TestFilterMatchMostLongestFilter(t *testing.T) {
@@ -508,8 +509,13 @@ func TestFilterMatchMostLongestFilter(t *testing.T) {
m, err := p.ParseLine("servers.localhost.cpu.cpu_load 11 1435077219")
assert.NoError(t, err)
assert.Contains(t, m.String(), ",host=localhost")
assert.Contains(t, m.String(), ",resource=cpu")
value, ok := m.GetTag("host")
require.True(t, ok)
require.Equal(t, "localhost", value)
value, ok = m.GetTag("resource")
require.True(t, ok)
require.Equal(t, "cpu", value)
}
func TestFilterMatchMultipleWildcards(t *testing.T) {
@@ -533,7 +539,7 @@ func TestFilterMatchMultipleWildcards(t *testing.T) {
m, err := p.ParseLine("servers.server01.cpu_load 11 1435077219")
assert.NoError(t, err)
assert.Equal(t, exp.String(), m.String())
assert.Equal(t, exp, m)
}
func TestParseDefaultTags(t *testing.T) {
@@ -549,9 +555,17 @@ func TestParseDefaultTags(t *testing.T) {
m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219")
assert.NoError(t, err)
assert.Contains(t, m.String(), ",host=localhost")
assert.Contains(t, m.String(), ",region=us-east")
assert.Contains(t, m.String(), ",zone=1c")
value, ok := m.GetTag("host")
require.True(t, ok)
require.Equal(t, "localhost", value)
value, ok = m.GetTag("region")
require.True(t, ok)
require.Equal(t, "us-east", value)
value, ok = m.GetTag("zone")
require.True(t, ok)
require.Equal(t, "1c", value)
}
func TestParseDefaultTemplateTags(t *testing.T) {
@@ -566,9 +580,17 @@ func TestParseDefaultTemplateTags(t *testing.T) {
m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219")
assert.NoError(t, err)
assert.Contains(t, m.String(), ",host=localhost")
assert.Contains(t, m.String(), ",region=us-east")
assert.Contains(t, m.String(), ",zone=1c")
value, ok := m.GetTag("host")
require.True(t, ok)
require.Equal(t, "localhost", value)
value, ok = m.GetTag("region")
require.True(t, ok)
require.Equal(t, "us-east", value)
value, ok = m.GetTag("zone")
require.True(t, ok)
require.Equal(t, "1c", value)
}
func TestParseDefaultTemplateTagsOverridGlobal(t *testing.T) {
@@ -581,11 +603,20 @@ func TestParseDefaultTemplateTagsOverridGlobal(t *testing.T) {
}
m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219")
_ = m
assert.NoError(t, err)
assert.Contains(t, m.String(), ",host=localhost")
assert.Contains(t, m.String(), ",region=us-east")
assert.Contains(t, m.String(), ",zone=1c")
value, ok := m.GetTag("host")
require.True(t, ok)
require.Equal(t, "localhost", value)
value, ok = m.GetTag("region")
require.True(t, ok)
require.Equal(t, "us-east", value)
value, ok = m.GetTag("zone")
require.True(t, ok)
require.Equal(t, "1c", value)
}
func TestParseTemplateWhitespace(t *testing.T) {
@@ -602,9 +633,17 @@ func TestParseTemplateWhitespace(t *testing.T) {
m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219")
assert.NoError(t, err)
assert.Contains(t, m.String(), ",host=localhost")
assert.Contains(t, m.String(), ",region=us-east")
assert.Contains(t, m.String(), ",zone=1c")
value, ok := m.GetTag("host")
require.True(t, ok)
require.Equal(t, "localhost", value)
value, ok = m.GetTag("region")
require.True(t, ok)
require.Equal(t, "us-east", value)
value, ok = m.GetTag("zone")
require.True(t, ok)
require.Equal(t, "1c", value)
}
// Test basic functionality of ApplyTemplate

View File

@@ -1,500 +0,0 @@
ctr,host=tars,some=tag-0 n=3i 1476437629342569532
ctr,host=tars,some=tag-1 n=3i 1476437629342569532
ctr,host=tars,some=tag-2 n=3i 1476437629342569532
ctr,host=tars,some=tag-3 n=3i 1476437629342569532
ctr,host=tars,some=tag-4 n=3i 1476437629342569532
ctr,host=tars,some=tag-5 n=3i 1476437629342569532
ctr,host=tars,some=tag-6 n=3i 1476437629342569532
ctr,host=tars,some=tag-7 n=3i 1476437629342569532
ctr,host=tars,some=tag-8 n=3i 1476437629342569532
ctr,host=tars,some=tag-9 n=3i 1476437629342569532
ctr,host=tars,some=tag-10 n=3i 1476437629342569532
ctr,host=tars,some=tag-11 n=3i 1476437629342569532
ctr,host=tars,some=tag-12 n=3i 1476437629342569532
ctr,host=tars,some=tag-13 n=3i 1476437629342569532
ctr,host=tars,some=tag-14 n=3i 1476437629342569532
ctr,host=tars,some=tag-15 n=3i 1476437629342569532
ctr,host=tars,some=tag-16 n=3i 1476437629342569532
ctr,host=tars,some=tag-17 n=3i 1476437629342569532
ctr,host=tars,some=tag-18 n=3i 1476437629342569532
ctr,host=tars,some=tag-19 n=3i 1476437629342569532
ctr,host=tars,some=tag-20 n=3i 1476437629342569532
ctr,host=tars,some=tag-21 n=3i 1476437629342569532
ctr,host=tars,some=tag-22 n=3i 1476437629342569532
ctr,host=tars,some=tag-23 n=3i 1476437629342569532
ctr,host=tars,some=tag-24 n=3i 1476437629342569532
ctr,host=tars,some=tag-25 n=3i 1476437629342569532
ctr,host=tars,some=tag-26 n=3i 1476437629342569532
ctr,host=tars,some=tag-27 n=3i 1476437629342569532
ctr,host=tars,some=tag-28 n=3i 1476437629342569532
ctr,host=tars,some=tag-29 n=3i 1476437629342569532
ctr,host=tars,some=tag-30 n=3i 1476437629342569532
ctr,host=tars,some=tag-31 n=2i 1476437629342569532
ctr,host=tars,some=tag-32 n=2i 1476437629342569532
ctr,host=tars,some=tag-33 n=2i 1476437629342569532
ctr,host=tars,some=tag-34 n=2i 1476437629342569532
ctr,host=tars,some=tag-35 n=2i 1476437629342569532
ctr,host=tars,some=tag-36 n=2i 1476437629342569532
ctr,host=tars,some=tag-37 n=2i 1476437629342569532
ctr,host=tars,some=tag-38 n=2i 1476437629342569532
ctr,host=tars,some=tag-39 n=2i 1476437629342569532
ctr,host=tars,some=tag-40 n=2i 1476437629342569532
ctr,host=tars,some=tag-41 n=2i 1476437629342569532
ctr,host=tars,some=tag-42 n=2i 1476437629342569532
ctr,host=tars,some=tag-43 n=2i 1476437629342569532
ctr,host=tars,some=tag-44 n=2i 1476437629342569532
ctr,host=tars,some=tag-45 n=2i 1476437629342569532
ctr,host=tars,some=tag-46 n=2i 1476437629342569532
ctr,host=tars,some=tag-47 n=2i 1476437629342569532
ctr,host=tars,some=tag-48 n=2i 1476437629342569532
ctr,host=tars,some=tag-49 n=2i 1476437629342569532
ctr,host=tars,some=tag-50 n=2i 1476437629342569532
ctr,host=tars,some=tag-51 n=2i 1476437629342569532
ctr,host=tars,some=tag-52 n=2i 1476437629342569532
ctr,host=tars,some=tag-53 n=2i 1476437629342569532
ctr,host=tars,some=tag-54 n=2i 1476437629342569532
ctr,host=tars,some=tag-55 n=2i 1476437629342569532
ctr,host=tars,some=tag-56 n=2i 1476437629342569532
ctr,host=tars,some=tag-57 n=2i 1476437629342569532
ctr,host=tars,some=tag-58 n=2i 1476437629342569532
ctr,host=tars,some=tag-59 n=2i 1476437629342569532
ctr,host=tars,some=tag-60 n=2i 1476437629342569532
ctr,host=tars,some=tag-61 n=2i 1476437629342569532
ctr,host=tars,some=tag-62 n=2i 1476437629342569532
ctr,host=tars,some=tag-63 n=2i 1476437629342569532
ctr,host=tars,some=tag-64 n=2i 1476437629342569532
ctr,host=tars,some=tag-65 n=2i 1476437629342569532
ctr,host=tars,some=tag-66 n=2i 1476437629342569532
ctr,host=tars,some=tag-67 n=2i 1476437629342569532
ctr,host=tars,some=tag-68 n=2i 1476437629342569532
ctr,host=tars,some=tag-69 n=2i 1476437629342569532
ctr,host=tars,some=tag-70 n=2i 1476437629342569532
ctr,host=tars,some=tag-71 n=2i 1476437629342569532
ctr,host=tars,some=tag-72 n=2i 1476437629342569532
ctr,host=tars,some=tag-73 n=2i 1476437629342569532
ctr,host=tars,some=tag-74 n=2i 1476437629342569532
ctr,host=tars,some=tag-75 n=2i 1476437629342569532
ctr,host=tars,some=tag-76 n=2i 1476437629342569532
ctr,host=tars,some=tag-77 n=2i 1476437629342569532
ctr,host=tars,some=tag-78 n=2i 1476437629342569532
ctr,host=tars,some=tag-79 n=2i 1476437629342569532
ctr,host=tars,some=tag-80 n=2i 1476437629342569532
ctr,host=tars,some=tag-81 n=2i 1476437629342569532
ctr,host=tars,some=tag-82 n=2i 1476437629342569532
ctr,host=tars,some=tag-83 n=2i 1476437629342569532
ctr,host=tars,some=tag-84 n=2i 1476437629342569532
ctr,host=tars,some=tag-85 n=2i 1476437629342569532
ctr,host=tars,some=tag-86 n=2i 1476437629342569532
ctr,host=tars,some=tag-87 n=2i 1476437629342569532
ctr,host=tars,some=tag-88 n=2i 1476437629342569532
ctr,host=tars,some=tag-89 n=2i 1476437629342569532
ctr,host=tars,some=tag-90 n=2i 1476437629342569532
ctr,host=tars,some=tag-91 n=2i 1476437629342569532
ctr,host=tars,some=tag-92 n=2i 1476437629342569532
ctr,host=tars,some=tag-93 n=2i 1476437629342569532
ctr,host=tars,some=tag-94 n=2i 1476437629342569532
ctr,host=tars,some=tag-95 n=2i 1476437629342569532
ctr,host=tars,some=tag-96 n=2i 1476437629342569532
ctr,host=tars,some=tag-97 n=2i 1476437629342569532
ctr,host=tars,some=tag-98 n=2i 1476437629342569532
ctr,host=tars,some=tag-99 n=2i 1476437629342569532
ctr,host=tars,some=tag-100 n=2i 1476437629342569532
ctr,host=tars,some=tag-101 n=2i 1476437629342569532
ctr,host=tars,some=tag-102 n=2i 1476437629342569532
ctr,host=tars,some=tag-103 n=2i 1476437629342569532
ctr,host=tars,some=tag-104 n=2i 1476437629342569532
ctr,host=tars,some=tag-105 n=2i 1476437629342569532
ctr,host=tars,some=tag-106 n=2i 1476437629342569532
ctr,host=tars,some=tag-107 n=2i 1476437629342569532
ctr,host=tars,some=tag-108 n=2i 1476437629342569532
ctr,host=tars,some=tag-109 n=2i 1476437629342569532
ctr,host=tars,some=tag-110 n=2i 1476437629342569532
ctr,host=tars,some=tag-111 n=2i 1476437629342569532
ctr,host=tars,some=tag-112 n=2i 1476437629342569532
ctr,host=tars,some=tag-113 n=2i 1476437629342569532
ctr,host=tars,some=tag-114 n=2i 1476437629342569532
ctr,host=tars,some=tag-115 n=2i 1476437629342569532
ctr,host=tars,some=tag-116 n=2i 1476437629342569532
ctr,host=tars,some=tag-117 n=2i 1476437629342569532
ctr,host=tars,some=tag-118 n=2i 1476437629342569532
ctr,host=tars,some=tag-119 n=2i 1476437629342569532
ctr,host=tars,some=tag-120 n=2i 1476437629342569532
ctr,host=tars,some=tag-121 n=2i 1476437629342569532
ctr,host=tars,some=tag-122 n=2i 1476437629342569532
ctr,host=tars,some=tag-123 n=2i 1476437629342569532
ctr,host=tars,some=tag-124 n=2i 1476437629342569532
ctr,host=tars,some=tag-125 n=2i 1476437629342569532
ctr,host=tars,some=tag-126 n=2i 1476437629342569532
ctr,host=tars,some=tag-127 n=2i 1476437629342569532
ctr,host=tars,some=tag-128 n=2i 1476437629342569532
ctr,host=tars,some=tag-129 n=2i 1476437629342569532
ctr,host=tars,some=tag-130 n=2i 1476437629342569532
ctr,host=tars,some=tag-131 n=2i 1476437629342569532
ctr,host=tars,some=tag-132 n=2i 1476437629342569532
ctr,host=tars,some=tag-133 n=2i 1476437629342569532
ctr,host=tars,some=tag-134 n=2i 1476437629342569532
ctr,host=tars,some=tag-135 n=2i 1476437629342569532
ctr,host=tars,some=tag-136 n=2i 1476437629342569532
ctr,host=tars,some=tag-137 n=2i 1476437629342569532
ctr,host=tars,some=tag-138 n=2i 1476437629342569532
ctr,host=tars,some=tag-139 n=2i 1476437629342569532
ctr,host=tars,some=tag-140 n=2i 1476437629342569532
ctr,host=tars,some=tag-141 n=2i 1476437629342569532
ctr,host=tars,some=tag-142 n=2i 1476437629342569532
ctr,host=tars,some=tag-143 n=2i 1476437629342569532
ctr,host=tars,some=tag-144 n=2i 1476437629342569532
ctr,host=tars,some=tag-145 n=2i 1476437629342569532
ctr,host=tars,some=tag-146 n=2i 1476437629342569532
ctr,host=tars,some=tag-147 n=2i 1476437629342569532
ctr,host=tars,some=tag-148 n=2i 1476437629342569532
ctr,host=tars,some=tag-149 n=2i 1476437629342569532
ctr,host=tars,some=tag-150 n=2i 1476437629342569532
ctr,host=tars,some=tag-151 n=2i 1476437629342569532
ctr,host=tars,some=tag-152 n=2i 1476437629342569532
ctr,host=tars,some=tag-153 n=2i 1476437629342569532
ctr,host=tars,some=tag-154 n=2i 1476437629342569532
ctr,host=tars,some=tag-155 n=2i 1476437629342569532
ctr,host=tars,some=tag-156 n=2i 1476437629342569532
ctr,host=tars,some=tag-157 n=2i 1476437629342569532
ctr,host=tars,some=tag-158 n=2i 1476437629342569532
ctr,host=tars,some=tag-159 n=2i 1476437629342569532
ctr,host=tars,some=tag-160 n=2i 1476437629342569532
ctr,host=tars,some=tag-161 n=2i 1476437629342569532
ctr,host=tars,some=tag-162 n=2i 1476437629342569532
ctr,host=tars,some=tag-163 n=2i 1476437629342569532
ctr,host=tars,some=tag-164 n=2i 1476437629342569532
ctr,host=tars,some=tag-165 n=2i 1476437629342569532
ctr,host=tars,some=tag-166 n=2i 1476437629342569532
ctr,host=tars,some=tag-167 n=2i 1476437629342569532
ctr,host=tars,some=tag-168 n=2i 1476437629342569532
ctr,host=tars,some=tag-169 n=2i 1476437629342569532
ctr,host=tars,some=tag-170 n=2i 1476437629342569532
ctr,host=tars,some=tag-171 n=2i 1476437629342569532
ctr,host=tars,some=tag-172 n=2i 1476437629342569532
ctr,host=tars,some=tag-173 n=2i 1476437629342569532
ctr,host=tars,some=tag-174 n=2i 1476437629342569532
ctr,host=tars,some=tag-175 n=2i 1476437629342569532
ctr,host=tars,some=tag-176 n=2i 1476437629342569532
ctr,host=tars,some=tag-177 n=2i 1476437629342569532
ctr,host=tars,some=tag-178 n=2i 1476437629342569532
ctr,host=tars,some=tag-179 n=2i 1476437629342569532
ctr,host=tars,some=tag-180 n=2i 1476437629342569532
ctr,host=tars,some=tag-181 n=2i 1476437629342569532
ctr,host=tars,some=tag-182 n=2i 1476437629342569532
ctr,host=tars,some=tag-183 n=2i 1476437629342569532
ctr,host=tars,some=tag-184 n=2i 1476437629342569532
ctr,host=tars,some=tag-185 n=2i 1476437629342569532
ctr,host=tars,some=tag-186 n=2i 1476437629342569532
ctr,host=tars,some=tag-187 n=2i 1476437629342569532
ctr,host=tars,some=tag-188 n=2i 1476437629342569532
ctr,host=tars,some=tag-189 n=2i 1476437629342569532
ctr,host=tars,some=tag-190 n=2i 1476437629342569532
ctr,host=tars,some=tag-191 n=2i 1476437629342569532
ctr,host=tars,some=tag-192 n=2i 1476437629342569532
ctr,host=tars,some=tag-193 n=2i 1476437629342569532
ctr,host=tars,some=tag-194 n=2i 1476437629342569532
ctr,host=tars,some=tag-195 n=2i 1476437629342569532
ctr,host=tars,some=tag-196 n=2i 1476437629342569532
ctr,host=tars,some=tag-197 n=2i 1476437629342569532
ctr,host=tars,some=tag-198 n=2i 1476437629342569532
ctr,host=tars,some=tag-199 n=2i 1476437629342569532
ctr,host=tars,some=tag-200 n=2i 1476437629342569532
ctr,host=tars,some=tag-201 n=2i 1476437629342569532
ctr,host=tars,some=tag-202 n=2i 1476437629342569532
ctr,host=tars,some=tag-203 n=2i 1476437629342569532
ctr,host=tars,some=tag-204 n=2i 1476437629342569532
ctr,host=tars,some=tag-205 n=2i 1476437629342569532
ctr,host=tars,some=tag-206 n=2i 1476437629342569532
ctr,host=tars,some=tag-207 n=2i 1476437629342569532
ctr,host=tars,some=tag-208 n=2i 1476437629342569532
ctr,host=tars,some=tag-209 n=2i 1476437629342569532
ctr,host=tars,some=tag-210 n=2i 1476437629342569532
ctr,host=tars,some=tag-211 n=2i 1476437629342569532
ctr,host=tars,some=tag-212 n=2i 1476437629342569532
ctr,host=tars,some=tag-213 n=2i 1476437629342569532
ctr,host=tars,some=tag-214 n=2i 1476437629342569532
ctr,host=tars,some=tag-215 n=2i 1476437629342569532
ctr,host=tars,some=tag-216 n=2i 1476437629342569532
ctr,host=tars,some=tag-217 n=2i 1476437629342569532
ctr,host=tars,some=tag-218 n=2i 1476437629342569532
ctr,host=tars,some=tag-219 n=2i 1476437629342569532
ctr,host=tars,some=tag-220 n=2i 1476437629342569532
ctr,host=tars,some=tag-221 n=2i 1476437629342569532
ctr,host=tars,some=tag-222 n=2i 1476437629342569532
ctr,host=tars,some=tag-223 n=2i 1476437629342569532
ctr,host=tars,some=tag-224 n=2i 1476437629342569532
ctr,host=tars,some=tag-225 n=2i 1476437629342569532
ctr,host=tars,some=tag-226 n=2i 1476437629342569532
ctr,host=tars,some=tag-227 n=2i 1476437629342569532
ctr,host=tars,some=tag-228 n=2i 1476437629342569532
ctr,host=tars,some=tag-229 n=2i 1476437629342569532
ctr,host=tars,some=tag-230 n=2i 1476437629342569532
ctr,host=tars,some=tag-231 n=2i 1476437629342569532
ctr,host=tars,some=tag-232 n=2i 1476437629342569532
ctr,host=tars,some=tag-233 n=2i 1476437629342569532
ctr,host=tars,some=tag-234 n=2i 1476437629342569532
ctr,host=tars,some=tag-235 n=2i 1476437629342569532
ctr,host=tars,some=tag-236 n=2i 1476437629342569532
ctr,host=tars,some=tag-237 n=2i 1476437629342569532
ctr,host=tars,some=tag-238 n=2i 1476437629342569532
ctr,host=tars,some=tag-239 n=2i 1476437629342569532
ctr,host=tars,some=tag-240 n=2i 1476437629342569532
ctr,host=tars,some=tag-241 n=2i 1476437629342569532
ctr,host=tars,some=tag-242 n=2i 1476437629342569532
ctr,host=tars,some=tag-243 n=2i 1476437629342569532
ctr,host=tars,some=tag-244 n=2i 1476437629342569532
ctr,host=tars,some=tag-245 n=2i 1476437629342569532
ctr,host=tars,some=tag-246 n=2i 1476437629342569532
ctr,host=tars,some=tag-247 n=2i 1476437629342569532
ctr,host=tars,some=tag-248 n=2i 1476437629342569532
ctr,host=tars,some=tag-249 n=2i 1476437629342569532
ctr,host=tars,some=tag-250 n=2i 1476437629342569532
ctr,host=tars,some=tag-251 n=1i 1476437629342569532
ctr,host=tars,some=tag-252 n=1i 1476437629342569532
ctr,host=tars,some=tag-253 n=1i 1476437629342569532
ctr,host=tars,some=tag-254 n=1i 1476437629342569532
ctr,host=tars,some=tag-255 n=1i 1476437629342569532
ctr,host=tars,some=tag-256 n=1i 1476437629342569532
ctr,host=tars,some=tag-257 n=1i 1476437629342569532
ctr,host=tars,some=tag-258 n=1i 1476437629342569532
ctr,host=tars,some=tag-259 n=1i 1476437629342569532
ctr,host=tars,some=tag-260 n=1i 1476437629342569532
ctr,host=tars,some=tag-261 n=1i 1476437629342569532
ctr,host=tars,some=tag-262 n=1i 1476437629342569532
ctr,host=tars,some=tag-263 n=1i 1476437629342569532
ctr,host=tars,some=tag-264 n=1i 1476437629342569532
ctr,host=tars,some=tag-265 n=1i 1476437629342569532
ctr,host=tars,some=tag-266 n=1i 1476437629342569532
ctr,host=tars,some=tag-267 n=1i 1476437629342569532
ctr,host=tars,some=tag-268 n=1i 1476437629342569532
ctr,host=tars,some=tag-269 n=1i 1476437629342569532
ctr,host=tars,some=tag-270 n=1i 1476437629342569532
ctr,host=tars,some=tag-271 n=1i 1476437629342569532
ctr,host=tars,some=tag-272 n=1i 1476437629342569532
ctr,host=tars,some=tag-273 n=1i 1476437629342569532
ctr,host=tars,some=tag-274 n=1i 1476437629342569532
ctr,host=tars,some=tag-275 n=1i 1476437629342569532
ctr,host=tars,some=tag-276 n=1i 1476437629342569532
ctr,host=tars,some=tag-277 n=1i 1476437629342569532
ctr,host=tars,some=tag-278 n=1i 1476437629342569532
ctr,host=tars,some=tag-279 n=1i 1476437629342569532
ctr,host=tars,some=tag-280 n=1i 1476437629342569532
ctr,host=tars,some=tag-281 n=1i 1476437629342569532
ctr,host=tars,some=tag-282 n=1i 1476437629342569532
ctr,host=tars,some=tag-283 n=1i 1476437629342569532
ctr,host=tars,some=tag-284 n=1i 1476437629342569532
ctr,host=tars,some=tag-285 n=1i 1476437629342569532
ctr,host=tars,some=tag-286 n=1i 1476437629342569532
ctr,host=tars,some=tag-287 n=1i 1476437629342569532
ctr,host=tars,some=tag-288 n=1i 1476437629342569532
ctr,host=tars,some=tag-289 n=1i 1476437629342569532
ctr,host=tars,some=tag-290 n=1i 1476437629342569532
ctr,host=tars,some=tag-291 n=1i 1476437629342569532
ctr,host=tars,some=tag-292 n=1i 1476437629342569532
ctr,host=tars,some=tag-293 n=1i 1476437629342569532
ctr,host=tars,some=tag-294 n=1i 1476437629342569532
ctr,host=tars,some=tag-295 n=1i 1476437629342569532
ctr,host=tars,some=tag-296 n=1i 1476437629342569532
ctr,host=tars,some=tag-297 n=1i 1476437629342569532
ctr,host=tars,some=tag-298 n=1i 1476437629342569532
ctr,host=tars,some=tag-299 n=1i 1476437629342569532
ctr,host=tars,some=tag-300 n=1i 1476437629342569532
ctr,host=tars,some=tag-301 n=1i 1476437629342569532
ctr,host=tars,some=tag-302 n=1i 1476437629342569532
ctr,host=tars,some=tag-303 n=1i 1476437629342569532
ctr,host=tars,some=tag-304 n=1i 1476437629342569532
ctr,host=tars,some=tag-305 n=1i 1476437629342569532
ctr,host=tars,some=tag-306 n=1i 1476437629342569532
ctr,host=tars,some=tag-307 n=1i 1476437629342569532
ctr,host=tars,some=tag-308 n=1i 1476437629342569532
ctr,host=tars,some=tag-309 n=1i 1476437629342569532
ctr,host=tars,some=tag-310 n=1i 1476437629342569532
ctr,host=tars,some=tag-311 n=1i 1476437629342569532
ctr,host=tars,some=tag-312 n=0i 1476437629342569532
ctr,host=tars,some=tag-313 n=0i 1476437629342569532
ctr,host=tars,some=tag-314 n=0i 1476437629342569532
ctr,host=tars,some=tag-315 n=0i 1476437629342569532
ctr,host=tars,some=tag-316 n=0i 1476437629342569532
ctr,host=tars,some=tag-317 n=0i 1476437629342569532
ctr,host=tars,some=tag-318 n=0i 1476437629342569532
ctr,host=tars,some=tag-319 n=0i 1476437629342569532
ctr,host=tars,some=tag-320 n=0i 1476437629342523514
ctr,host=tars,some=tag-321 n=0i 1476437629342523514
ctr,host=tars,some=tag-322 n=0i 1476437629342523514
ctr,host=tars,some=tag-323 n=0i 1476437629342523514
ctr,host=tars,some=tag-324 n=0i 1476437629342523514
ctr,host=tars,some=tag-325 n=0i 1476437629342569532
ctr,host=tars,some=tag-326 n=0i 1476437629342523514
ctr,host=tars,some=tag-327 n=0i 1476437629342523514
ctr,host=tars,some=tag-328 n=0i 1476437629342523514
ctr,host=tars,some=tag-329 n=0i 1476437629342523514
ctr,host=tars,some=tag-330 n=0i 1476437629342569532
ctr,host=tars,some=tag-331 n=0i 1476437629342569532
ctr,host=tars,some=tag-332 n=0i 1476437629342569532
ctr,host=tars,some=tag-333 n=0i 1476437629342569532
ctr,host=tars,some=tag-334 n=0i 1476437629342569532
ctr,host=tars,some=tag-335 n=0i 1476437629342569532
ctr,host=tars,some=tag-336 n=0i 1476437629342569532
ctr,host=tars,some=tag-337 n=0i 1476437629342569532
ctr,host=tars,some=tag-338 n=0i 1476437629342523514
ctr,host=tars,some=tag-339 n=0i 1476437629342523514
ctr,host=tars,some=tag-340 n=0i 1476437629342523514
ctr,host=tars,some=tag-341 n=0i 1476437629342569532
ctr,host=tars,some=tag-342 n=1i 1476437629342569532
ctr,host=tars,some=tag-343 n=1i 1476437629342569532
ctr,host=tars,some=tag-344 n=1i 1476437629342569532
ctr,host=tars,some=tag-345 n=1i 1476437629342569532
ctr,host=tars,some=tag-346 n=1i 1476437629342569532
ctr,host=tars,some=tag-347 n=1i 1476437629342569532
ctr,host=tars,some=tag-348 n=1i 1476437629342569532
ctr,host=tars,some=tag-349 n=1i 1476437629342569532
ctr,host=tars,some=tag-350 n=1i 1476437629342569532
ctr,host=tars,some=tag-351 n=1i 1476437629342569532
ctr,host=tars,some=tag-352 n=0i 1476437629342569532
ctr,host=tars,some=tag-353 n=0i 1476437629342569532
ctr,host=tars,some=tag-354 n=0i 1476437629342569532
ctr,host=tars,some=tag-355 n=0i 1476437629342569532
ctr,host=tars,some=tag-356 n=0i 1476437629342569532
ctr,host=tars,some=tag-357 n=0i 1476437629342523514
ctr,host=tars,some=tag-358 n=0i 1476437629342569532
ctr,host=tars,some=tag-359 n=0i 1476437629342569532
ctr,host=tars,some=tag-360 n=0i 1476437629342569532
ctr,host=tars,some=tag-361 n=0i 1476437629342569532
ctr,host=tars,some=tag-362 n=0i 1476437629342569532
ctr,host=tars,some=tag-363 n=0i 1476437629342569532
ctr,host=tars,some=tag-364 n=0i 1476437629342569532
ctr,host=tars,some=tag-365 n=0i 1476437629342569532
ctr,host=tars,some=tag-366 n=0i 1476437629342569532
ctr,host=tars,some=tag-367 n=0i 1476437629342569532
ctr,host=tars,some=tag-368 n=0i 1476437629342523514
ctr,host=tars,some=tag-369 n=0i 1476437629342569532
ctr,host=tars,some=tag-370 n=0i 1476437629342569532
ctr,host=tars,some=tag-371 n=0i 1476437629342569532
ctr,host=tars,some=tag-372 n=0i 1476437629342523514
ctr,host=tars,some=tag-373 n=0i 1476437629342523514
ctr,host=tars,some=tag-374 n=0i 1476437629342569532
ctr,host=tars,some=tag-375 n=0i 1476437629342569532
ctr,host=tars,some=tag-376 n=0i 1476437629342523514
ctr,host=tars,some=tag-377 n=0i 1476437629342523514
ctr,host=tars,some=tag-378 n=0i 1476437629342523514
ctr,host=tars,some=tag-379 n=0i 1476437629342523514
ctr,host=tars,some=tag-380 n=0i 1476437629342523514
ctr,host=tars,some=tag-381 n=0i 1476437629342523514
ctr,host=tars,some=tag-382 n=0i 1476437629342523514
ctr,host=tars,some=tag-383 n=0i 1476437629342523514
ctr,host=tars,some=tag-384 n=0i 1476437629342569532
ctr,host=tars,some=tag-385 n=0i 1476437629342569532
ctr,host=tars,some=tag-386 n=0i 1476437629342569532
ctr,host=tars,some=tag-387 n=0i 1476437629342569532
ctr,host=tars,some=tag-388 n=0i 1476437629342569532
ctr,host=tars,some=tag-389 n=0i 1476437629342569532
ctr,host=tars,some=tag-390 n=0i 1476437629342569532
ctr,host=tars,some=tag-391 n=0i 1476437629342569532
ctr,host=tars,some=tag-392 n=0i 1476437629342569532
ctr,host=tars,some=tag-393 n=0i 1476437629342569532
ctr,host=tars,some=tag-394 n=0i 1476437629342569532
ctr,host=tars,some=tag-395 n=0i 1476437629342523514
ctr,host=tars,some=tag-396 n=0i 1476437629342523514
ctr,host=tars,some=tag-397 n=0i 1476437629342523514
ctr,host=tars,some=tag-398 n=0i 1476437629342523514
ctr,host=tars,some=tag-399 n=0i 1476437629342523514
ctr,host=tars,some=tag-400 n=0i 1476437629342523514
ctr,host=tars,some=tag-401 n=0i 1476437629342523514
ctr,host=tars,some=tag-402 n=0i 1476437629342569532
ctr,host=tars,some=tag-403 n=0i 1476437629342569532
ctr,host=tars,some=tag-404 n=0i 1476437629342523514
ctr,host=tars,some=tag-405 n=0i 1476437629342569532
ctr,host=tars,some=tag-406 n=0i 1476437629342523514
ctr,host=tars,some=tag-407 n=0i 1476437629342523514
ctr,host=tars,some=tag-408 n=0i 1476437629342569532
ctr,host=tars,some=tag-409 n=0i 1476437629342569532
ctr,host=tars,some=tag-410 n=0i 1476437629342523514
ctr,host=tars,some=tag-411 n=0i 1476437629342523514
ctr,host=tars,some=tag-412 n=0i 1476437629342569532
ctr,host=tars,some=tag-413 n=0i 1476437629342523514
ctr,host=tars,some=tag-414 n=0i 1476437629342523514
ctr,host=tars,some=tag-415 n=0i 1476437629342523514
ctr,host=tars,some=tag-416 n=0i 1476437629342569532
ctr,host=tars,some=tag-417 n=0i 1476437629342569532
ctr,host=tars,some=tag-418 n=0i 1476437629342569532
ctr,host=tars,some=tag-419 n=0i 1476437629342523514
ctr,host=tars,some=tag-420 n=0i 1476437629342523514
ctr,host=tars,some=tag-421 n=0i 1476437629342569532
ctr,host=tars,some=tag-422 n=0i 1476437629342569532
ctr,host=tars,some=tag-423 n=0i 1476437629342523514
ctr,host=tars,some=tag-424 n=0i 1476437629342523514
ctr,host=tars,some=tag-425 n=0i 1476437629342523514
ctr,host=tars,some=tag-426 n=1i 1476437629342569532
ctr,host=tars,some=tag-427 n=1i 1476437629342569532
ctr,host=tars,some=tag-428 n=1i 1476437629342569532
ctr,host=tars,some=tag-429 n=1i 1476437629342569532
ctr,host=tars,some=tag-430 n=1i 1476437629342569532
ctr,host=tars,some=tag-431 n=1i 1476437629342569532
ctr,host=tars,some=tag-432 n=1i 1476437629342569532
ctr,host=tars,some=tag-433 n=1i 1476437629342569532
ctr,host=tars,some=tag-434 n=1i 1476437629342569532
ctr,host=tars,some=tag-435 n=1i 1476437629342569532
ctr,host=tars,some=tag-436 n=0i 1476437629342569532
ctr,host=tars,some=tag-437 n=0i 1476437629342569532
ctr,host=tars,some=tag-438 n=0i 1476437629342523514
ctr,host=tars,some=tag-439 n=0i 1476437629342569532
ctr,host=tars,some=tag-440 n=0i 1476437629342569532
ctr,host=tars,some=tag-441 n=0i 1476437629342523514
ctr,host=tars,some=tag-442 n=0i 1476437629342569532
ctr,host=tars,some=tag-443 n=0i 1476437629342569532
ctr,host=tars,some=tag-444 n=0i 1476437629342523514
ctr,host=tars,some=tag-445 n=0i 1476437629342569532
ctr,host=tars,some=tag-446 n=0i 1476437629342523514
ctr,host=tars,some=tag-447 n=0i 1476437629342569532
ctr,host=tars,some=tag-448 n=0i 1476437629342523514
ctr,host=tars,some=tag-449 n=0i 1476437629342523514
ctr,host=tars,some=tag-450 n=0i 1476437629342523514
ctr,host=tars,some=tag-451 n=0i 1476437629342523514
ctr,host=tars,some=tag-452 n=0i 1476437629342523514
ctr,host=tars,some=tag-453 n=0i 1476437629342569532
ctr,host=tars,some=tag-454 n=0i 1476437629342569532
ctr,host=tars,some=tag-455 n=0i 1476437629342569532
ctr,host=tars,some=tag-456 n=0i 1476437629342569532
ctr,host=tars,some=tag-457 n=0i 1476437629342569532
ctr,host=tars,some=tag-458 n=0i 1476437629342569532
ctr,host=tars,some=tag-459 n=0i 1476437629342569532
ctr,host=tars,some=tag-460 n=0i 1476437629342569532
ctr,host=tars,some=tag-461 n=0i 1476437629342523514
ctr,host=tars,some=tag-462 n=0i 1476437629342569532
ctr,host=tars,some=tag-463 n=0i 1476437629342569532
ctr,host=tars,some=tag-464 n=0i 1476437629342523514
ctr,host=tars,some=tag-465 n=0i 1476437629342523514
ctr,host=tars,some=tag-466 n=0i 1476437629342569532
ctr,host=tars,some=tag-467 n=0i 1476437629342569532
ctr,host=tars,some=tag-468 n=0i 1476437629342569532
ctr,host=tars,some=tag-469 n=2i 1476437629342569532
ctr,host=tars,some=tag-470 n=2i 1476437629342569532
ctr,host=tars,some=tag-471 n=2i 1476437629342569532
ctr,host=tars,some=tag-472 n=2i 1476437629342569532
ctr,host=tars,some=tag-473 n=2i 1476437629342569532
ctr,host=tars,some=tag-474 n=2i 1476437629342569532
ctr,host=tars,some=tag-475 n=2i 1476437629342569532
ctr,host=tars,some=tag-476 n=2i 1476437629342569532
ctr,host=tars,some=tag-477 n=2i 1476437629342569532
ctr,host=tars,some=tag-478 n=2i 1476437629342569532
ctr,host=tars,some=tag-479 n=2i 1476437629342569532
ctr,host=tars,some=tag-480 n=2i 1476437629342569532
ctr,host=tars,some=tag-481 n=2i 1476437629342569532
ctr,host=tars,some=tag-482 n=2i 1476437629342569532
ctr,host=tars,some=tag-483 n=2i 1476437629342569532
ctr,host=tars,some=tag-484 n=2i 1476437629342569532
ctr,host=tars,some=tag-485 n=2i 1476437629342569532
ctr,host=tars,some=tag-486 n=2i 1476437629342569532
ctr,host=tars,some=tag-487 n=2i 1476437629342569532
ctr,host=tars,some=tag-488 n=1i 1476437629342569532
ctr,host=tars,some=tag-489 n=1i 1476437629342569532
ctr,host=tars,some=tag-490 n=2i 1476437629342569532
ctr,host=tars,some=tag-491 n=2i 1476437629342569532
ctr,host=tars,some=tag-492 n=2i 1476437629342569532
ctr,host=tars,some=tag-493 n=2i 1476437629342569532
ctr,host=tars,some=tag-494 n=2i 1476437629342569532
ctr,host=tars,some=tag-495 n=2i 1476437629342569532
ctr,host=tars,some=tag-496 n=2i 1476437629342569532
ctr,host=tars,some=tag-497 n=2i 1476437629342569532
ctr,host=tars,some=tag-498 n=1i 1476437629342569532
ctr,host=tars,some=tag-499 n=1i 1476437629342569532

View File

@@ -0,0 +1,89 @@
package influx
import (
"bytes"
"reflect"
"strconv"
"strings"
"unsafe"
)
// Sets of characters whose presence in a token indicates it may contain
// backslash escape sequences and therefore needs unescaping (see
// unescape, nameUnescape, and stringFieldUnescape below).
const (
	escapes            = " ,="
	nameEscapes        = " ,"
	stringFieldEscapes = `\"`
)
var (
	// unescaper reverses the escapes used in tag keys/values and field keys.
	unescaper = strings.NewReplacer(
		`\,`, `,`,
		`\"`, `"`, // NOTE(review): `"` is not in escapes; presumably defensive — confirm
		`\ `, ` `,
		`\=`, `=`,
	)

	// nameUnescaper reverses the escapes used in measurement names.
	nameUnescaper = strings.NewReplacer(
		`\,`, `,`,
		`\ `, ` `,
	)

	// stringFieldUnescaper reverses the escapes used inside quoted string
	// field values.
	stringFieldUnescaper = strings.NewReplacer(
		`\"`, `"`,
		`\\`, `\`,
	)
)
// unescape returns b as a string with tag/field-key escape sequences
// (`\,`, `\"`, `\ `, `\=`) replaced. When b contains none of the
// escapable characters it is converted directly, avoiding the replacer.
func unescape(b []byte) string {
	if bytes.ContainsAny(b, escapes) {
		return unescaper.Replace(unsafeBytesToString(b))
	}
	// No else after return (Go review convention: keep the happy path flat).
	return string(b)
}
// nameUnescape returns b as a string with measurement-name escape
// sequences (`\,`, `\ `) replaced. When b contains none of the
// escapable characters it is converted directly, avoiding the replacer.
func nameUnescape(b []byte) string {
	if bytes.ContainsAny(b, nameEscapes) {
		return nameUnescaper.Replace(unsafeBytesToString(b))
	}
	return string(b)
}
// stringFieldUnescape returns b as a string with quoted-string escape
// sequences (`\"`, `\\`) replaced. When b contains neither a backslash
// nor a quote it is converted directly, avoiding the replacer.
func stringFieldUnescape(b []byte) string {
	if bytes.ContainsAny(b, stringFieldEscapes) {
		return stringFieldUnescaper.Replace(unsafeBytesToString(b))
	}
	return string(b)
}
// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt: it
// parses b in the given base and bit size without copying b into a
// fresh heap-allocated string.
func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {
	return strconv.ParseInt(unsafeBytesToString(b), base, bitSize)
}
// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat: it
// parses b at the given bit size without copying b into a fresh
// heap-allocated string.
func parseFloatBytes(b []byte, bitSize int) (float64, error) {
	return strconv.ParseFloat(unsafeBytesToString(b), bitSize)
}
// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool: it
// parses b without copying it into a fresh heap-allocated string.
func parseBoolBytes(b []byte) (bool, error) {
	s := unsafeBytesToString(b)
	return strconv.ParseBool(s)
}
// unsafeBytesToString converts a []byte to a string without a heap
// allocation by aliasing the slice's backing array as string data.
//
// It is unsafe: the caller must not mutate in afterwards, and the
// result is intended only as input to short-lived functions that
// require strings.
func unsafeBytesToString(in []byte) string {
	var out string
	slice := (*reflect.SliceHeader)(unsafe.Pointer(&in))
	str := (*reflect.StringHeader)(unsafe.Pointer(&out))
	str.Data = slice.Data
	str.Len = slice.Len
	return out
}

View File

@@ -0,0 +1,95 @@
package influx
import (
"bytes"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/prometheus/common/log"
)
// MetricHandler implements the parser's Handler interface, turning
// parse events into telegraf metrics via a metric.Builder.
type MetricHandler struct {
	builder *metric.Builder
	// metrics appears unused in this file — NOTE(review): confirm before removal.
	metrics []telegraf.Metric
	// precision is the unit of incoming integer timestamps; SetTimestamp
	// multiplies by it to produce nanoseconds.
	precision time.Duration
}
// NewMetricHandler returns a MetricHandler with a fresh builder and
// nanosecond timestamp precision.
func NewMetricHandler() *MetricHandler {
	h := &MetricHandler{precision: time.Nanosecond}
	h.builder = metric.NewBuilder()
	return h
}
// SetTimeFunc sets the builder's time source.
func (h *MetricHandler) SetTimeFunc(f metric.TimeFunc) {
	h.builder.TimeFunc = f
}
// SetPrecision sets the unit of subsequently parsed integer timestamps
// (see SetTimestamp, which scales by this factor to nanoseconds).
func (h *MetricHandler) SetPrecision(factor time.Duration) {
	h.precision = factor
}
// Metric returns the metric accumulated in the builder so far.
func (h *MetricHandler) Metric() (telegraf.Metric, error) {
	return h.builder.Metric()
}
// SetMeasurement sets the metric name, reversing measurement-name escapes.
func (h *MetricHandler) SetMeasurement(name []byte) {
	h.builder.SetName(nameUnescape(name))
}
// AddTag records a tag on the metric under construction, reversing
// tag escapes on both key and value.
func (h *MetricHandler) AddTag(key []byte, value []byte) {
	h.builder.AddTag(unescape(key), unescape(value))
}
// AddInt records an integer field on the metric under construction.
// The raw value carries the line protocol "i" suffix, which is stripped
// before parsing; unparseable values are logged and dropped.
func (h *MetricHandler) AddInt(key []byte, value []byte) {
	name := unescape(key)
	raw := bytes.TrimSuffix(value, []byte("i"))
	v, err := parseIntBytes(raw, 10, 64)
	if err != nil {
		log.Errorf("E! Received unparseable int value: %q", value)
		return
	}
	h.builder.AddField(name, v)
}
// AddFloat records a float64 field on the metric under construction;
// unparseable values are logged and dropped.
func (h *MetricHandler) AddFloat(key []byte, value []byte) {
	name := unescape(key)
	v, err := parseFloatBytes(value, 64)
	if err != nil {
		log.Errorf("E! Received unparseable float value: %q", value)
		return
	}
	h.builder.AddField(name, v)
}
// AddString records a string field on the metric under construction,
// reversing key escapes and quoted-string escapes.
func (h *MetricHandler) AddString(key []byte, value []byte) {
	h.builder.AddField(unescape(key), stringFieldUnescape(value))
}
// AddBool records a boolean field on the metric under construction;
// unparseable values are logged and dropped.
func (h *MetricHandler) AddBool(key []byte, value []byte) {
	name := unescape(key)
	v, err := parseBoolBytes(value)
	if err != nil {
		log.Errorf("E! Received unparseable boolean value: %q", value)
		return
	}
	h.builder.AddField(name, v)
}
// SetTimestamp parses tm as an integer timestamp in units of
// h.precision and stores it on the builder as nanoseconds since the
// Unix epoch; unparseable timestamps are logged and dropped.
func (h *MetricHandler) SetTimestamp(tm []byte) {
	ts, err := parseIntBytes(tm, 10, 64)
	if err != nil {
		log.Errorf("E! Received unparseable timestamp: %q", tm)
		return
	}
	h.builder.SetTime(time.Unix(0, ts*int64(h.precision)))
}
// Reset clears the builder so the handler can start a new metric.
func (h *MetricHandler) Reset() {
	h.builder.Reset()
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,297 @@
package influx
import (
"errors"
)
// Sentinel errors reported when a specific element of a line cannot be
// parsed; ErrParse is the catch-all for any other parse failure.
var (
	ErrNameParse      = errors.New("expected measurement name")
	ErrFieldParse     = errors.New("expected field")
	ErrTagParse       = errors.New("expected tag")
	ErrTimestampParse = errors.New("expected timestamp")
	ErrParse          = errors.New("parse error")
)
%%{
machine LineProtocol;

# Record the start position of the token currently being scanned
# (read back via m.text() in the emitting actions below).
action begin {
	m.pb = m.p
}

# A full line was parsed: break out to the caller, resuming in the
# align machine on the next ParseLine call.
action yield {
	yield = true
	fnext align;
	fbreak;
}

# Each *_error action records which element failed, then jumps to
# discard_line so parsing can recover at the start of the next line.
action name_error {
	m.err = ErrNameParse
	fhold;
	fnext discard_line;
	fbreak;
}

action field_error {
	m.err = ErrFieldParse
	fhold;
	fnext discard_line;
	fbreak;
}

action tagset_error {
	m.err = ErrTagParse
	fhold;
	fnext discard_line;
	fbreak;
}

action timestamp_error {
	m.err = ErrTimestampParse
	fhold;
	fnext discard_line;
	fbreak;
}

action parse_error {
	m.err = ErrParse
	fhold;
	fnext discard_line;
	fbreak;
}

# Re-enter the main machine at the held-back character.
action hold_recover {
	fhold;
	fgoto main;
}

action discard {
	fgoto align;
}

# The remaining actions forward captured tokens to the Handler.
action name {
	m.handler.SetMeasurement(m.text())
}

action tagkey {
	key = m.text()
}

action tagvalue {
	m.handler.AddTag(key, m.text())
}

action fieldkey {
	key = m.text()
}

action integer {
	m.handler.AddInt(key, m.text())
}

action float {
	m.handler.AddFloat(key, m.text())
}

action bool {
	m.handler.AddBool(key, m.text())
}

action string {
	m.handler.AddString(key, m.text())
}

action timestamp {
	m.handler.SetTimestamp(m.text())
}

ws =
	[\t\v\f ];

non_zero_digit =
	[1-9];

integer =
	'-'? ( digit | ( non_zero_digit digit* ) );

number =
	( integer ( '.' digit* )? ) | ( '.' digit* );

scientific =
	number 'e'i ["\-+"]? digit+;

# At most 19 digits: the widest magnitude representable in an int64.
timestamp =
	('-'? digit{1,19}) >begin %timestamp;

fieldkeychar =
	[^\t\n\f\r ,=\\] | ( '\\' [^\t\n\f\r] );

fieldkey =
	fieldkeychar+ >begin %fieldkey;

fieldfloat =
	(scientific | number) >begin %float;

fieldinteger =
	(integer 'i') >begin %integer;

false =
	"false" | "FALSE" | "False" | "F" | "f";

true =
	"true" | "TRUE" | "True" | "T" | "t";

fieldbool =
	(true | false) >begin %bool;

fieldstringchar =
	[^\\"] | '\\' [\\"];

fieldstring =
	fieldstringchar* >begin %string;

fieldstringquoted =
	'"' fieldstring '"';

fieldvalue = fieldinteger | fieldfloat | fieldstringquoted | fieldbool;

field =
	fieldkey '=' fieldvalue;

fieldset =
	field ( ',' field )*;

tagchar =
	[^\t\n\f\r ,=\\] | ( '\\' [^\t\n\f\r] );

tagkey =
	tagchar+ >begin %tagkey;

tagvalue =
	tagchar+ >begin %tagvalue;

tagset =
	(',' (tagkey '=' tagvalue) $err(tagset_error))*;

measurement_chars =
	[^\t\n\f\r ,\\] | ( '\\' [^\t\n\f\r] );

# A measurement may not begin with '#', which introduces a comment.
measurement_start =
	measurement_chars - '#';

measurement =
	(measurement_start measurement_chars*) >begin %name;

newline =
	[\r\n];

comment =
	'#' (any -- newline)* newline;

eol =
	ws* newline? >yield %eof(yield);

line =
	measurement
	tagset
	(ws+ fieldset) $err(field_error)
	(ws+ timestamp)? $err(timestamp_error)
	eol;

# The main machine parses a single line of line protocol.
main := line $err(parse_error);

# The discard_line machine discards the current line. Useful for recovering
# on the next line when an error occurs.
discard_line :=
	(any - newline)* newline @discard;

# The align machine scans forward to the start of the next line. This machine
# is used to skip over whitespace and comments, keeping this logic out of the
# main machine.
align :=
	(space* comment)* space* measurement_start @hold_recover %eof(yield);
}%%

%% write data;
// machine is a Ragel-driven scanner over a single buffer of line protocol
// input. Parse events are reported to the Handler as the generated state
// machine executes.
type machine struct {
	data    []byte  // input buffer being scanned
	cs      int     // current Ragel state
	p, pe, eof int  // Ragel cursor, buffer end, and EOF markers
	pb      int     // start of the token currently being captured
	handler Handler // receives measurement/tag/field/timestamp events
	err     error   // error from the most recent ParseLine call
}
// NewMachine returns a machine that reports parse events to handler. The
// Ragel directives below bind the generated scanner to the machine struct
// fields and initialize the generated state ("write init").
func NewMachine(handler Handler) *machine {
	m := &machine{
		handler: handler,
	}

	%% access m.;
	%% variable p m.p;
	%% variable pe m.pe;
	%% variable eof m.eof;
	%% variable data m.data;
	%% write init;

	return m
}
// SetData resets the machine to scan a new input buffer. State from any
// previous buffer (cursor, pending error) is discarded, and the machine is
// started at the "align" entry point so leading whitespace and comments are
// skipped before the first line is parsed.
func (m *machine) SetData(data []byte) {
	m.data = data
	m.p = 0
	m.pb = 0
	m.pe = len(data)
	m.eof = len(data)
	m.err = nil

	%% write init;
	m.cs = LineProtocol_en_align
}
// ParseLine parses a line of input and returns true if more data can be
// parsed. After each call, check Err for the outcome of that line.
func (m *machine) ParseLine() bool {
	// End of input: report completion and clear any stale error.
	if m.data == nil || m.p >= m.pe {
		m.err = nil
		return false
	}

	m.err = nil
	var key []byte // scratch used by the generated actions for the current key
	var yield bool // set by the "yield" action when a full line is accepted

	%% write exec;

	// Even if there was an error, return true. On the next call to this
	// function we will attempt to scan to the next line of input and recover.
	if m.err != nil {
		return true
	}

	// Don't check the error state in the case that we just yielded, because
	// the yield indicates we just completed parsing a line.
	if !yield && m.cs == LineProtocol_error {
		m.err = ErrParse
		return true
	}

	return true
}
// Err returns the error that occurred on the last call to ParseLine. If the
// result is nil, then the line was parsed successfully.
func (m *machine) Err() error {
	return m.err
}

// Position returns the current position into the input.
func (m *machine) Position() int {
	return m.p
}

// text returns the bytes of the token currently being captured: the span
// from the last "begin" action (pb) up to, but not including, the cursor.
func (m *machine) text() []byte {
	return m.data[m.pb:m.p]
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,64 +1,112 @@
package influx
import (
"bytes"
"errors"
"fmt"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
)
// InfluxParser is an object for Parsing incoming metrics.
type InfluxParser struct {
// DefaultTags will be added to every parsed metric
DefaultTags map[string]string
const (
	// maxErrorBufferSize caps how much of the offending input is echoed
	// back in a ParseError message.
	maxErrorBufferSize = 1024
)

var (
	// ErrNoMetric is returned by ParseLine when the input contained no
	// parseable metric.
	ErrNoMetric = errors.New("no metric in line")
)
// Handler receives parse events from the line protocol machine. All byte
// slice arguments are views into the machine's input buffer and are only
// valid for the duration of the call.
type Handler interface {
	SetMeasurement(name []byte)
	AddTag(key []byte, value []byte)
	AddInt(key []byte, value []byte)
	AddFloat(key []byte, value []byte)
	AddString(key []byte, value []byte)
	AddBool(key []byte, value []byte)
	SetTimestamp(tm []byte)
	Reset()
}
func (p *InfluxParser) ParseWithDefaultTimePrecision(buf []byte, t time.Time, precision string) ([]telegraf.Metric, error) {
if !bytes.HasSuffix(buf, []byte("\n")) {
buf = append(buf, '\n')
type ParseError struct {
Offset int
msg string
buf string
}
func (e *ParseError) Error() string {
buffer := e.buf
if len(buffer) > maxErrorBufferSize {
buffer = buffer[:maxErrorBufferSize] + "..."
}
// parse even if the buffer begins with a newline
buf = bytes.TrimPrefix(buf, []byte("\n"))
metrics, err := metric.ParseWithDefaultTimePrecision(buf, t, precision)
if len(p.DefaultTags) > 0 {
for _, m := range metrics {
for k, v := range p.DefaultTags {
// only set the default tag if it doesn't already exist:
if !m.HasTag(k) {
m.AddTag(k, v)
}
return fmt.Sprintf("metric parse error: %s at offset %d: %q", e.msg, e.Offset, buffer)
}
// Parser parses InfluxDB Line Protocol into telegraf metrics.
type Parser struct {
	// DefaultTags are added to every parsed metric unless the metric
	// already has a tag with the same key.
	DefaultTags map[string]string

	*machine
	handler *MetricHandler
}

// NewParser returns a Parser whose parse events are accumulated into
// metrics by handler.
func NewParser(handler *MetricHandler) *Parser {
	return &Parser{
		machine: NewMachine(handler),
		handler: handler,
	}
}
func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) {
metrics := make([]telegraf.Metric, 0)
p.machine.SetData(input)
for p.machine.ParseLine() {
err := p.machine.Err()
if err != nil {
return nil, &ParseError{
Offset: p.machine.Position(),
msg: err.Error(),
buf: string(input),
}
}
metric, err := p.handler.Metric()
if err != nil {
return nil, err
}
p.handler.Reset()
metrics = append(metrics, metric)
}
return metrics, err
p.applyDefaultTags(metrics)
return metrics, nil
}
// Parse returns a slice of Metrics from a text representation of a
// metric (in line-protocol format)
// with each metric separated by newlines. If any metrics fail to parse,
// a non-nil error will be returned in addition to the metrics that parsed
// successfully.
func (p *InfluxParser) Parse(buf []byte) ([]telegraf.Metric, error) {
return p.ParseWithDefaultTimePrecision(buf, time.Now(), "")
}
func (p *InfluxParser) ParseLine(line string) (telegraf.Metric, error) {
func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
metrics, err := p.Parse([]byte(line + "\n"))
if err != nil {
return nil, err
}
if len(metrics) < 1 {
return nil, fmt.Errorf(
"Can not parse the line: %s, for data format: influx ", line)
return nil, ErrNoMetric
}
return metrics[0], nil
}
func (p *InfluxParser) SetDefaultTags(tags map[string]string) {
func (p *Parser) SetDefaultTags(tags map[string]string) {
p.DefaultTags = tags
}
// applyDefaultTags adds each configured default tag to every metric that
// does not already define a tag with that key.
func (p *Parser) applyDefaultTags(metrics []telegraf.Metric) {
	if len(p.DefaultTags) == 0 {
		return
	}

	for key, value := range p.DefaultTags {
		for _, metric := range metrics {
			if metric.HasTag(key) {
				continue
			}
			metric.AddTag(key, value)
		}
	}
}

View File

@@ -1,294 +1,488 @@
package influx
import (
"fmt"
"io/ioutil"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/stretchr/testify/assert"
"github.com/influxdata/telegraf/metric"
"github.com/stretchr/testify/require"
)
var (
ms []telegraf.Metric
writer = ioutil.Discard
metrics500 []byte
exptime = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).UnixNano()
)
const (
validInflux = "cpu_load_short,cpu=cpu0 value=10 1257894000000000000\n"
negativeFloat = "cpu_load_short,cpu=cpu0 value=-13.4 1257894000000000000\n"
validInfluxNewline = "\ncpu_load_short,cpu=cpu0 value=10 1257894000000000000\n"
validInfluxNoNewline = "cpu_load_short,cpu=cpu0 value=10 1257894000000000000"
invalidInflux = "I don't think this is line protocol\n"
invalidInflux2 = "{\"a\": 5, \"b\": {\"c\": 6}}\n"
invalidInflux3 = `name text="unescaped "quote" ",value=1 1498077493081000000`
invalidInflux4 = `name text="unbalanced "quote" 1498077493081000000`
)
const influxMulti = `
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
`
const influxMultiSomeInvalid = `
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu3, host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu4 , usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
`
func TestParseValidInflux(t *testing.T) {
parser := InfluxParser{}
metrics, err := parser.Parse([]byte(validInflux))
assert.NoError(t, err)
assert.Len(t, metrics, 1)
assert.Equal(t, "cpu_load_short", metrics[0].Name())
assert.Equal(t, map[string]interface{}{
"value": float64(10),
}, metrics[0].Fields())
assert.Equal(t, map[string]string{
"cpu": "cpu0",
}, metrics[0].Tags())
assert.Equal(t, exptime, metrics[0].Time().UnixNano())
metrics, err = parser.Parse([]byte(validInfluxNewline))
assert.NoError(t, err)
assert.Len(t, metrics, 1)
assert.Equal(t, "cpu_load_short", metrics[0].Name())
assert.Equal(t, map[string]interface{}{
"value": float64(10),
}, metrics[0].Fields())
assert.Equal(t, map[string]string{
"cpu": "cpu0",
}, metrics[0].Tags())
assert.Equal(t, exptime, metrics[0].Time().UnixNano())
metrics, err = parser.Parse([]byte(validInfluxNoNewline))
assert.NoError(t, err)
assert.Len(t, metrics, 1)
assert.Equal(t, "cpu_load_short", metrics[0].Name())
assert.Equal(t, map[string]interface{}{
"value": float64(10),
}, metrics[0].Fields())
assert.Equal(t, map[string]string{
"cpu": "cpu0",
}, metrics[0].Tags())
assert.Equal(t, exptime, metrics[0].Time().UnixNano())
metrics, err = parser.Parse([]byte(negativeFloat))
assert.NoError(t, err)
assert.Len(t, metrics, 1)
assert.Equal(t, "cpu_load_short", metrics[0].Name())
assert.Equal(t, map[string]interface{}{
"value": float64(-13.4),
}, metrics[0].Fields())
assert.Equal(t, map[string]string{
"cpu": "cpu0",
}, metrics[0].Tags())
assert.Equal(t, exptime, metrics[0].Time().UnixNano())
}
func TestParseLineValidInflux(t *testing.T) {
parser := InfluxParser{}
metric, err := parser.ParseLine(validInflux)
assert.NoError(t, err)
assert.Equal(t, "cpu_load_short", metric.Name())
assert.Equal(t, map[string]interface{}{
"value": float64(10),
}, metric.Fields())
assert.Equal(t, map[string]string{
"cpu": "cpu0",
}, metric.Tags())
assert.Equal(t, exptime, metric.Time().UnixNano())
metric, err = parser.ParseLine(validInfluxNewline)
assert.NoError(t, err)
assert.Equal(t, "cpu_load_short", metric.Name())
assert.Equal(t, map[string]interface{}{
"value": float64(10),
}, metric.Fields())
assert.Equal(t, map[string]string{
"cpu": "cpu0",
}, metric.Tags())
assert.Equal(t, exptime, metric.Time().UnixNano())
}
func TestParseMultipleValid(t *testing.T) {
parser := InfluxParser{}
metrics, err := parser.Parse([]byte(influxMulti))
assert.NoError(t, err)
assert.Len(t, metrics, 7)
for _, metric := range metrics {
assert.Equal(t, "cpu", metric.Name())
assert.Equal(t, map[string]string{
"datacenter": "us-east",
"host": "foo",
}, metrics[0].Tags())
assert.Equal(t, map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}, metrics[0].Fields())
}
}
func TestParseSomeValid(t *testing.T) {
parser := InfluxParser{}
metrics, err := parser.Parse([]byte(influxMultiSomeInvalid))
assert.Error(t, err)
assert.Len(t, metrics, 4)
for _, metric := range metrics {
assert.Equal(t, "cpu", metric.Name())
assert.Equal(t, map[string]string{
"datacenter": "us-east",
"host": "foo",
}, metrics[0].Tags())
assert.Equal(t, map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}, metrics[0].Fields())
}
}
// Test that default tags are applied.
func TestParseDefaultTags(t *testing.T) {
parser := InfluxParser{
DefaultTags: map[string]string{
"tag": "default",
},
}
metrics, err := parser.Parse([]byte(influxMultiSomeInvalid))
assert.Error(t, err)
assert.Len(t, metrics, 4)
for _, metric := range metrics {
assert.Equal(t, "cpu", metric.Name())
assert.Equal(t, map[string]string{
"datacenter": "us-east",
"host": "foo",
"tag": "default",
}, metric.Tags())
assert.Equal(t, map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}, metric.Fields())
}
}
// Verify that metric tags will override default tags
func TestParseDefaultTagsOverride(t *testing.T) {
parser := InfluxParser{
DefaultTags: map[string]string{
"host": "default",
},
}
metrics, err := parser.Parse([]byte(influxMultiSomeInvalid))
assert.Error(t, err)
assert.Len(t, metrics, 4)
for _, metric := range metrics {
assert.Equal(t, "cpu", metric.Name())
assert.Equal(t, map[string]string{
"datacenter": "us-east",
"host": "foo",
}, metrics[0].Tags())
assert.Equal(t, map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}, metrics[0].Fields())
}
}
func TestParseInvalidInflux(t *testing.T) {
parser := InfluxParser{}
_, err := parser.Parse([]byte(invalidInflux))
assert.Error(t, err)
_, err = parser.Parse([]byte(invalidInflux2))
assert.Error(t, err)
_, err = parser.Parse([]byte(invalidInflux3))
assert.Error(t, err)
fmt.Printf("%+v\n", err) // output for debug
_, err = parser.Parse([]byte(invalidInflux4))
assert.Error(t, err)
_, err = parser.ParseLine(invalidInflux)
assert.Error(t, err)
_, err = parser.ParseLine(invalidInflux2)
assert.Error(t, err)
_, err = parser.ParseLine(invalidInflux3)
assert.Error(t, err)
_, err = parser.ParseLine(invalidInflux4)
assert.Error(t, err)
}
func BenchmarkSingle(b *testing.B) {
parser := InfluxParser{}
b.ResetTimer()
for n := 0; n < b.N; n++ {
_, err := parser.Parse([]byte("cpu value=42\n"))
if err != nil {
panic(err)
}
}
}
func BenchmarkParse(b *testing.B) {
var err error
parser := InfluxParser{}
for n := 0; n < b.N; n++ {
// parse:
ms, err = parser.Parse(metrics500)
if err != nil {
panic(err)
}
if len(ms) != 500 {
panic("500 metrics not parsed!!")
}
}
}
func BenchmarkParseAddTagWrite(b *testing.B) {
var err error
parser := InfluxParser{}
for n := 0; n < b.N; n++ {
ms, err = parser.Parse(metrics500)
if err != nil {
panic(err)
}
if len(ms) != 500 {
panic("500 metrics not parsed!!")
}
for _, tmp := range ms {
tmp.AddTag("host", "localhost")
writer.Write(tmp.Serialize())
}
}
}
func init() {
var err error
metrics500, err = ioutil.ReadFile("500.metrics")
// Metric is a test helper that unwraps the (metric, error) pair returned by
// metric.New, panicking on error so table entries stay concise.
func Metric(v telegraf.Metric, err error) telegraf.Metric {
	if err != nil {
		panic(err)
	}
	return v
}
// DefaultTime is the fixed "current time" given to the parser under test so
// metrics without an explicit timestamp are deterministic.
var DefaultTime = func() time.Time {
	return time.Unix(42, 0)
}
var ptests = []struct {
name string
input []byte
metrics []telegraf.Metric
err error
}{
{
name: "minimal",
input: []byte("cpu value=42 0"),
metrics: []telegraf.Metric{
Metric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
),
),
},
err: nil,
},
{
name: "minimal with newline",
input: []byte("cpu value=42 0\n"),
metrics: []telegraf.Metric{
Metric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
),
),
},
err: nil,
},
{
name: "measurement escape space",
input: []byte(`c\ pu value=42`),
metrics: []telegraf.Metric{
Metric(
metric.New(
"c pu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(42, 0),
),
),
},
err: nil,
},
{
name: "measurement escape comma",
input: []byte(`c\,pu value=42`),
metrics: []telegraf.Metric{
Metric(
metric.New(
"c,pu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(42, 0),
),
),
},
err: nil,
},
{
name: "tags",
input: []byte(`cpu,cpu=cpu0,host=localhost value=42`),
metrics: []telegraf.Metric{
Metric(
metric.New(
"cpu",
map[string]string{
"cpu": "cpu0",
"host": "localhost",
},
map[string]interface{}{
"value": 42.0,
},
time.Unix(42, 0),
),
),
},
err: nil,
},
{
name: "tags escape unescapable",
input: []byte(`cpu,ho\st=localhost value=42`),
metrics: []telegraf.Metric{
Metric(
metric.New(
"cpu",
map[string]string{
`ho\st`: "localhost",
},
map[string]interface{}{
"value": 42.0,
},
time.Unix(42, 0),
),
),
},
err: nil,
},
{
name: "tags escape equals",
input: []byte(`cpu,ho\=st=localhost value=42`),
metrics: []telegraf.Metric{
Metric(
metric.New(
"cpu",
map[string]string{
"ho=st": "localhost",
},
map[string]interface{}{
"value": 42.0,
},
time.Unix(42, 0),
),
),
},
err: nil,
},
{
name: "tags escape comma",
input: []byte(`cpu,ho\,st=localhost value=42`),
metrics: []telegraf.Metric{
Metric(
metric.New(
"cpu",
map[string]string{
"ho,st": "localhost",
},
map[string]interface{}{
"value": 42.0,
},
time.Unix(42, 0),
),
),
},
err: nil,
},
{
name: "field key escape not escapable",
input: []byte(`cpu va\lue=42`),
metrics: []telegraf.Metric{
Metric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
`va\lue`: 42.0,
},
time.Unix(42, 0),
),
),
},
err: nil,
},
{
name: "field key escape equals",
input: []byte(`cpu va\=lue=42`),
metrics: []telegraf.Metric{
Metric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
`va=lue`: 42.0,
},
time.Unix(42, 0),
),
),
},
err: nil,
},
{
name: "field key escape comma",
input: []byte(`cpu va\,lue=42`),
metrics: []telegraf.Metric{
Metric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
`va,lue`: 42.0,
},
time.Unix(42, 0),
),
),
},
err: nil,
},
{
name: "field key escape space",
input: []byte(`cpu va\ lue=42`),
metrics: []telegraf.Metric{
Metric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
`va lue`: 42.0,
},
time.Unix(42, 0),
),
),
},
err: nil,
},
{
name: "field int",
input: []byte("cpu value=42i"),
metrics: []telegraf.Metric{
Metric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42,
},
time.Unix(42, 0),
),
),
},
err: nil,
},
{
name: "field boolean",
input: []byte("cpu value=true"),
metrics: []telegraf.Metric{
Metric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": true,
},
time.Unix(42, 0),
),
),
},
err: nil,
},
{
name: "field string",
input: []byte(`cpu value="42"`),
metrics: []telegraf.Metric{
Metric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": "42",
},
time.Unix(42, 0),
),
),
},
err: nil,
},
{
name: "field string escape quote",
input: []byte(`cpu value="how\"dy"`),
metrics: []telegraf.Metric{
Metric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
`value`: `how"dy`,
},
time.Unix(42, 0),
),
),
},
err: nil,
},
{
name: "field string escape backslash",
input: []byte(`cpu value="how\\dy"`),
metrics: []telegraf.Metric{
Metric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
`value`: `how\dy`,
},
time.Unix(42, 0),
),
),
},
err: nil,
},
{
name: "default timestamp",
input: []byte("cpu value=42"),
metrics: []telegraf.Metric{
Metric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(42, 0),
),
),
},
err: nil,
},
{
name: "multiple lines",
input: []byte("cpu value=42\ncpu value=42"),
metrics: []telegraf.Metric{
Metric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(42, 0),
),
),
Metric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(42, 0),
),
),
},
err: nil,
},
{
name: "invalid measurement only",
input: []byte("cpu"),
metrics: nil,
err: &ParseError{
Offset: 3,
msg: ErrFieldParse.Error(),
buf: "cpu",
},
},
{
name: "procstat",
input: []byte("procstat,exe=bash,process_name=bash voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,cpu_time_stolen=0,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000"),
metrics: []telegraf.Metric{
Metric(
metric.New(
"procstat",
map[string]string{
"exe": "bash",
"process_name": "bash",
},
map[string]interface{}{
"cpu_time": 0,
"cpu_time_guest": float64(0),
"cpu_time_guest_nice": float64(0),
"cpu_time_idle": float64(0),
"cpu_time_iowait": float64(0),
"cpu_time_irq": float64(0),
"cpu_time_nice": float64(0),
"cpu_time_soft_irq": float64(0),
"cpu_time_steal": float64(0),
"cpu_time_stolen": float64(0),
"cpu_time_system": float64(0),
"cpu_time_user": float64(0.02),
"cpu_usage": float64(0),
"involuntary_context_switches": 2,
"memory_data": 1576960,
"memory_locked": 0,
"memory_rss": 5103616,
"memory_stack": 139264,
"memory_swap": 0,
"memory_vms": 21659648,
"nice_priority": 20,
"num_fds": 4,
"num_threads": 1,
"pid": 29417,
"read_bytes": 0,
"read_count": 259,
"realtime_priority": 0,
"rlimit_cpu_time_hard": 2147483647,
"rlimit_cpu_time_soft": 2147483647,
"rlimit_file_locks_hard": 2147483647,
"rlimit_file_locks_soft": 2147483647,
"rlimit_memory_data_hard": 2147483647,
"rlimit_memory_data_soft": 2147483647,
"rlimit_memory_locked_hard": 65536,
"rlimit_memory_locked_soft": 65536,
"rlimit_memory_rss_hard": 2147483647,
"rlimit_memory_rss_soft": 2147483647,
"rlimit_memory_stack_hard": 2147483647,
"rlimit_memory_stack_soft": 8388608,
"rlimit_memory_vms_hard": 2147483647,
"rlimit_memory_vms_soft": 2147483647,
"rlimit_nice_priority_hard": 0,
"rlimit_nice_priority_soft": 0,
"rlimit_num_fds_hard": 4096,
"rlimit_num_fds_soft": 1024,
"rlimit_realtime_priority_hard": 0,
"rlimit_realtime_priority_soft": 0,
"rlimit_signals_pending_hard": 78994,
"rlimit_signals_pending_soft": 78994,
"signals_pending": 0,
"voluntary_context_switches": 42,
"write_bytes": 106496,
"write_count": 35,
},
time.Unix(0, 1517620624000000000),
),
),
},
err: nil,
},
}
// TestParser runs every table-driven case in ptests and compares the parsed
// metrics field-by-field against the expected values.
func TestParser(t *testing.T) {
	for _, tt := range ptests {
		t.Run(tt.name, func(t *testing.T) {
			handler := NewMetricHandler()
			// Pin "now" so inputs without a timestamp are deterministic.
			handler.SetTimeFunc(DefaultTime)
			parser := NewParser(handler)

			metrics, err := parser.Parse(tt.input)
			require.Equal(t, tt.err, err)
			require.Equal(t, len(tt.metrics), len(metrics))
			for i, expected := range tt.metrics {
				require.Equal(t, expected.Name(), metrics[i].Name())
				require.Equal(t, expected.Tags(), metrics[i].Tags())
				require.Equal(t, expected.Fields(), metrics[i].Fields())
				require.Equal(t, expected.Time(), metrics[i].Time())
			}
		})
	}
}
// BenchmarkParser measures parse throughput for each table-driven case in
// ptests.
func BenchmarkParser(b *testing.B) {
	for _, tt := range ptests {
		b.Run(tt.name, func(b *testing.B) {
			handler := NewMetricHandler()
			parser := NewParser(handler)
			for n := 0; n < b.N; n++ {
				metrics, err := parser.Parse(tt.input)
				// Sink results so the compiler cannot elide the call.
				_ = err
				_ = metrics
			}
		})
	}
}

View File

@@ -2,6 +2,7 @@ package nagios
import (
"regexp"
"strconv"
"strings"
"time"
@@ -72,23 +73,41 @@ func (p *NagiosParser) Parse(buf []byte) ([]telegraf.Metric, error) {
fieldName := string(perf[0][1])
tags := make(map[string]string)
if perf[0][3] != nil {
tags["unit"] = string(perf[0][3])
str := string(perf[0][3])
if str != "" {
tags["unit"] = str
}
}
fields := make(map[string]interface{})
fields["value"] = perf[0][2]
f, err := strconv.ParseFloat(string(perf[0][2]), 64)
if err == nil {
fields["value"] = f
}
// TODO should we set empty field
// if metric if there is no data ?
if perf[0][4] != nil {
fields["warning"] = perf[0][4]
f, err := strconv.ParseFloat(string(perf[0][4]), 64)
if err == nil {
fields["warning"] = f
}
}
if perf[0][5] != nil {
fields["critical"] = perf[0][5]
f, err := strconv.ParseFloat(string(perf[0][5]), 64)
if err == nil {
fields["critical"] = f
}
}
if perf[0][6] != nil {
fields["min"] = perf[0][6]
f, err := strconv.ParseFloat(string(perf[0][6]), 64)
if err == nil {
fields["min"] = f
}
}
if perf[0][7] != nil {
fields["max"] = perf[0][7]
f, err := strconv.ParseFloat(string(perf[0][7]), 64)
if err == nil {
fields["max"] = f
}
}
// Create metric
metric, err := metric.New(fieldName, tags, fields, time.Now().UTC())

View File

@@ -134,7 +134,8 @@ func NewNagiosParser() (Parser, error) {
}
func NewInfluxParser() (Parser, error) {
return &influx.InfluxParser{}, nil
handler := influx.NewMetricHandler()
return influx.NewParser(handler), nil
}
func NewGraphiteParser(

View File

@@ -37,10 +37,10 @@ func (p *Override) Apply(in ...telegraf.Metric) []telegraf.Metric {
metric.SetName(p.NameOverride)
}
if len(p.NamePrefix) > 0 {
metric.SetPrefix(p.NamePrefix)
metric.AddPrefix(p.NamePrefix)
}
if len(p.NameSuffix) > 0 {
metric.SetSuffix(p.NameSuffix)
metric.AddSuffix(p.NameSuffix)
}
for key, value := range p.Tags {
metric.AddTag(key, value)

View File

@@ -5,9 +5,12 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/processors"
"github.com/influxdata/telegraf/plugins/serializers"
"github.com/influxdata/telegraf/plugins/serializers/influx"
)
type Printer struct {
serializer serializers.Serializer
}
var sampleConfig = `
@@ -23,13 +26,19 @@ func (p *Printer) Description() string {
func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric {
for _, metric := range in {
fmt.Println(metric.String())
octets, err := p.serializer.Serialize(metric)
if err != nil {
continue
}
fmt.Println(octets)
}
return in
}
func init() {
processors.Add("printer", func() telegraf.Processor {
return &Printer{}
return &Printer{
serializer: influx.NewSerializer(),
}
})
}

View File

@@ -35,7 +35,7 @@ func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) {
out := []byte{}
// Convert UnixNano to Unix timestamps
timestamp := metric.UnixNano() / 1000000000
timestamp := metric.Time().UnixNano() / 1000000000
bucket := SerializeBucketName(metric.Name(), metric.Tags(), s.Template, s.Prefix)
if bucket == "" {

View File

@@ -0,0 +1,52 @@
package influx
import "strings"
const (
	// Characters that trigger escaping for each line protocol element.
	escapes            = " ,="
	nameEscapes        = " ,"
	stringFieldEscapes = `\"`
)

var (
	// escaper escapes tag keys, tag values, and field keys.
	escaper = strings.NewReplacer(
		`,`, `\,`,
		`"`, `\"`, // NOTE(review): '"' is replaced here but is not in escapes above — confirm intent
		` `, `\ `,
		`=`, `\=`,
	)

	// nameEscaper escapes measurement names.
	nameEscaper = strings.NewReplacer(
		`,`, `\,`,
		` `, `\ `,
	)

	// stringFieldEscaper escapes the contents of quoted string field values.
	stringFieldEscaper = strings.NewReplacer(
		`"`, `\"`,
		`\`, `\\`,
	)
)

// escape backslash-escapes s for use as a tag key, tag value, or field key.
func escape(s string) string {
	if !strings.ContainsAny(s, escapes) {
		return s
	}
	return escaper.Replace(s)
}

// nameEscape backslash-escapes s for use as a measurement name.
func nameEscape(s string) string {
	if !strings.ContainsAny(s, nameEscapes) {
		return s
	}
	return nameEscaper.Replace(s)
}

// stringFieldEscape backslash-escapes s for use inside a quoted string
// field value.
func stringFieldEscape(s string) string {
	if !strings.ContainsAny(s, stringFieldEscapes) {
		return s
	}
	return stringFieldEscaper.Replace(s)
}

View File

@@ -1,12 +1,277 @@
package influx
import (
"bytes"
"errors"
"io"
"math"
"sort"
"strconv"
"github.com/influxdata/telegraf"
)
type InfluxSerializer struct {
// MaxInt is the largest value representable by the platform int type.
const MaxInt = int(^uint(0) >> 1)

// FieldSortOrder selects whether fields are serialized in their original
// order or sorted by key.
type FieldSortOrder int

const (
	NoSortFields FieldSortOrder = iota
	SortFields
)
// Sentinel errors returned while serializing a metric.
var (
	ErrNeedMoreSpace    = errors.New("need more space")
	ErrInvalidName      = errors.New("invalid name")
	ErrInvalidFieldKey  = errors.New("invalid field key")
	ErrInvalidFieldType = errors.New("invalid field type")
	ErrFieldIsNaN       = errors.New("is NaN")
	ErrFieldIsInf       = errors.New("is Inf")
	ErrNoFields         = errors.New("no fields")
)
// Serializer is a serializer for line protocol.
type Serializer struct {
	maxLineBytes   int            // when > 0, soft upper bound on output line length
	bytesWritten   int            // running count of bytes written via write/writeString
	fieldSortOrder FieldSortOrder // whether fields are sorted by key before output

	buf bytes.Buffer // scratch output buffer used by Serialize

	// Reusable scratch slices for the three parts of a line:
	// "<header><pair>[,<pair>...]<footer>".
	header []byte // measurement + tags + trailing space
	footer []byte // space + timestamp + newline
	pair   []byte // a single "key=value" field pair
}
func (s *InfluxSerializer) Serialize(m telegraf.Metric) ([]byte, error) {
return m.Serialize(), nil
// NewSerializer returns a Serializer with field sorting disabled and
// pre-sized scratch buffers.
func NewSerializer() *Serializer {
	serializer := &Serializer{
		fieldSortOrder: NoSortFields,

		header: make([]byte, 0, 50),
		footer: make([]byte, 0, 21), // fits " " + max int64 timestamp + "\n"
		pair:   make([]byte, 0, 50),
	}
	return serializer
}
// SetMaxLineBytes sets a soft upper bound on output line length; metrics
// whose fields exceed it are split across multiple lines. Zero disables
// splitting.
func (s *Serializer) SetMaxLineBytes(bytes int) {
	s.maxLineBytes = bytes
}

// SetFieldSortOrder controls whether fields are serialized sorted by key.
func (s *Serializer) SetFieldSortOrder(order FieldSortOrder) {
	s.fieldSortOrder = order
}
// Serialize writes the telegraf.Metric to a byte slice. May produce multiple
// lines of output if longer than maximum line length. Lines are terminated
// with a newline (LF) char.
func (s *Serializer) Serialize(m telegraf.Metric) ([]byte, error) {
	s.buf.Reset()
	if err := s.writeMetric(&s.buf, m); err != nil {
		return nil, err
	}

	// Copy out of the reusable buffer so the caller owns the result.
	out := append([]byte(nil), s.buf.Bytes()...)
	return out, nil
}
// Write serializes the metric directly to w and returns the cumulative
// number of bytes this Serializer has written.
func (s *Serializer) Write(w io.Writer, m telegraf.Metric) (int, error) {
	err := s.writeMetric(w, m)
	return s.bytesWritten, err
}

// writeString writes str to w, adding the byte count to s.bytesWritten.
func (s *Serializer) writeString(w io.Writer, str string) error {
	n, err := io.WriteString(w, str)
	s.bytesWritten += n
	return err
}

// write writes b to w, adding the byte count to s.bytesWritten.
func (s *Serializer) write(w io.Writer, b []byte) error {
	n, err := w.Write(b)
	s.bytesWritten += n
	return err
}
// buildHeader fills s.header with the escaped measurement name and tag set,
// ending with the space that separates the header from the field pairs.
func (s *Serializer) buildHeader(m telegraf.Metric) error {
	s.header = s.header[:0]

	name := nameEscape(m.Name())
	if name == "" {
		return ErrInvalidName
	}
	s.header = append(s.header, name...)

	for _, tag := range m.TagList() {
		// Some keys and values are not encodeable as line protocol, such as
		// those with a trailing '\' or empty strings; drop those tags.
		key := escape(tag.Key)
		if key == "" {
			continue
		}
		value := escape(tag.Value)
		if value == "" {
			continue
		}

		s.header = append(s.header, ',')
		s.header = append(s.header, key...)
		s.header = append(s.header, '=')
		s.header = append(s.header, value...)
	}

	s.header = append(s.header, ' ')
	return nil
}
// buildFooter fills s.footer with the timestamp suffix: a space, the
// nanosecond Unix time, and the terminating newline.
func (s *Serializer) buildFooter(m telegraf.Metric) {
	s.footer = s.footer[:0]
	s.footer = append(s.footer, ' ')
	s.footer = strconv.AppendInt(s.footer, m.Time().UnixNano(), 10)
	s.footer = append(s.footer, '\n')
}
// buildFieldPair fills s.pair with "key=value" for a single field, escaping
// the key and encoding the value in line protocol form.
func (s *Serializer) buildFieldPair(key string, value interface{}) error {
	s.pair = s.pair[:0]
	key = escape(key)

	// Some keys are not encodeable as line protocol, such as those with a
	// trailing '\' or empty strings.
	if key == "" {
		return ErrInvalidFieldKey
	}

	s.pair = append(s.pair, key...)
	s.pair = append(s.pair, '=')
	pair, err := appendFieldValue(s.pair, value)
	if err != nil {
		return err
	}
	s.pair = pair
	return nil
}
// writeMetric serializes m to w as one or more lines of line protocol.
// When maxLineBytes is set, fields that would overflow the limit are
// continued on a new line that repeats the same header and footer.
//
// Fixes over the previous version: the two `s.write(w, s.pair)` calls no
// longer ignore write errors, and pairsLen is reset when a new line is
// started so the length estimate does not include pairs already flushed on
// earlier lines (which caused premature splitting).
func (s *Serializer) writeMetric(w io.Writer, m telegraf.Metric) error {
	var err error

	err = s.buildHeader(m)
	if err != nil {
		return err
	}

	s.buildFooter(m)

	if s.fieldSortOrder == SortFields {
		sort.Slice(m.FieldList(), func(i, j int) bool {
			return m.FieldList()[i].Key < m.FieldList()[j].Key
		})
	}

	pairsLen := 0 // bytes of field pairs written on the current line
	firstField := true
	for _, field := range m.FieldList() {
		err = s.buildFieldPair(field.Key, field.Value)
		if err != nil {
			// Skip fields that cannot be encoded (bad key or value).
			continue
		}

		bytesNeeded := len(s.header) + pairsLen + len(s.pair) + len(s.footer)

		// Additional length needed for field separator `,`
		if !firstField {
			bytesNeeded += 1
		}

		if s.maxLineBytes > 0 && bytesNeeded > s.maxLineBytes {
			// Need at least one field per line
			if firstField {
				return ErrNeedMoreSpace
			}

			// Terminate the current line and continue on a fresh one.
			err = s.write(w, s.footer)
			if err != nil {
				return err
			}
			// The new line has no pairs on it yet.
			pairsLen = 0

			bytesNeeded = len(s.header) + len(s.pair) + len(s.footer)

			if s.maxLineBytes > 0 && bytesNeeded > s.maxLineBytes {
				return ErrNeedMoreSpace
			}

			err = s.write(w, s.header)
			if err != nil {
				return err
			}

			err = s.write(w, s.pair)
			if err != nil {
				return err
			}
			pairsLen += len(s.pair)
			firstField = false
			continue
		}

		if firstField {
			err = s.write(w, s.header)
			if err != nil {
				return err
			}
		} else {
			err = s.writeString(w, ",")
			if err != nil {
				return err
			}
		}

		err = s.write(w, s.pair)
		if err != nil {
			return err
		}
		pairsLen += len(s.pair)
		firstField = false
	}

	if firstField {
		// No encodeable fields at all.
		return ErrNoFields
	}

	return s.write(w, s.footer)
}
// appendFieldValue encodes a field value in line protocol form and appends
// it to buf. Only int64, float64, string, and bool are encodeable; any
// other type yields ErrInvalidFieldType with buf unchanged.
func appendFieldValue(buf []byte, value interface{}) ([]byte, error) {
	switch v := value.(type) {
	case int64:
		return appendIntField(buf, v), nil
	case float64:
		// NaN and Inf have no line protocol representation.
		if math.IsNaN(v) {
			return nil, ErrFieldIsNaN
		}
		if math.IsInf(v, 0) {
			return nil, ErrFieldIsInf
		}
		return appendFloatField(buf, v), nil
	case string:
		return appendStringField(buf, v), nil
	case bool:
		return appendBoolField(buf, v), nil
	}
	return buf, ErrInvalidFieldType
}

// appendIntField appends a line protocol integer: decimal digits followed
// by the 'i' type suffix.
func appendIntField(buf []byte, value int64) []byte {
	buf = strconv.AppendInt(buf, value, 10)
	return append(buf, 'i')
}

// appendFloatField appends a float in the shortest 'g' representation.
func appendFloatField(buf []byte, value float64) []byte {
	return strconv.AppendFloat(buf, value, 'g', -1, 64)
}

// appendBoolField appends "true" or "false".
func appendBoolField(buf []byte, value bool) []byte {
	return strconv.AppendBool(buf, value)
}

// appendStringField appends the value escaped and wrapped in double quotes.
func appendStringField(buf []byte, value string) []byte {
	buf = append(buf, '"')
	buf = append(buf, stringFieldEscape(value)...)
	return append(buf, '"')
}

View File

@@ -1,72 +1,330 @@
package influx
import (
"fmt"
"strings"
"math"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/stretchr/testify/require"
)
func TestSerializeMetricFloat(t *testing.T) {
now := time.Now()
tags := map[string]string{
"cpu": "cpu0",
func MustMetric(v telegraf.Metric, err error) telegraf.Metric {
if err != nil {
panic(err)
}
fields := map[string]interface{}{
"usage_idle": float64(91.5),
}
m, err := metric.New("cpu", tags, fields, now)
assert.NoError(t, err)
s := InfluxSerializer{}
buf, _ := s.Serialize(m)
mS := strings.Split(strings.TrimSpace(string(buf)), "\n")
assert.NoError(t, err)
expS := []string{fmt.Sprintf("cpu,cpu=cpu0 usage_idle=91.5 %d", now.UnixNano())}
assert.Equal(t, expS, mS)
return v
}
func TestSerializeMetricInt(t *testing.T) {
now := time.Now()
tags := map[string]string{
"cpu": "cpu0",
}
fields := map[string]interface{}{
"usage_idle": int64(90),
}
m, err := metric.New("cpu", tags, fields, now)
assert.NoError(t, err)
s := InfluxSerializer{}
buf, _ := s.Serialize(m)
mS := strings.Split(strings.TrimSpace(string(buf)), "\n")
assert.NoError(t, err)
expS := []string{fmt.Sprintf("cpu,cpu=cpu0 usage_idle=90i %d", now.UnixNano())}
assert.Equal(t, expS, mS)
// tests is the table of serializer cases shared by TestSerializer and
// BenchmarkSerializer. Each case serializes input with maxBytes as the
// configured line-length limit and expects either output or err.
var tests = []struct {
	name     string
	maxBytes int
	input    telegraf.Metric
	output   []byte
	err      error
}{
	{
		name: "minimal",
		input: MustMetric(
			metric.New(
				"cpu",
				map[string]string{},
				map[string]interface{}{
					"value": 42.0,
				},
				time.Unix(0, 0),
			),
		),
		output: []byte("cpu value=42 0\n"),
	},
	{
		name: "multiple tags",
		input: MustMetric(
			metric.New(
				"cpu",
				map[string]string{
					"host": "localhost",
					"cpu":  "CPU0",
				},
				map[string]interface{}{
					"value": 42.0,
				},
				time.Unix(0, 0),
			),
		),
		output: []byte("cpu,cpu=CPU0,host=localhost value=42 0\n"),
	},
	{
		name: "multiple fields",
		input: MustMetric(
			metric.New(
				"cpu",
				map[string]string{},
				map[string]interface{}{
					"x": 42.0,
					"y": 42.0,
				},
				time.Unix(0, 0),
			),
		),
		output: []byte("cpu x=42,y=42 0\n"),
	},
	{
		// NaN fields are dropped; the remaining field is still written.
		name: "float NaN",
		input: MustMetric(
			metric.New(
				"cpu",
				map[string]string{},
				map[string]interface{}{
					"x": math.NaN(),
					"y": 42,
				},
				time.Unix(0, 0),
			),
		),
		output: []byte("cpu y=42i 0\n"),
	},
	{
		// If dropping NaN leaves no fields, serialization fails.
		name: "float NaN only",
		input: MustMetric(
			metric.New(
				"cpu",
				map[string]string{},
				map[string]interface{}{
					"value": math.NaN(),
				},
				time.Unix(0, 0),
			),
		),
		err: ErrNoFields,
	},
	{
		name: "float Inf",
		input: MustMetric(
			metric.New(
				"cpu",
				map[string]string{},
				map[string]interface{}{
					"value": math.Inf(1),
					"y":     42,
				},
				time.Unix(0, 0),
			),
		),
		output: []byte("cpu y=42i 0\n"),
	},
	{
		name: "integer field",
		input: MustMetric(
			metric.New(
				"cpu",
				map[string]string{},
				map[string]interface{}{
					"value": 42,
				},
				time.Unix(0, 0),
			),
		),
		output: []byte("cpu value=42i 0\n"),
	},
	{
		name: "bool field",
		input: MustMetric(
			metric.New(
				"cpu",
				map[string]string{},
				map[string]interface{}{
					"value": true,
				},
				time.Unix(0, 0),
			),
		),
		output: []byte("cpu value=true 0\n"),
	},
	{
		name: "string field",
		input: MustMetric(
			metric.New(
				"cpu",
				map[string]string{},
				map[string]interface{}{
					"value": "howdy",
				},
				time.Unix(0, 0),
			),
		),
		output: []byte("cpu value=\"howdy\" 0\n"),
	},
	{
		name: "timestamp",
		input: MustMetric(
			metric.New(
				"cpu",
				map[string]string{},
				map[string]interface{}{
					"value": 42.0,
				},
				time.Unix(1519194109, 42),
			),
		),
		output: []byte("cpu value=42 1519194109000000042\n"),
	},
	{
		// Each split line is exactly 33 bytes including the newline.
		name:     "split fields exact",
		maxBytes: 33,
		input: MustMetric(
			metric.New(
				"cpu",
				map[string]string{},
				map[string]interface{}{
					"abc": 123,
					"def": 456,
				},
				time.Unix(1519194109, 42),
			),
		),
		output: []byte("cpu abc=123i 1519194109000000042\ncpu def=456i 1519194109000000042\n"),
	},
	{
		name:     "split fields extra",
		maxBytes: 34,
		input: MustMetric(
			metric.New(
				"cpu",
				map[string]string{},
				map[string]interface{}{
					"abc": 123,
					"def": 456,
				},
				time.Unix(1519194109, 42),
			),
		),
		output: []byte("cpu abc=123i 1519194109000000042\ncpu def=456i 1519194109000000042\n"),
	},
	{
		// One byte below the minimum line length: cannot serialize at all.
		name:     "need more space",
		maxBytes: 32,
		input: MustMetric(
			metric.New(
				"cpu",
				map[string]string{},
				map[string]interface{}{
					"abc": 123,
					"def": 456,
				},
				time.Unix(1519194109, 42),
			),
		),
		output: nil,
		err:    ErrNeedMoreSpace,
	},
	{
		name: "no fields",
		input: MustMetric(
			metric.New(
				"cpu",
				map[string]string{},
				map[string]interface{}{},
				time.Unix(0, 0),
			),
		),
		err: ErrNoFields,
	},
	{
		// Realistic wide metric exercising mixed int/float field types.
		name: "procstat",
		input: MustMetric(
			metric.New(
				"procstat",
				map[string]string{
					"exe":          "bash",
					"process_name": "bash",
				},
				map[string]interface{}{
					"cpu_time":                      0,
					"cpu_time_guest":                float64(0),
					"cpu_time_guest_nice":           float64(0),
					"cpu_time_idle":                 float64(0),
					"cpu_time_iowait":               float64(0),
					"cpu_time_irq":                  float64(0),
					"cpu_time_nice":                 float64(0),
					"cpu_time_soft_irq":             float64(0),
					"cpu_time_steal":                float64(0),
					"cpu_time_stolen":               float64(0),
					"cpu_time_system":               float64(0),
					"cpu_time_user":                 float64(0.02),
					"cpu_usage":                     float64(0),
					"involuntary_context_switches":  2,
					"memory_data":                   1576960,
					"memory_locked":                 0,
					"memory_rss":                    5103616,
					"memory_stack":                  139264,
					"memory_swap":                   0,
					"memory_vms":                    21659648,
					"nice_priority":                 20,
					"num_fds":                       4,
					"num_threads":                   1,
					"pid":                           29417,
					"read_bytes":                    0,
					"read_count":                    259,
					"realtime_priority":             0,
					"rlimit_cpu_time_hard":          2147483647,
					"rlimit_cpu_time_soft":          2147483647,
					"rlimit_file_locks_hard":        2147483647,
					"rlimit_file_locks_soft":        2147483647,
					"rlimit_memory_data_hard":       2147483647,
					"rlimit_memory_data_soft":       2147483647,
					"rlimit_memory_locked_hard":     65536,
					"rlimit_memory_locked_soft":     65536,
					"rlimit_memory_rss_hard":        2147483647,
					"rlimit_memory_rss_soft":        2147483647,
					"rlimit_memory_stack_hard":      2147483647,
					"rlimit_memory_stack_soft":      8388608,
					"rlimit_memory_vms_hard":        2147483647,
					"rlimit_memory_vms_soft":        2147483647,
					"rlimit_nice_priority_hard":     0,
					"rlimit_nice_priority_soft":     0,
					"rlimit_num_fds_hard":           4096,
					"rlimit_num_fds_soft":           1024,
					"rlimit_realtime_priority_hard": 0,
					"rlimit_realtime_priority_soft": 0,
					"rlimit_signals_pending_hard":   78994,
					"rlimit_signals_pending_soft":   78994,
					"signals_pending":               0,
					"voluntary_context_switches":    42,
					"write_bytes":                   106496,
					"write_count":                   35,
				},
				time.Unix(0, 1517620624000000000),
			),
		),
		output: []byte("procstat,exe=bash,process_name=bash cpu_time=0i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_time_idle=0,cpu_time_iowait=0,cpu_time_irq=0,cpu_time_nice=0,cpu_time_soft_irq=0,cpu_time_steal=0,cpu_time_stolen=0,cpu_time_system=0,cpu_time_user=0.02,cpu_usage=0,involuntary_context_switches=2i,memory_data=1576960i,memory_locked=0i,memory_rss=5103616i,memory_stack=139264i,memory_swap=0i,memory_vms=21659648i,nice_priority=20i,num_fds=4i,num_threads=1i,pid=29417i,read_bytes=0i,read_count=259i,realtime_priority=0i,rlimit_cpu_time_hard=2147483647i,rlimit_cpu_time_soft=2147483647i,rlimit_file_locks_hard=2147483647i,rlimit_file_locks_soft=2147483647i,rlimit_memory_data_hard=2147483647i,rlimit_memory_data_soft=2147483647i,rlimit_memory_locked_hard=65536i,rlimit_memory_locked_soft=65536i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_rss_soft=2147483647i,rlimit_memory_stack_hard=2147483647i,rlimit_memory_stack_soft=8388608i,rlimit_memory_vms_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_nice_priority_hard=0i,rlimit_nice_priority_soft=0i,rlimit_num_fds_hard=4096i,rlimit_num_fds_soft=1024i,rlimit_realtime_priority_hard=0i,rlimit_realtime_priority_soft=0i,rlimit_signals_pending_hard=78994i,rlimit_signals_pending_soft=78994i,signals_pending=0i,voluntary_context_switches=42i,write_bytes=106496i,write_count=35i 1517620624000000000\n"),
	},
}
func TestSerializeMetricString(t *testing.T) {
now := time.Now()
tags := map[string]string{
"cpu": "cpu0",
// TestSerializer runs every case in tests against a fresh Serializer
// configured with the case's byte limit and with field sorting enabled,
// which makes the expected output deterministic.
func TestSerializer(t *testing.T) {
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			serializer := NewSerializer()
			serializer.SetMaxLineBytes(tt.maxBytes)
			serializer.SetFieldSortOrder(SortFields)
			output, err := serializer.Serialize(tt.input)
			require.Equal(t, tt.err, err)
			// Compare as strings for readable diffs on failure.
			require.Equal(t, string(tt.output), string(output))
		})
	}
}
func BenchmarkSerializer(b *testing.B) {
for _, tt := range tests {
b.Run(tt.name, func(b *testing.B) {
serializer := NewSerializer()
serializer.SetMaxLineBytes(tt.maxBytes)
for n := 0; n < b.N; n++ {
output, err := serializer.Serialize(tt.input)
_ = err
_ = output
}
})
}
fields := map[string]interface{}{
"usage_idle": "foobar",
}
m, err := metric.New("cpu", tags, fields, now)
assert.NoError(t, err)
s := InfluxSerializer{}
buf, _ := s.Serialize(m)
mS := strings.Split(strings.TrimSpace(string(buf)), "\n")
assert.NoError(t, err)
expS := []string{fmt.Sprintf("cpu,cpu=cpu0 usage_idle=\"foobar\" %d", now.UnixNano())}
assert.Equal(t, expS, mS)
}

View File

@@ -0,0 +1,58 @@
package influx
import (
"bytes"
"io"
"github.com/influxdata/telegraf"
)
// reader is an io.Reader for line protocol.
type reader struct {
	metrics    []telegraf.Metric // metrics to be serialized, in order
	serializer *Serializer       // converts one metric to line protocol
	offset     int               // index of the next metric to serialize
	buf        *bytes.Buffer     // serialized bytes of the current metric not yet read out
}
// NewReader creates a new reader over the given metrics.
func NewReader(metrics []telegraf.Metric, serializer *Serializer) io.Reader {
	// Pre-size the scratch buffer for a full line; offset starts at its
	// zero value.
	scratch := make([]byte, 0, serializer.maxLineBytes)
	return &reader{
		metrics:    metrics,
		serializer: serializer,
		buf:        bytes.NewBuffer(scratch),
	}
}
// SetMetrics changes the metrics to be read, discarding any partially
// read output and restarting from the first metric.
func (r *reader) SetMetrics(metrics []telegraf.Metric) {
	r.buf.Reset()
	r.metrics = metrics
	r.offset = 0
}
// Read reads up to len(p) bytes of the current metric into p. Each call
// serializes at most one metric, so the byte count returned may be less
// than len(p); subsequent calls continue with the next metric until all
// have been emitted, at which point the error is io.EOF. If a metric
// cannot be serialized its error is returned and that metric is skipped,
// so Read may be called again to resume with the next one.
func (r *reader) Read(p []byte) (int, error) {
	// Drain any bytes left over from a previously serialized metric first.
	if r.buf.Len() > 0 {
		return r.buf.Read(p)
	}
	if r.offset >= len(r.metrics) {
		return 0, io.EOF
	}
	// Advance past the current metric before checking the serialization
	// result so that a failing metric is skipped on the next call.
	m := r.metrics[r.offset]
	r.offset++
	if _, err := r.serializer.Write(r.buf, m); err != nil {
		r.buf.Reset()
		return 0, err
	}
	return r.buf.Read(p)
}

View File

@@ -0,0 +1,135 @@
package influx
import (
"bytes"
"io"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/stretchr/testify/require"
)
// TestReader drives reader.Read with small destination buffers and checks
// that the concatenated output and the total byte count match the
// expected line protocol.
func TestReader(t *testing.T) {
	tests := []struct {
		name         string
		maxLineBytes int
		bufferSize   int
		input        []telegraf.Metric
		expected     []byte
	}{
		{
			name:         "minimal",
			maxLineBytes: 4096,
			bufferSize:   20,
			input: []telegraf.Metric{
				MustMetric(
					metric.New(
						"cpu",
						map[string]string{},
						map[string]interface{}{
							"value": 42.0,
						},
						time.Unix(0, 0),
					),
				),
			},
			expected: []byte("cpu value=42 0\n"),
		},
		{
			name:         "multiple lines",
			maxLineBytes: 4096,
			bufferSize:   20,
			input: []telegraf.Metric{
				MustMetric(
					metric.New(
						"cpu",
						map[string]string{},
						map[string]interface{}{
							"value": 42.0,
						},
						time.Unix(0, 0),
					),
				),
				MustMetric(
					metric.New(
						"cpu",
						map[string]string{},
						map[string]interface{}{
							"value": 42.0,
						},
						time.Unix(0, 0),
					),
				),
			},
			expected: []byte("cpu value=42 0\ncpu value=42 0\n"),
		},
		{
			// The destination buffer is exactly the line length.
			name:         "exact fit",
			maxLineBytes: 4096,
			bufferSize:   15,
			input: []telegraf.Metric{
				MustMetric(
					metric.New(
						"cpu",
						map[string]string{},
						map[string]interface{}{
							"value": 42.0,
						},
						time.Unix(0, 0),
					),
				),
			},
			expected: []byte("cpu value=42 0\n"),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			serializer := NewSerializer()
			serializer.SetMaxLineBytes(tt.maxLineBytes)
			serializer.SetFieldSortOrder(SortFields)
			reader := NewReader(tt.input, serializer)
			data := new(bytes.Buffer)
			readbuf := make([]byte, tt.bufferSize)
			total := 0
			for {
				n, err := reader.Read(readbuf)
				total += n
				// Per the io.Reader contract a reader may return n > 0
				// along with io.EOF, so consume the bytes before checking
				// for end of input (the original dropped them).
				data.Write(readbuf[:n])
				if err == io.EOF {
					break
				}
				require.NoError(t, err)
			}
			require.Equal(t, tt.expected, data.Bytes())
			require.Equal(t, len(tt.expected), total)
		})
	}
}
// TestZeroLengthBufferNoError verifies that a Read into a zero-length
// destination buffer returns n == 0 with a nil error rather than io.EOF
// or a spurious failure.
func TestZeroLengthBufferNoError(t *testing.T) {
	m := MustMetric(
		metric.New(
			"cpu",
			map[string]string{},
			map[string]interface{}{
				"value": 42.0,
			},
			time.Unix(0, 0),
		),
	)
	serializer := NewSerializer()
	serializer.SetFieldSortOrder(SortFields)
	r := NewReader([]telegraf.Metric{m}, serializer)

	n, err := r.Read(make([]byte, 0))
	require.NoError(t, err)
	require.Equal(t, 0, n)
}

View File

@@ -22,7 +22,7 @@ func (s *JsonSerializer) Serialize(metric telegraf.Metric) ([]byte, error) {
m["tags"] = metric.Tags()
m["fields"] = metric.Fields()
m["name"] = metric.Name()
m["timestamp"] = metric.UnixNano() / units_nanoseconds
m["timestamp"] = metric.Time().UnixNano() / units_nanoseconds
serialized, err := ejson.Marshal(m)
if err != nil {
return []byte{}, err

View File

@@ -33,6 +33,13 @@ type Config struct {
// Dataformat can be one of: influx, graphite, or json
DataFormat string
// Maximum line length in bytes; influx format only
InfluxMaxLineBytes int
// Sort field keys, set to true only when debugging as it less performant
// than unsorted fields; influx format only
InfluxSortFields bool
// Prefix to add to all measurements, only supports Graphite
Prefix string
@@ -50,7 +57,7 @@ func NewSerializer(config *Config) (Serializer, error) {
var serializer Serializer
switch config.DataFormat {
case "influx":
serializer, err = NewInfluxSerializer()
serializer, err = NewInfluxSerializerConfig(config)
case "graphite":
serializer, err = NewGraphiteSerializer(config.Prefix, config.Template)
case "json":
@@ -65,8 +72,19 @@ func NewJsonSerializer(timestampUnits time.Duration) (Serializer, error) {
return &json.JsonSerializer{TimestampUnits: timestampUnits}, nil
}
// NewInfluxSerializerConfig builds an influx line protocol serializer
// from config, applying the maximum line length and, when requested,
// sorted field output.
func NewInfluxSerializerConfig(config *Config) (Serializer, error) {
	// Leave the order at its zero value (unsorted) unless configured.
	var order influx.FieldSortOrder
	if config.InfluxSortFields {
		order = influx.SortFields
	}

	s := influx.NewSerializer()
	s.SetMaxLineBytes(config.InfluxMaxLineBytes)
	s.SetFieldSortOrder(order)
	return s, nil
}
// NewInfluxSerializer returns a line protocol serializer with default
// settings. (The garbled diff render left both the old and new return
// statements in place; only the current one is kept, removing the
// unreachable duplicate return.)
func NewInfluxSerializer() (Serializer, error) {
	return influx.NewSerializer(), nil
}
func NewGraphiteSerializer(prefix, template string) (Serializer, error) {