Add new line protocol parser and serializer, influxdb output (#3924)

Daniel Nelson
2018-03-27 17:30:51 -07:00
committed by GitHub
parent 503881d4d7
commit 1c0f63a90d
70 changed files with 26827 additions and 6533 deletions

@@ -35,7 +35,7 @@ func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) {
out := []byte{}
// Convert UnixNano to Unix timestamps
- timestamp := metric.UnixNano() / 1000000000
timestamp := metric.Time().UnixNano() / 1000000000
bucket := SerializeBucketName(metric.Name(), metric.Tags(), s.Template, s.Prefix)
if bucket == "" {
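The only functional change in this hunk is switching to the Time() accessor; the division by 1e9 is unchanged. As a quick standalone check (not part of the commit), the conversion is equivalent to calling Unix() directly:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Unix(1519194109, 42) // arbitrary timestamp with a nanosecond component
	// Integer division by 1e9 truncates to whole seconds, which is exactly Unix().
	fmt.Println(t.UnixNano()/1000000000 == t.Unix()) // true
}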

@@ -0,0 +1,52 @@
package influx
import "strings"
const (
escapes = " ,="
nameEscapes = " ,"
stringFieldEscapes = `\"`
)
var (
escaper = strings.NewReplacer(
`,`, `\,`,
`"`, `\"`, // ???
` `, `\ `,
`=`, `\=`,
)
nameEscaper = strings.NewReplacer(
`,`, `\,`,
` `, `\ `,
)
stringFieldEscaper = strings.NewReplacer(
`"`, `\"`,
`\`, `\\`,
)
)
func escape(s string) string {
if strings.ContainsAny(s, escapes) {
return escaper.Replace(s)
} else {
return s
}
}
func nameEscape(s string) string {
if strings.ContainsAny(s, nameEscapes) {
return nameEscaper.Replace(s)
} else {
return s
}
}
func stringFieldEscape(s string) string {
if strings.ContainsAny(s, stringFieldEscapes) {
return stringFieldEscaper.Replace(s)
} else {
return s
}
}
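For a concrete picture of the three escaping rules above, here is a small sketch written as if it lived in the same influx package (illustrative only, not part of the commit): measurement names escape spaces and commas, tag and field keys/values additionally escape '=', and string field values escape backslashes and double quotes.

package influx

import "fmt"

// Example_escaping shows the three escape helpers side by side.
func Example_escaping() {
	fmt.Println(nameEscape("cpu total"))        // measurement name: space and comma
	fmt.Println(escape("host name=web,1"))      // tag/field key or value: also '='
	fmt.Println(stringFieldEscape(`say "hi"\`)) // string field value: '"' and '\'
	// Output:
	// cpu\ total
	// host\ name\=web\,1
	// say \"hi\"\\
}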

@@ -1,12 +1,277 @@
package influx
import (
"bytes"
"errors"
"io"
"math"
"sort"
"strconv"
"github.com/influxdata/telegraf"
)
- type InfluxSerializer struct {
const MaxInt = int(^uint(0) >> 1)
type FieldSortOrder int
const (
NoSortFields FieldSortOrder = iota
SortFields
)
var (
ErrNeedMoreSpace = errors.New("need more space")
ErrInvalidName = errors.New("invalid name")
ErrInvalidFieldKey = errors.New("invalid field key")
ErrInvalidFieldType = errors.New("invalid field type")
ErrFieldIsNaN = errors.New("is NaN")
ErrFieldIsInf = errors.New("is Inf")
ErrNoFields = errors.New("no fields")
)
// Serializer is a serializer for line protocol.
type Serializer struct {
maxLineBytes int
bytesWritten int
fieldSortOrder FieldSortOrder
buf bytes.Buffer
header []byte
footer []byte
pair []byte
}
- func (s *InfluxSerializer) Serialize(m telegraf.Metric) ([]byte, error) {
- return m.Serialize(), nil
func NewSerializer() *Serializer {
serializer := &Serializer{
fieldSortOrder: NoSortFields,
header: make([]byte, 0, 50),
footer: make([]byte, 0, 21),
pair: make([]byte, 0, 50),
}
return serializer
}
func (s *Serializer) SetMaxLineBytes(bytes int) {
s.maxLineBytes = bytes
}
func (s *Serializer) SetFieldSortOrder(order FieldSortOrder) {
s.fieldSortOrder = order
}
// Serialize writes the telegraf.Metric to a byte slice. It may produce
// multiple lines of output if the serialized metric is longer than the maximum
// line length. Lines are terminated with a newline (LF) char.
func (s *Serializer) Serialize(m telegraf.Metric) ([]byte, error) {
s.buf.Reset()
err := s.writeMetric(&s.buf, m)
if err != nil {
return nil, err
}
out := make([]byte, s.buf.Len())
copy(out, s.buf.Bytes())
return out, nil
}
func (s *Serializer) Write(w io.Writer, m telegraf.Metric) (int, error) {
err := s.writeMetric(w, m)
return s.bytesWritten, err
}
func (s *Serializer) writeString(w io.Writer, str string) error {
n, err := io.WriteString(w, str)
s.bytesWritten += n
return err
}
func (s *Serializer) write(w io.Writer, b []byte) error {
n, err := w.Write(b)
s.bytesWritten += n
return err
}
func (s *Serializer) buildHeader(m telegraf.Metric) error {
s.header = s.header[:0]
name := nameEscape(m.Name())
if name == "" {
return ErrInvalidName
}
s.header = append(s.header, name...)
for _, tag := range m.TagList() {
key := escape(tag.Key)
value := escape(tag.Value)
// Some keys and values are not encodeable as line protocol, such as
// those with a trailing '\' or empty strings.
if key == "" || value == "" {
continue
}
s.header = append(s.header, ',')
s.header = append(s.header, key...)
s.header = append(s.header, '=')
s.header = append(s.header, value...)
}
s.header = append(s.header, ' ')
return nil
}
func (s *Serializer) buildFooter(m telegraf.Metric) {
s.footer = s.footer[:0]
s.footer = append(s.footer, ' ')
s.footer = strconv.AppendInt(s.footer, m.Time().UnixNano(), 10)
s.footer = append(s.footer, '\n')
}
func (s *Serializer) buildFieldPair(key string, value interface{}) error {
s.pair = s.pair[:0]
key = escape(key)
// Some keys are not encodeable as line protocol, such as those with a
// trailing '\' or empty strings.
if key == "" {
return ErrInvalidFieldKey
}
s.pair = append(s.pair, key...)
s.pair = append(s.pair, '=')
pair, err := appendFieldValue(s.pair, value)
if err != nil {
return err
}
s.pair = pair
return nil
}
func (s *Serializer) writeMetric(w io.Writer, m telegraf.Metric) error {
var err error
err = s.buildHeader(m)
if err != nil {
return err
}
s.buildFooter(m)
if s.fieldSortOrder == SortFields {
sort.Slice(m.FieldList(), func(i, j int) bool {
return m.FieldList()[i].Key < m.FieldList()[j].Key
})
}
pairsLen := 0
firstField := true
for _, field := range m.FieldList() {
err = s.buildFieldPair(field.Key, field.Value)
if err != nil {
continue
}
bytesNeeded := len(s.header) + pairsLen + len(s.pair) + len(s.footer)
// Additional length needed for field separator `,`
if !firstField {
bytesNeeded += 1
}
if s.maxLineBytes > 0 && bytesNeeded > s.maxLineBytes {
// Need at least one field per line
if firstField {
return ErrNeedMoreSpace
}
err = s.write(w, s.footer)
if err != nil {
return err
}
bytesNeeded = len(s.header) + len(s.pair) + len(s.footer)
if s.maxLineBytes > 0 && bytesNeeded > s.maxLineBytes {
return ErrNeedMoreSpace
}
err = s.write(w, s.header)
if err != nil {
return err
}
s.write(w, s.pair)
pairsLen += len(s.pair)
firstField = false
continue
}
if firstField {
err = s.write(w, s.header)
if err != nil {
return err
}
} else {
err = s.writeString(w, ",")
if err != nil {
return err
}
}
s.write(w, s.pair)
pairsLen += len(s.pair)
firstField = false
}
if firstField {
return ErrNoFields
}
return s.write(w, s.footer)
}
func appendFieldValue(buf []byte, value interface{}) ([]byte, error) {
switch v := value.(type) {
case int64:
return appendIntField(buf, v), nil
case float64:
if math.IsNaN(v) {
return nil, ErrFieldIsNaN
}
if math.IsInf(v, 0) {
return nil, ErrFieldIsInf
}
return appendFloatField(buf, v), nil
case string:
return appendStringField(buf, v), nil
case bool:
return appendBoolField(buf, v), nil
}
return buf, ErrInvalidFieldType
}
func appendIntField(buf []byte, value int64) []byte {
return append(strconv.AppendInt(buf, value, 10), 'i')
}
func appendFloatField(buf []byte, value float64) []byte {
return strconv.AppendFloat(buf, value, 'g', -1, 64)
}
func appendBoolField(buf []byte, value bool) []byte {
return strconv.AppendBool(buf, value)
}
func appendStringField(buf []byte, value string) []byte {
buf = append(buf, '"')
buf = append(buf, stringFieldEscape(value)...)
buf = append(buf, '"')
return buf
}
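To see the serializer end to end, a hedged usage sketch (not part of the commit; it assumes the package is importable from the usual plugins/serializers/influx path): a two-field metric is serialized with field sorting enabled and a deliberately small line limit, so the output is split into one line per field.

package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/plugins/serializers/influx"
)

func main() {
	m, err := metric.New(
		"cpu",
		map[string]string{"host": "localhost"},
		map[string]interface{}{"abc": 123, "def": 456},
		time.Unix(1519194109, 42),
	)
	if err != nil {
		panic(err)
	}

	s := influx.NewSerializer()
	s.SetFieldSortOrder(influx.SortFields) // deterministic field order
	s.SetMaxLineBytes(48)                  // just enough for one field per line
	out, err := s.Serialize(m)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// cpu,host=localhost abc=123i 1519194109000000042
	// cpu,host=localhost def=456i 1519194109000000042
}

With SetMaxLineBytes(0) the same metric would come back as a single line with the fields separated by commas.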

@@ -1,72 +1,330 @@
package influx
import (
"fmt"
"strings"
"math"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/stretchr/testify/require"
)
- func TestSerializeMetricFloat(t *testing.T) {
- now := time.Now()
- tags := map[string]string{
- "cpu": "cpu0",
func MustMetric(v telegraf.Metric, err error) telegraf.Metric {
if err != nil {
panic(err)
}
- fields := map[string]interface{}{
- "usage_idle": float64(91.5),
- }
- m, err := metric.New("cpu", tags, fields, now)
- assert.NoError(t, err)
- s := InfluxSerializer{}
- buf, _ := s.Serialize(m)
- mS := strings.Split(strings.TrimSpace(string(buf)), "\n")
- assert.NoError(t, err)
- expS := []string{fmt.Sprintf("cpu,cpu=cpu0 usage_idle=91.5 %d", now.UnixNano())}
- assert.Equal(t, expS, mS)
return v
}
- func TestSerializeMetricInt(t *testing.T) {
- now := time.Now()
- tags := map[string]string{
- "cpu": "cpu0",
- }
- fields := map[string]interface{}{
- "usage_idle": int64(90),
- }
- m, err := metric.New("cpu", tags, fields, now)
- assert.NoError(t, err)
- s := InfluxSerializer{}
- buf, _ := s.Serialize(m)
- mS := strings.Split(strings.TrimSpace(string(buf)), "\n")
- assert.NoError(t, err)
- expS := []string{fmt.Sprintf("cpu,cpu=cpu0 usage_idle=90i %d", now.UnixNano())}
- assert.Equal(t, expS, mS)
var tests = []struct {
name string
maxBytes int
input telegraf.Metric
output []byte
err error
}{
{
name: "minimal",
input: MustMetric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
),
),
output: []byte("cpu value=42 0\n"),
},
{
name: "multiple tags",
input: MustMetric(
metric.New(
"cpu",
map[string]string{
"host": "localhost",
"cpu": "CPU0",
},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
),
),
output: []byte("cpu,cpu=CPU0,host=localhost value=42 0\n"),
},
{
name: "multiple fields",
input: MustMetric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"x": 42.0,
"y": 42.0,
},
time.Unix(0, 0),
),
),
output: []byte("cpu x=42,y=42 0\n"),
},
{
name: "float NaN",
input: MustMetric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"x": math.NaN(),
"y": 42,
},
time.Unix(0, 0),
),
),
output: []byte("cpu y=42i 0\n"),
},
{
name: "float NaN only",
input: MustMetric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": math.NaN(),
},
time.Unix(0, 0),
),
),
err: ErrNoFields,
},
{
name: "float Inf",
input: MustMetric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": math.Inf(1),
"y": 42,
},
time.Unix(0, 0),
),
),
output: []byte("cpu y=42i 0\n"),
},
{
name: "integer field",
input: MustMetric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42,
},
time.Unix(0, 0),
),
),
output: []byte("cpu value=42i 0\n"),
},
{
name: "bool field",
input: MustMetric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": true,
},
time.Unix(0, 0),
),
),
output: []byte("cpu value=true 0\n"),
},
{
name: "string field",
input: MustMetric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": "howdy",
},
time.Unix(0, 0),
),
),
output: []byte("cpu value=\"howdy\" 0\n"),
},
{
name: "timestamp",
input: MustMetric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(1519194109, 42),
),
),
output: []byte("cpu value=42 1519194109000000042\n"),
},
{
name: "split fields exact",
maxBytes: 33,
input: MustMetric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"abc": 123,
"def": 456,
},
time.Unix(1519194109, 42),
),
),
output: []byte("cpu abc=123i 1519194109000000042\ncpu def=456i 1519194109000000042\n"),
},
{
name: "split fields extra",
maxBytes: 34,
input: MustMetric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"abc": 123,
"def": 456,
},
time.Unix(1519194109, 42),
),
),
output: []byte("cpu abc=123i 1519194109000000042\ncpu def=456i 1519194109000000042\n"),
},
{
name: "need more space",
maxBytes: 32,
input: MustMetric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"abc": 123,
"def": 456,
},
time.Unix(1519194109, 42),
),
),
output: nil,
err: ErrNeedMoreSpace,
},
{
name: "no fields",
input: MustMetric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{},
time.Unix(0, 0),
),
),
err: ErrNoFields,
},
{
name: "procstat",
input: MustMetric(
metric.New(
"procstat",
map[string]string{
"exe": "bash",
"process_name": "bash",
},
map[string]interface{}{
"cpu_time": 0,
"cpu_time_guest": float64(0),
"cpu_time_guest_nice": float64(0),
"cpu_time_idle": float64(0),
"cpu_time_iowait": float64(0),
"cpu_time_irq": float64(0),
"cpu_time_nice": float64(0),
"cpu_time_soft_irq": float64(0),
"cpu_time_steal": float64(0),
"cpu_time_stolen": float64(0),
"cpu_time_system": float64(0),
"cpu_time_user": float64(0.02),
"cpu_usage": float64(0),
"involuntary_context_switches": 2,
"memory_data": 1576960,
"memory_locked": 0,
"memory_rss": 5103616,
"memory_stack": 139264,
"memory_swap": 0,
"memory_vms": 21659648,
"nice_priority": 20,
"num_fds": 4,
"num_threads": 1,
"pid": 29417,
"read_bytes": 0,
"read_count": 259,
"realtime_priority": 0,
"rlimit_cpu_time_hard": 2147483647,
"rlimit_cpu_time_soft": 2147483647,
"rlimit_file_locks_hard": 2147483647,
"rlimit_file_locks_soft": 2147483647,
"rlimit_memory_data_hard": 2147483647,
"rlimit_memory_data_soft": 2147483647,
"rlimit_memory_locked_hard": 65536,
"rlimit_memory_locked_soft": 65536,
"rlimit_memory_rss_hard": 2147483647,
"rlimit_memory_rss_soft": 2147483647,
"rlimit_memory_stack_hard": 2147483647,
"rlimit_memory_stack_soft": 8388608,
"rlimit_memory_vms_hard": 2147483647,
"rlimit_memory_vms_soft": 2147483647,
"rlimit_nice_priority_hard": 0,
"rlimit_nice_priority_soft": 0,
"rlimit_num_fds_hard": 4096,
"rlimit_num_fds_soft": 1024,
"rlimit_realtime_priority_hard": 0,
"rlimit_realtime_priority_soft": 0,
"rlimit_signals_pending_hard": 78994,
"rlimit_signals_pending_soft": 78994,
"signals_pending": 0,
"voluntary_context_switches": 42,
"write_bytes": 106496,
"write_count": 35,
},
time.Unix(0, 1517620624000000000),
),
),
output: []byte("procstat,exe=bash,process_name=bash cpu_time=0i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_time_idle=0,cpu_time_iowait=0,cpu_time_irq=0,cpu_time_nice=0,cpu_time_soft_irq=0,cpu_time_steal=0,cpu_time_stolen=0,cpu_time_system=0,cpu_time_user=0.02,cpu_usage=0,involuntary_context_switches=2i,memory_data=1576960i,memory_locked=0i,memory_rss=5103616i,memory_stack=139264i,memory_swap=0i,memory_vms=21659648i,nice_priority=20i,num_fds=4i,num_threads=1i,pid=29417i,read_bytes=0i,read_count=259i,realtime_priority=0i,rlimit_cpu_time_hard=2147483647i,rlimit_cpu_time_soft=2147483647i,rlimit_file_locks_hard=2147483647i,rlimit_file_locks_soft=2147483647i,rlimit_memory_data_hard=2147483647i,rlimit_memory_data_soft=2147483647i,rlimit_memory_locked_hard=65536i,rlimit_memory_locked_soft=65536i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_rss_soft=2147483647i,rlimit_memory_stack_hard=2147483647i,rlimit_memory_stack_soft=8388608i,rlimit_memory_vms_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_nice_priority_hard=0i,rlimit_nice_priority_soft=0i,rlimit_num_fds_hard=4096i,rlimit_num_fds_soft=1024i,rlimit_realtime_priority_hard=0i,rlimit_realtime_priority_soft=0i,rlimit_signals_pending_hard=78994i,rlimit_signals_pending_soft=78994i,signals_pending=0i,voluntary_context_switches=42i,write_bytes=106496i,write_count=35i 1517620624000000000\n"),
},
}
- func TestSerializeMetricString(t *testing.T) {
- now := time.Now()
- tags := map[string]string{
- "cpu": "cpu0",
func TestSerializer(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
serializer := NewSerializer()
serializer.SetMaxLineBytes(tt.maxBytes)
serializer.SetFieldSortOrder(SortFields)
output, err := serializer.Serialize(tt.input)
require.Equal(t, tt.err, err)
require.Equal(t, string(tt.output), string(output))
})
}
}
func BenchmarkSerializer(b *testing.B) {
for _, tt := range tests {
b.Run(tt.name, func(b *testing.B) {
serializer := NewSerializer()
serializer.SetMaxLineBytes(tt.maxBytes)
for n := 0; n < b.N; n++ {
output, err := serializer.Serialize(tt.input)
_ = err
_ = output
}
})
}
- fields := map[string]interface{}{
- "usage_idle": "foobar",
- }
- m, err := metric.New("cpu", tags, fields, now)
- assert.NoError(t, err)
- s := InfluxSerializer{}
- buf, _ := s.Serialize(m)
- mS := strings.Split(strings.TrimSpace(string(buf)), "\n")
- assert.NoError(t, err)
- expS := []string{fmt.Sprintf("cpu,cpu=cpu0 usage_idle=\"foobar\" %d", now.UnixNano())}
- assert.Equal(t, expS, mS)
}

@@ -0,0 +1,58 @@
package influx
import (
"bytes"
"io"
"github.com/influxdata/telegraf"
)
// reader is an io.Reader for line protocol.
type reader struct {
metrics []telegraf.Metric
serializer *Serializer
offset int
buf *bytes.Buffer
}
// NewReader creates a new reader over the given metrics.
func NewReader(metrics []telegraf.Metric, serializer *Serializer) io.Reader {
return &reader{
metrics: metrics,
serializer: serializer,
offset: 0,
buf: bytes.NewBuffer(make([]byte, 0, serializer.maxLineBytes)),
}
}
// SetMetrics changes the metrics to be read.
func (r *reader) SetMetrics(metrics []telegraf.Metric) {
r.metrics = metrics
r.offset = 0
r.buf.Reset()
}
// Read reads up to len(p) bytes of the current metric into p. Each call
// serializes at most one metric, so the number of bytes read may be less than
// len(p). Subsequent calls to Read return the next metric until all have been
// emitted. If a metric cannot be serialized, an error is returned; you may
// resume with the next metric by calling Read again. When all metrics are
// emitted, the error is io.EOF.
func (r *reader) Read(p []byte) (int, error) {
if r.buf.Len() > 0 {
return r.buf.Read(p)
}
if r.offset >= len(r.metrics) {
return 0, io.EOF
}
_, err := r.serializer.Write(r.buf, r.metrics[r.offset])
r.offset += 1
if err != nil {
r.buf.Reset()
return 0, err
}
return r.buf.Read(p)
}
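A sketch of how the reader might be used to stream metrics into any io.Writer (not part of the commit; it assumes the same plugins/serializers/influx import path). Because each Read call serializes at most one metric, io.Copy drains the metrics one line-protocol entry at a time until io.EOF, without ever building the whole payload in memory.

package main

import (
	"io"
	"os"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/plugins/serializers/influx"
)

func main() {
	m, err := metric.New(
		"cpu",
		map[string]string{},
		map[string]interface{}{"value": 42.0},
		time.Unix(0, 0),
	)
	if err != nil {
		panic(err)
	}

	s := influx.NewSerializer()
	s.SetMaxLineBytes(4096)

	// NewReader serializes lazily; nothing is written until the reader is drained.
	r := influx.NewReader([]telegraf.Metric{m, m}, s)
	if _, err := io.Copy(os.Stdout, r); err != nil {
		panic(err)
	}
	// cpu value=42 0
	// cpu value=42 0
}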

@@ -0,0 +1,135 @@
package influx
import (
"bytes"
"io"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/stretchr/testify/require"
)
func TestReader(t *testing.T) {
tests := []struct {
name string
maxLineBytes int
bufferSize int
input []telegraf.Metric
expected []byte
}{
{
name: "minimal",
maxLineBytes: 4096,
bufferSize: 20,
input: []telegraf.Metric{
MustMetric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
),
),
},
expected: []byte("cpu value=42 0\n"),
},
{
name: "multiple lines",
maxLineBytes: 4096,
bufferSize: 20,
input: []telegraf.Metric{
MustMetric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
),
),
MustMetric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
),
),
},
expected: []byte("cpu value=42 0\ncpu value=42 0\n"),
},
{
name: "exact fit",
maxLineBytes: 4096,
bufferSize: 15,
input: []telegraf.Metric{
MustMetric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
),
),
},
expected: []byte("cpu value=42 0\n"),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
serializer := NewSerializer()
serializer.SetMaxLineBytes(tt.maxLineBytes)
serializer.SetFieldSortOrder(SortFields)
reader := NewReader(tt.input, serializer)
data := new(bytes.Buffer)
readbuf := make([]byte, tt.bufferSize)
total := 0
for {
n, err := reader.Read(readbuf)
total += n
if err == io.EOF {
break
}
data.Write(readbuf[:n])
require.NoError(t, err)
}
require.Equal(t, tt.expected, data.Bytes())
require.Equal(t, len(tt.expected), total)
})
}
}
func TestZeroLengthBufferNoError(t *testing.T) {
m := MustMetric(
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
),
)
serializer := NewSerializer()
serializer.SetFieldSortOrder(SortFields)
reader := NewReader([]telegraf.Metric{m}, serializer)
readbuf := make([]byte, 0)
n, err := reader.Read(readbuf)
require.NoError(t, err)
require.Equal(t, 0, n)
}

@@ -22,7 +22,7 @@ func (s *JsonSerializer) Serialize(metric telegraf.Metric) ([]byte, error) {
m["tags"] = metric.Tags()
m["fields"] = metric.Fields()
m["name"] = metric.Name()
m["timestamp"] = metric.UnixNano() / units_nanoseconds
m["timestamp"] = metric.Time().UnixNano() / units_nanoseconds
serialized, err := ejson.Marshal(m)
if err != nil {
return []byte{}, err

@@ -33,6 +33,13 @@ type Config struct {
// Dataformat can be one of: influx, graphite, or json
DataFormat string
// Maximum line length in bytes; influx format only
InfluxMaxLineBytes int
// Sort field keys; set to true only when debugging, as it is less performant
// than unsorted fields; influx format only
InfluxSortFields bool
// Prefix to add to all measurements, only supports Graphite
Prefix string
@@ -50,7 +57,7 @@ func NewSerializer(config *Config) (Serializer, error) {
var serializer Serializer
switch config.DataFormat {
case "influx":
- serializer, err = NewInfluxSerializer()
serializer, err = NewInfluxSerializerConfig(config)
case "graphite":
serializer, err = NewGraphiteSerializer(config.Prefix, config.Template)
case "json":
@@ -65,8 +72,19 @@ func NewJsonSerializer(timestampUnits time.Duration) (Serializer, error) {
return &json.JsonSerializer{TimestampUnits: timestampUnits}, nil
}
func NewInfluxSerializerConfig(config *Config) (Serializer, error) {
var sort influx.FieldSortOrder
if config.InfluxSortFields {
sort = influx.SortFields
}
s := influx.NewSerializer()
s.SetMaxLineBytes(config.InfluxMaxLineBytes)
s.SetFieldSortOrder(sort)
return s, nil
}
func NewInfluxSerializer() (Serializer, error) {
return &influx.InfluxSerializer{}, nil
return influx.NewSerializer(), nil
}
func NewGraphiteSerializer(prefix, template string) (Serializer, error) {
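For plugins that go through the registry rather than constructing the serializer directly, the two new options are carried on Config. A minimal sketch, assuming the plugins/serializers import path; the field and function names are taken from the diff above:

package main

import "github.com/influxdata/telegraf/plugins/serializers"

func main() {
	cfg := &serializers.Config{
		DataFormat:         "influx",
		InfluxMaxLineBytes: 0,     // 0 leaves line splitting disabled
		InfluxSortFields:   false, // sorting is mainly useful when debugging
	}

	s, err := serializers.NewSerializer(cfg)
	if err != nil {
		panic(err)
	}
	_ = s // hand the serializer to an output plugin
}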