Fix unit tests for new metric implementation

This commit is contained in:
Cameron Sparr
2016-11-28 18:19:35 +00:00
parent db7a4b24b6
commit e5c7a71d8e
29 changed files with 226 additions and 180 deletions

View File

@@ -15,6 +15,8 @@ import (
"github.com/influxdata/influxdb/client/v2"
)
const MaxInt = int(^uint(0) >> 1)
var (
// escaper is for escaping:
// - tag keys
@@ -63,7 +65,6 @@ func New(
m.tags = append(m.tags, []byte("="+escaper.Replace(v))...)
}
m.fields = []byte{' '}
i := 0
for k, v := range fields {
if i != 0 {
@@ -72,7 +73,6 @@ func New(
m.fields = appendField(m.fields, k, v)
i++
}
m.fields = append(m.fields, ' ')
return m, nil
}
@@ -103,6 +103,9 @@ func indexUnescapedByte(buf []byte, b byte) int {
func countBackslashes(buf []byte, index int) int {
var count int
for {
if index < 0 {
return count
}
if buf[index] == '\\' {
count++
index--
@@ -130,7 +133,8 @@ type metric struct {
}
func (m *metric) Point() *client.Point {
return &client.Point{}
c, _ := client.NewPoint(m.Name(), m.Tags(), m.Fields(), m.Time())
return c
}
func (m *metric) String() string {
@@ -150,16 +154,25 @@ func (m *metric) Type() telegraf.ValueType {
}
func (m *metric) Len() int {
return len(m.name) + len(m.tags) + len(m.fields) + len(m.t) + 1
return len(m.name) + len(m.tags) + 1 + len(m.fields) + 1 + len(m.t) + 1
}
func (m *metric) Serialize() []byte {
tmp := make([]byte, m.Len())
copy(tmp, m.name)
copy(tmp[len(m.name):], m.tags)
copy(tmp[len(m.name)+len(m.tags):], m.fields)
copy(tmp[len(m.name)+len(m.tags)+len(m.fields):], m.t)
tmp[len(tmp)-1] = '\n'
i := 0
copy(tmp[i:], m.name)
i += len(m.name)
copy(tmp[i:], m.tags)
i += len(m.tags)
tmp[i] = ' '
i++
copy(tmp[i:], m.fields)
i += len(m.fields)
tmp[i] = ' '
i++
copy(tmp[i:], m.t)
i += len(m.t)
tmp[i] = '\n'
return tmp
}
@@ -170,7 +183,7 @@ func (m *metric) Fields() map[string]interface{} {
}
m.fieldMap = map[string]interface{}{}
i := 1
i := 0
for {
if i >= len(m.fields) {
break
@@ -182,10 +195,20 @@ func (m *metric) Fields() map[string]interface{} {
}
// start index of field value
i2 := i1 + 1
// end index of field value
i3 := indexUnescapedByte(m.fields[i:], ',')
if i3 == -1 {
i3 = len(m.fields[i:]) - 1
var i3 int
if m.fields[i:][i2] == '"' {
i3 = indexUnescapedByte(m.fields[i:][i2+1:], '"')
if i3 == -1 {
i3 = len(m.fields[i:])
}
i3 += i2 + 2 // increment index to the comma
} else {
i3 = indexUnescapedByte(m.fields[i:], ',')
if i3 == -1 {
i3 = len(m.fields[i:])
}
}
switch m.fields[i:][i2] {
@@ -213,9 +236,9 @@ func (m *metric) Fields() map[string]interface{} {
}
}
case 'T', 't':
// TODO handle "true" booleans
m.fieldMap[string(m.fields[i:][0:i1])] = true
case 'F', 'f':
// TODO handle "false" booleans
m.fieldMap[string(m.fields[i:][0:i1])] = false
default:
// TODO handle unsupported field type
}
@@ -309,6 +332,7 @@ func (m *metric) HasTag(key string) bool {
func (m *metric) RemoveTag(key string) bool {
m.tagMap = nil
m.hashID = 0
i := bytes.Index(m.tags, []byte(escaper.Replace(key)+"="))
if i == -1 {
return false
@@ -355,21 +379,17 @@ func (m *metric) RemoveField(key string) bool {
}
func (m *metric) Copy() telegraf.Metric {
name := make([]byte, len(m.name))
tags := make([]byte, len(m.tags))
fields := make([]byte, len(m.fields))
t := make([]byte, len(m.t))
copy(name, m.name)
copy(tags, m.tags)
copy(fields, m.fields)
copy(t, m.t)
return &metric{
name: name,
tags: tags,
fields: fields,
t: t,
hashID: m.hashID,
mOut := metric{
name: make([]byte, len(m.name)),
tags: make([]byte, len(m.tags)),
fields: make([]byte, len(m.fields)),
t: make([]byte, len(m.t)),
}
copy(mOut.name, m.name)
copy(mOut.tags, m.tags)
copy(mOut.fields, m.fields)
copy(mOut.t, m.t)
return &mOut
}
func (m *metric) HashID() uint64 {
@@ -423,6 +443,16 @@ func appendField(b []byte, k string, v interface{}) []byte {
case int:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case uint64:
// Cap uints above the maximum int value
var intv int64
if v <= uint64(MaxInt) {
intv = int64(v)
} else {
intv = int64(MaxInt)
}
b = strconv.AppendInt(b, intv, 10)
b = append(b, 'i')
case uint32:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
@@ -432,11 +462,15 @@ func appendField(b []byte, k string, v interface{}) []byte {
case uint8:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
// TODO: 'uint' should be considered just as "dangerous" as a uint64,
// perhaps the value should be checked and capped at MaxInt64? We could
// then include uint64 as an accepted value
case uint:
b = strconv.AppendInt(b, int64(v), 10)
// Cap uints above the maximum int value
var intv int64
if v <= uint(MaxInt) {
intv = int64(v)
} else {
intv = int64(MaxInt)
}
b = strconv.AppendInt(b, intv, 10)
b = append(b, 'i')
case float32:
b = strconv.AppendFloat(b, float64(v), 'f', -1, 32)

View File

@@ -4,6 +4,8 @@ import (
"fmt"
"testing"
"time"
"github.com/influxdata/telegraf"
)
// vars for making sure that the compiler doesn't optimize out the benchmarks:
@@ -15,9 +17,9 @@ var (
)
func BenchmarkNewMetric(b *testing.B) {
var mt Metric
var mt telegraf.Metric
for n := 0; n < b.N; n++ {
mt, _ = NewMetric("test_metric",
mt, _ = New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
@@ -35,9 +37,9 @@ func BenchmarkNewMetric(b *testing.B) {
}
func BenchmarkNewMetricAndInspect(b *testing.B) {
var mt Metric
var mt telegraf.Metric
for n := 0; n < b.N; n++ {
mt, _ = NewMetric("test_metric",
mt, _ = New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
@@ -59,7 +61,7 @@ func BenchmarkNewMetricAndInspect(b *testing.B) {
}
func BenchmarkTags(b *testing.B) {
var mt, _ = NewMetric("test_metric",
var mt, _ = New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
@@ -79,7 +81,7 @@ func BenchmarkTags(b *testing.B) {
}
func BenchmarkFields(b *testing.B) {
var mt, _ = NewMetric("test_metric",
var mt, _ = New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
@@ -99,7 +101,7 @@ func BenchmarkFields(b *testing.B) {
}
func BenchmarkSerializeMetric(b *testing.B) {
mt, _ := NewMetric("test_metric",
mt, _ := New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
@@ -120,7 +122,7 @@ func BenchmarkSerializeMetric(b *testing.B) {
}
func BenchmarkSerializeMetricBytes(b *testing.B) {
mt, _ := NewMetric("test_metric",
mt, _ := New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",

View File

@@ -6,6 +6,8 @@ import (
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/stretchr/testify/assert"
)
@@ -23,7 +25,7 @@ func TestNewMetric(t *testing.T) {
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Equal(t, Untyped, m.Type())
assert.Equal(t, telegraf.Untyped, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
@@ -42,10 +44,10 @@ func TestNewGaugeMetric(t *testing.T) {
"usage_idle": float64(99),
"usage_busy": float64(1),
}
m, err := New("cpu", tags, fields, now, Gauge)
m, err := New("cpu", tags, fields, now, telegraf.Gauge)
assert.NoError(t, err)
assert.Equal(t, Gauge, m.Type())
assert.Equal(t, telegraf.Gauge, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
@@ -64,10 +66,10 @@ func TestNewCounterMetric(t *testing.T) {
"usage_idle": float64(99),
"usage_busy": float64(1),
}
m, err := New("cpu", tags, fields, now, Counter)
m, err := New("cpu", tags, fields, now, telegraf.Counter)
assert.NoError(t, err)
assert.Equal(t, Counter, m.Type())
assert.Equal(t, telegraf.Counter, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())

View File

@@ -45,28 +45,25 @@ func Parse(buf []byte) ([]telegraf.Metric, error) {
func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
metrics := make([]telegraf.Metric, 0, bytes.Count(buf, []byte("\n"))+1)
var (
errStr string
line []byte
err error
)
b := bytes.NewBuffer(buf)
var errStr string
i := 0
for {
line, err = b.ReadBytes('\n')
if err != nil {
j := bytes.IndexByte(buf[i:], '\n')
if j == -1 {
break
}
if len(line) < 2 {
if len(buf[i:i+j]) < 2 {
i += j + 1 // increment i past the previous newline
continue
}
// trim the newline:
line = line[0 : len(line)-1]
m, err := parseMetric(line, t)
m, err := parseMetric(buf[i:i+j], t)
if err != nil {
i += j + 1 // increment i past the previous newline
errStr += " " + err.Error()
continue
}
i += j + 1 // increment i past the previous newline
metrics = append(metrics, m)
}
@@ -135,7 +132,10 @@ func parseMetric(buf []byte, defaultTime time.Time) (telegraf.Metric, error) {
m.t = []byte(dTime)
}
return m, nil
// here we copy on return because this allows us to later call
// AddTag, AddField, RemoveTag, RemoveField, etc. without worrying about
// modifying 'tag' bytes having an effect on 'field' bytes, for example.
return m.Copy(), nil
}
// scanKey scans buf starting at i for the measurement and tag portion of the point.