Enable gofmt code simplification (#4887)
parent 4a311830c6
commit ee056278f5

Makefile (4 changed lines)
@@ -12,7 +12,7 @@ PREFIX := /usr/local
 BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
 COMMIT := $(shell git rev-parse --short HEAD)
 GOFILES ?= $(shell git ls-files '*.go')
-GOFMT ?= $(shell gofmt -l $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)))
+GOFMT ?= $(shell gofmt -l -s $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)))
 BUILDFLAGS ?=

 ifdef GOBIN
@@ -55,7 +55,7 @@ test:

 .PHONY: fmt
 fmt:
-	@gofmt -w $(filter-out plugins/parsers/influx/machine.go, $(GOFILES))
+	@gofmt -s -w $(filter-out plugins/parsers/influx/machine.go, $(GOFILES))

 .PHONY: fmtcheck
 fmtcheck:
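The `-s` flag puts gofmt into simplify mode, and the hunks that follow are the mechanical result of running `gofmt -s -w` over the tree: redundant element types (and `&T`) are dropped from composite literals, `for x, _ := range` loops lose the unused blank value, and slice expressions stop restating their default bounds. A minimal, self-contained sketch of those three rewrites (not part of this commit; the `tag` type below is invented for illustration and only mirrors the shape of the structs touched in the diff):

package main

import "fmt"

// tag is a stand-in for the struct literals simplified in this commit.
type tag struct {
	Key   string
	Value string
}

func main() {
	// Composite literals: gofmt -s rewrites []tag{tag{...}} to []tag{{...}}
	// because the element type is already implied by the slice type.
	tags := []tag{
		{Key: "cpu", Value: "cpu-total"},
		{Key: "mem", Value: "mem_free"},
	}

	// Range loops: "for i, _ := range tags" becomes "for i := range tags"
	// when the second variable is the blank identifier.
	for i := range tags {
		fmt.Println(tags[i].Key)
	}

	// Slice expressions: "tags[1:len(tags)]" becomes "tags[1:]" since the
	// high bound defaults to the length of the slice.
	rest := tags[1:]
	fmt.Println(len(rest))
}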
@@ -296,13 +296,13 @@ func main() {
 	switch {
 	case *fOutputList:
 		fmt.Println("Available Output Plugins:")
-		for k, _ := range outputs.Outputs {
+		for k := range outputs.Outputs {
 			fmt.Printf(" %s\n", k)
 		}
 		return
 	case *fInputList:
 		fmt.Println("Available Input Plugins:")
-		for k, _ := range inputs.Inputs {
+		for k := range inputs.Inputs {
 			fmt.Printf(" %s\n", k)
 		}
 		return
@@ -32,13 +32,13 @@ func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
 		FieldDrop: []string{"other", "stuff"},
 		FieldPass: []string{"some", "strings"},
 		TagDrop: []models.TagFilter{
-			models.TagFilter{
+			{
 				Name: "badtag",
 				Filter: []string{"othertag"},
 			},
 		},
 		TagPass: []models.TagFilter{
-			models.TagFilter{
+			{
 				Name: "goodtag",
 				Filter: []string{"mytag"},
 			},
@@ -71,13 +71,13 @@ func TestConfig_LoadSingleInput(t *testing.T) {
 		FieldDrop: []string{"other", "stuff"},
 		FieldPass: []string{"some", "strings"},
 		TagDrop: []models.TagFilter{
-			models.TagFilter{
+			{
 				Name: "badtag",
 				Filter: []string{"othertag"},
 			},
 		},
 		TagPass: []models.TagFilter{
-			models.TagFilter{
+			{
 				Name: "goodtag",
 				Filter: []string{"mytag"},
 			},
@@ -117,13 +117,13 @@ func TestConfig_LoadDirectory(t *testing.T) {
 		FieldDrop: []string{"other", "stuff"},
 		FieldPass: []string{"some", "strings"},
 		TagDrop: []models.TagFilter{
-			models.TagFilter{
+			{
 				Name: "badtag",
 				Filter: []string{"othertag"},
 			},
 		},
 		TagPass: []models.TagFilter{
-			models.TagFilter{
+			{
 				Name: "goodtag",
 				Filter: []string{"mytag"},
 			},
@@ -79,13 +79,13 @@ func (f *Filter) Compile() error {
 		return fmt.Errorf("Error compiling 'taginclude', %s", err)
 	}

-	for i, _ := range f.TagDrop {
+	for i := range f.TagDrop {
 		f.TagDrop[i].filter, err = filter.Compile(f.TagDrop[i].Filter)
 		if err != nil {
 			return fmt.Errorf("Error compiling 'tagdrop', %s", err)
 		}
 	}
-	for i, _ := range f.TagPass {
+	for i := range f.TagPass {
 		f.TagPass[i].filter, err = filter.Compile(f.TagPass[i].Filter)
 		if err != nil {
 			return fmt.Errorf("Error compiling 'tagpass', %s", err)
@@ -24,7 +24,7 @@ func TestFilter_ApplyEmpty(t *testing.T) {

 func TestFilter_ApplyTagsDontPass(t *testing.T) {
 	filters := []TagFilter{
-		TagFilter{
+		{
 			Name: "cpu",
 			Filter: []string{"cpu-*"},
 		},
@@ -244,11 +244,11 @@ func TestFilter_FieldDrop(t *testing.T) {

 func TestFilter_TagPass(t *testing.T) {
 	filters := []TagFilter{
-		TagFilter{
+		{
 			Name: "cpu",
 			Filter: []string{"cpu-*"},
 		},
-		TagFilter{
+		{
 			Name: "mem",
 			Filter: []string{"mem_free"},
 		}}
@@ -258,19 +258,19 @@ func TestFilter_TagPass(t *testing.T) {
 	require.NoError(t, f.Compile())

 	passes := [][]*telegraf.Tag{
-		[]*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-total"}},
-		[]*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-0"}},
-		[]*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-1"}},
-		[]*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-2"}},
-		[]*telegraf.Tag{&telegraf.Tag{Key: "mem", Value: "mem_free"}},
+		{{Key: "cpu", Value: "cpu-total"}},
+		{{Key: "cpu", Value: "cpu-0"}},
+		{{Key: "cpu", Value: "cpu-1"}},
+		{{Key: "cpu", Value: "cpu-2"}},
+		{{Key: "mem", Value: "mem_free"}},
 	}

 	drops := [][]*telegraf.Tag{
-		[]*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cputotal"}},
-		[]*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu0"}},
-		[]*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu1"}},
-		[]*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu2"}},
-		[]*telegraf.Tag{&telegraf.Tag{Key: "mem", Value: "mem_used"}},
+		{{Key: "cpu", Value: "cputotal"}},
+		{{Key: "cpu", Value: "cpu0"}},
+		{{Key: "cpu", Value: "cpu1"}},
+		{{Key: "cpu", Value: "cpu2"}},
+		{{Key: "mem", Value: "mem_used"}},
 	}

 	for _, tags := range passes {
@@ -288,11 +288,11 @@ func TestFilter_TagPass(t *testing.T) {

 func TestFilter_TagDrop(t *testing.T) {
 	filters := []TagFilter{
-		TagFilter{
+		{
 			Name: "cpu",
 			Filter: []string{"cpu-*"},
 		},
-		TagFilter{
+		{
 			Name: "mem",
 			Filter: []string{"mem_free"},
 		}}
@@ -302,19 +302,19 @@ func TestFilter_TagDrop(t *testing.T) {
 	require.NoError(t, f.Compile())

 	drops := [][]*telegraf.Tag{
-		[]*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-total"}},
-		[]*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-0"}},
-		[]*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-1"}},
-		[]*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-2"}},
-		[]*telegraf.Tag{&telegraf.Tag{Key: "mem", Value: "mem_free"}},
+		{{Key: "cpu", Value: "cpu-total"}},
+		{{Key: "cpu", Value: "cpu-0"}},
+		{{Key: "cpu", Value: "cpu-1"}},
+		{{Key: "cpu", Value: "cpu-2"}},
+		{{Key: "mem", Value: "mem_free"}},
 	}

 	passes := [][]*telegraf.Tag{
-		[]*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cputotal"}},
-		[]*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu0"}},
-		[]*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu1"}},
-		[]*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu2"}},
-		[]*telegraf.Tag{&telegraf.Tag{Key: "mem", Value: "mem_used"}},
+		{{Key: "cpu", Value: "cputotal"}},
+		{{Key: "cpu", Value: "cpu0"}},
+		{{Key: "cpu", Value: "cpu1"}},
+		{{Key: "cpu", Value: "cpu2"}},
+		{{Key: "mem", Value: "mem_used"}},
 	}

 	for _, tags := range passes {
@@ -442,27 +442,27 @@ func TestFilter_FilterFieldPassAndDrop(t *testing.T) {
 // see: https://github.com/influxdata/telegraf/issues/2860
 func TestFilter_FilterTagsPassAndDrop(t *testing.T) {
 	inputData := [][]*telegraf.Tag{
-		[]*telegraf.Tag{&telegraf.Tag{Key: "tag1", Value: "1"}, &telegraf.Tag{Key: "tag2", Value: "3"}},
-		[]*telegraf.Tag{&telegraf.Tag{Key: "tag1", Value: "1"}, &telegraf.Tag{Key: "tag2", Value: "2"}},
-		[]*telegraf.Tag{&telegraf.Tag{Key: "tag1", Value: "2"}, &telegraf.Tag{Key: "tag2", Value: "1"}},
-		[]*telegraf.Tag{&telegraf.Tag{Key: "tag1", Value: "4"}, &telegraf.Tag{Key: "tag2", Value: "1"}},
+		{{Key: "tag1", Value: "1"}, {Key: "tag2", Value: "3"}},
+		{{Key: "tag1", Value: "1"}, {Key: "tag2", Value: "2"}},
+		{{Key: "tag1", Value: "2"}, {Key: "tag2", Value: "1"}},
+		{{Key: "tag1", Value: "4"}, {Key: "tag2", Value: "1"}},
 	}

 	expectedResult := []bool{false, true, false, false}

 	filterPass := []TagFilter{
-		TagFilter{
+		{
 			Name: "tag1",
 			Filter: []string{"1", "4"},
 		},
 	}

 	filterDrop := []TagFilter{
-		TagFilter{
+		{
 			Name: "tag1",
 			Filter: []string{"4"},
 		},
-		TagFilter{
+		{
 			Name: "tag2",
 			Filter: []string{"3"},
 		},
@@ -59,7 +59,7 @@ func prettyToBytes(v string) uint64 {
 	}
 	var factor uint64
 	factor = 1
-	prefix := v[len(v)-1 : len(v)]
+	prefix := v[len(v)-1:]
 	if factors[prefix] != 0 {
 		v = v[:len(v)-1]
 		factor = factors[prefix]
@@ -278,7 +278,7 @@ func flatten(data interface{}) []*metric {

 	switch val := data.(type) {
 	case float64:
-		metrics = []*metric{&metric{make([]string, 0, 1), val}}
+		metrics = []*metric{{make([]string, 0, 1), val}}
 	case map[string]interface{}:
 		metrics = make([]*metric, 0, len(val))
 		for k, v := range val {
@@ -81,7 +81,7 @@ func TestGather(t *testing.T) {
 	}()

 	findSockets = func(c *Ceph) ([]*socket, error) {
-		return []*socket{&socket{"osd.1", typeOsd, ""}}, nil
+		return []*socket{{"osd.1", typeOsd, ""}}, nil
 	}

 	perfDump = func(binary string, s *socket) (string, error) {
@@ -190,17 +190,17 @@ type SockTest struct {
 }

 var sockTestParams = []*SockTest{
-	&SockTest{
+	{
 		osds: 2,
 		mons: 2,
 	},
-	&SockTest{
+	{
 		mons: 1,
 	},
-	&SockTest{
+	{
 		osds: 1,
 	},
-	&SockTest{},
+	{},
 }

 var monPerfDump = `
@@ -173,7 +173,7 @@ const valuePattern = "[\\d-]+"

 var fileFormats = [...]fileFormat{
 	// VAL\n
-	fileFormat{
+	{
 		name: "Single value",
 		pattern: "^" + valuePattern + "\n$",
 		parser: func(measurement string, fields map[string]interface{}, b []byte) {
@@ -185,7 +185,7 @@ var fileFormats = [...]fileFormat{
 	// VAL0\n
 	// VAL1\n
 	// ...
-	fileFormat{
+	{
 		name: "New line separated values",
 		pattern: "^(" + valuePattern + "\n){2,}$",
 		parser: func(measurement string, fields map[string]interface{}, b []byte) {
@@ -197,7 +197,7 @@ var fileFormats = [...]fileFormat{
 		},
 	},
 	// VAL0 VAL1 ...\n
-	fileFormat{
+	{
 		name: "Space separated values",
 		pattern: "^(" + valuePattern + " )+\n$",
 		parser: func(measurement string, fields map[string]interface{}, b []byte) {
@@ -211,7 +211,7 @@ var fileFormats = [...]fileFormat{
 	// KEY0 VAL0\n
 	// KEY1 VAL1\n
 	// ...
-	fileFormat{
+	{
 		name: "New line separated key-space-value's",
 		pattern: "^(" + keyPattern + " " + valuePattern + "\n)+$",
 		parser: func(measurement string, fields map[string]interface{}, b []byte) {
@@ -18,7 +18,7 @@ func (m *mockGatherCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsI
 		Namespace: params.Namespace,
 		MetricName: aws.String("Latency"),
 		Dimensions: []*cloudwatch.Dimension{
-			&cloudwatch.Dimension{
+			{
 				Name: aws.String("LoadBalancerName"),
 				Value: aws.String("p-example"),
 			},
@@ -100,7 +100,7 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListM
 		Namespace: aws.String("AWS/ELB"),
 		MetricName: aws.String(m),
 		Dimensions: []*cloudwatch.Dimension{
-			&cloudwatch.Dimension{
+			{
 				Name: aws.String("LoadBalancerName"),
 				Value: aws.String(lb),
 			},
@@ -112,11 +112,11 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListM
 		Namespace: aws.String("AWS/ELB"),
 		MetricName: aws.String(m),
 		Dimensions: []*cloudwatch.Dimension{
-			&cloudwatch.Dimension{
+			{
 				Name: aws.String("LoadBalancerName"),
 				Value: aws.String(lb),
 			},
-			&cloudwatch.Dimension{
+			{
 				Name: aws.String("AvailabilityZone"),
 				Value: aws.String(az),
 			},
@@ -148,14 +148,14 @@ func TestSelectMetrics(t *testing.T) {
 		Period: internalDuration,
 		RateLimit: 200,
 		Metrics: []*Metric{
-			&Metric{
+			{
 				MetricNames: []string{"Latency", "RequestCount"},
 				Dimensions: []*Dimension{
-					&Dimension{
+					{
 						Name: "LoadBalancerName",
 						Value: "*",
 					},
-					&Dimension{
+					{
 						Name: "AvailabilityZone",
 						Value: "*",
 					},
@@ -8,7 +8,7 @@ import (
 )

 var sampleChecks = []*api.HealthCheck{
-	&api.HealthCheck{
+	{
 		Node: "localhost",
 		CheckID: "foo.health123",
 		Name: "foo.health",
@@ -163,7 +163,7 @@ func TestCPUCountIncrease(t *testing.T) {

 	mps.On("CPUTimes").Return(
 		[]cpu.TimesStat{
-			cpu.TimesStat{
+			{
 				CPU: "cpu0",
 			},
 		}, nil)
@@ -173,10 +173,10 @@ func TestCPUCountIncrease(t *testing.T) {

 	mps2.On("CPUTimes").Return(
 		[]cpu.TimesStat{
-			cpu.TimesStat{
+			{
 				CPU: "cpu0",
 			},
-			cpu.TimesStat{
+			{
 				CPU: "cpu1",
 			},
 		}, nil)
@ -115,8 +115,8 @@ func TestGetSummary(t *testing.T) {
|
|||
expectedValue: &Summary{
|
||||
Cluster: "a",
|
||||
Slaves: []Slave{
|
||||
Slave{ID: "a"},
|
||||
Slave{ID: "b"},
|
||||
{ID: "a"},
|
||||
{ID: "b"},
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
|
|
|
@ -385,8 +385,8 @@ func TestGatherFilterNode(t *testing.T) {
|
|||
return &Summary{
|
||||
Cluster: "a",
|
||||
Slaves: []Slave{
|
||||
Slave{ID: "x"},
|
||||
Slave{ID: "y"},
|
||||
{ID: "x"},
|
||||
{ID: "y"},
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
|
|
|
@ -138,7 +138,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) {
|
|||
},
|
||||
},
|
||||
usageStats: []*disk.UsageStat{
|
||||
&disk.UsageStat{
|
||||
{
|
||||
Path: "/",
|
||||
Total: 42,
|
||||
},
|
||||
|
@ -170,7 +170,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) {
|
|||
},
|
||||
},
|
||||
usageStats: []*disk.UsageStat{
|
||||
&disk.UsageStat{
|
||||
{
|
||||
Path: "/hostfs/var",
|
||||
Total: 42,
|
||||
},
|
||||
|
@ -203,7 +203,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) {
|
|||
},
|
||||
},
|
||||
usageStats: []*disk.UsageStat{
|
||||
&disk.UsageStat{
|
||||
{
|
||||
Path: "/hostfs",
|
||||
Total: 42,
|
||||
},
|
||||
|
|
|
@ -30,7 +30,7 @@ func TestDiskIO(t *testing.T) {
|
|||
name: "minimal",
|
||||
result: Result{
|
||||
stats: map[string]disk.IOCountersStat{
|
||||
"sda": disk.IOCountersStat{
|
||||
"sda": {
|
||||
ReadCount: 888,
|
||||
WriteCount: 5341,
|
||||
ReadBytes: 100000,
|
||||
|
@ -46,7 +46,7 @@ func TestDiskIO(t *testing.T) {
|
|||
},
|
||||
err: nil,
|
||||
metrics: []Metric{
|
||||
Metric{
|
||||
{
|
||||
tags: map[string]string{
|
||||
"name": "sda",
|
||||
"serial": "ab-123-ad",
|
||||
|
@ -70,11 +70,11 @@ func TestDiskIO(t *testing.T) {
|
|||
devices: []string{"sd*"},
|
||||
result: Result{
|
||||
stats: map[string]disk.IOCountersStat{
|
||||
"sda": disk.IOCountersStat{
|
||||
"sda": {
|
||||
Name: "sda",
|
||||
ReadCount: 42,
|
||||
},
|
||||
"vda": disk.IOCountersStat{
|
||||
"vda": {
|
||||
Name: "vda",
|
||||
ReadCount: 42,
|
||||
},
|
||||
|
@ -83,7 +83,7 @@ func TestDiskIO(t *testing.T) {
|
|||
},
|
||||
err: nil,
|
||||
metrics: []Metric{
|
||||
Metric{
|
||||
{
|
||||
tags: map[string]string{
|
||||
"name": "sda",
|
||||
"serial": "unknown",
|
||||
|
|
|
@ -678,35 +678,35 @@ func TestContainerStateFilter(t *testing.T) {
|
|||
{
|
||||
name: "default",
|
||||
expected: map[string][]string{
|
||||
"status": []string{"running"},
|
||||
"status": {"running"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "include running",
|
||||
include: []string{"running"},
|
||||
expected: map[string][]string{
|
||||
"status": []string{"running"},
|
||||
"status": {"running"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "include glob",
|
||||
include: []string{"r*"},
|
||||
expected: map[string][]string{
|
||||
"status": []string{"restarting", "running", "removing"},
|
||||
"status": {"restarting", "running", "removing"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "include all",
|
||||
include: []string{"*"},
|
||||
expected: map[string][]string{
|
||||
"status": []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"},
|
||||
"status": {"created", "restarting", "running", "removing", "paused", "exited", "dead"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "exclude all",
|
||||
exclude: []string{"*"},
|
||||
expected: map[string][]string{
|
||||
"status": []string{},
|
||||
"status": {},
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -714,7 +714,7 @@ func TestContainerStateFilter(t *testing.T) {
|
|||
include: []string{"*"},
|
||||
exclude: []string{"exited"},
|
||||
expected: map[string][]string{
|
||||
"status": []string{"created", "restarting", "running", "removing", "paused", "dead"},
|
||||
"status": {"created", "restarting", "running", "removing", "paused", "dead"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
|
@ -60,7 +60,7 @@ var info = types.Info{
|
|||
}
|
||||
|
||||
var containerList = []types.Container{
|
||||
types.Container{
|
||||
{
|
||||
ID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb",
|
||||
Names: []string{"/etcd"},
|
||||
Image: "quay.io/coreos/etcd:v2.2.2",
|
||||
|
@ -68,22 +68,22 @@ var containerList = []types.Container{
|
|||
Created: 1455941930,
|
||||
Status: "Up 4 hours",
|
||||
Ports: []types.Port{
|
||||
types.Port{
|
||||
{
|
||||
PrivatePort: 7001,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
{
|
||||
PrivatePort: 4001,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
{
|
||||
PrivatePort: 2380,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
{
|
||||
PrivatePort: 2379,
|
||||
PublicPort: 2379,
|
||||
Type: "tcp",
|
||||
|
@ -97,7 +97,7 @@ var containerList = []types.Container{
|
|||
SizeRw: 0,
|
||||
SizeRootFs: 0,
|
||||
},
|
||||
types.Container{
|
||||
{
|
||||
ID: "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
|
||||
Names: []string{"/etcd2"},
|
||||
Image: "quay.io:4443/coreos/etcd:v2.2.2",
|
||||
|
@ -105,22 +105,22 @@ var containerList = []types.Container{
|
|||
Created: 1455941933,
|
||||
Status: "Up 4 hours",
|
||||
Ports: []types.Port{
|
||||
types.Port{
|
||||
{
|
||||
PrivatePort: 7002,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
{
|
||||
PrivatePort: 4002,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
{
|
||||
PrivatePort: 2381,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
{
|
||||
PrivatePort: 2382,
|
||||
PublicPort: 2382,
|
||||
Type: "tcp",
|
||||
|
@ -134,15 +134,15 @@ var containerList = []types.Container{
|
|||
SizeRw: 0,
|
||||
SizeRootFs: 0,
|
||||
},
|
||||
types.Container{
|
||||
{
|
||||
ID: "e8a713dd90604f5a257b97c15945e047ab60ed5b2c4397c5a6b5bf40e1bd2791",
|
||||
Names: []string{"/acme"},
|
||||
},
|
||||
types.Container{
|
||||
{
|
||||
ID: "9bc6faf9ba8106fae32e8faafd38a1dd6f6d262bec172398cc10bc03c0d6841a",
|
||||
Names: []string{"/acme-test"},
|
||||
},
|
||||
types.Container{
|
||||
{
|
||||
ID: "d4ccced494a1d5fe8ebdb0a86335a0dab069319912221e5838a132ab18a8bc84",
|
||||
Names: []string{"/foo"},
|
||||
},
|
||||
|
@ -150,7 +150,7 @@ var containerList = []types.Container{
|
|||
|
||||
var two = uint64(2)
|
||||
var ServiceList = []swarm.Service{
|
||||
swarm.Service{
|
||||
{
|
||||
ID: "qolkls9g5iasdiuihcyz9rnx2",
|
||||
Spec: swarm.ServiceSpec{
|
||||
Annotations: swarm.Annotations{
|
||||
|
@ -163,7 +163,7 @@ var ServiceList = []swarm.Service{
|
|||
},
|
||||
},
|
||||
},
|
||||
swarm.Service{
|
||||
{
|
||||
ID: "qolkls9g5iasdiuihcyz9rn3",
|
||||
Spec: swarm.ServiceSpec{
|
||||
Annotations: swarm.Annotations{
|
||||
|
@ -177,7 +177,7 @@ var ServiceList = []swarm.Service{
|
|||
}
|
||||
|
||||
var TaskList = []swarm.Task{
|
||||
swarm.Task{
|
||||
{
|
||||
ID: "kwh0lv7hwwbh",
|
||||
ServiceID: "qolkls9g5iasdiuihcyz9rnx2",
|
||||
NodeID: "0cl4jturcyd1ks3fwpd010kor",
|
||||
|
@ -186,7 +186,7 @@ var TaskList = []swarm.Task{
|
|||
},
|
||||
DesiredState: "running",
|
||||
},
|
||||
swarm.Task{
|
||||
{
|
||||
ID: "u78m5ojbivc3",
|
||||
ServiceID: "qolkls9g5iasdiuihcyz9rnx2",
|
||||
NodeID: "0cl4jturcyd1ks3fwpd010kor",
|
||||
|
@ -195,7 +195,7 @@ var TaskList = []swarm.Task{
|
|||
},
|
||||
DesiredState: "running",
|
||||
},
|
||||
swarm.Task{
|
||||
{
|
||||
ID: "1n1uilkhr98l",
|
||||
ServiceID: "qolkls9g5iasdiuihcyz9rn3",
|
||||
NodeID: "0cl4jturcyd1ks3fwpd010kor",
|
||||
|
@ -207,13 +207,13 @@ var TaskList = []swarm.Task{
|
|||
}
|
||||
|
||||
var NodeList = []swarm.Node{
|
||||
swarm.Node{
|
||||
{
|
||||
ID: "0cl4jturcyd1ks3fwpd010kor",
|
||||
Status: swarm.NodeStatus{
|
||||
State: "ready",
|
||||
},
|
||||
},
|
||||
swarm.Node{
|
||||
{
|
||||
ID: "0cl4jturcyd1ks3fwpd010kor",
|
||||
Status: swarm.NodeStatus{
|
||||
State: "ready",
|
||||
|
|
|
@ -135,7 +135,7 @@ func (c *mockHTTPClient) HTTPClient() *http.Client {
|
|||
// *HttpJson: Pointer to an HttpJson object that uses the generated mock HTTP client
|
||||
func genMockGrayLog(response string, statusCode int) []*GrayLog {
|
||||
return []*GrayLog{
|
||||
&GrayLog{
|
||||
{
|
||||
client: &mockHTTPClient{responseBody: response, statusCode: statusCode},
|
||||
Servers: []string{
|
||||
"http://localhost:12900/system/metrics/multiple",
|
||||
|
|
|
@ -14,13 +14,13 @@ type mockFetcher struct {
|
|||
|
||||
func (h *mockFetcher) Fetch(address string) ([]hddtemp.Disk, error) {
|
||||
return []hddtemp.Disk{
|
||||
hddtemp.Disk{
|
||||
{
|
||||
DeviceName: "Disk1",
|
||||
Model: "Model1",
|
||||
Temperature: 13,
|
||||
Unit: "C",
|
||||
},
|
||||
hddtemp.Disk{
|
||||
{
|
||||
DeviceName: "Disk2",
|
||||
Model: "Model2",
|
||||
Temperature: 14,
|
||||
|
|
|
@ -163,7 +163,7 @@ func (c *mockHTTPClient) HTTPClient() *http.Client {
|
|||
// *HttpJson: Pointer to an HttpJson object that uses the generated mock HTTP client
|
||||
func genMockHttpJson(response string, statusCode int) []*HttpJson {
|
||||
return []*HttpJson{
|
||||
&HttpJson{
|
||||
{
|
||||
client: &mockHTTPClient{responseBody: response, statusCode: statusCode},
|
||||
Servers: []string{
|
||||
"http://server1.example.com/metrics/",
|
||||
|
@ -180,7 +180,7 @@ func genMockHttpJson(response string, statusCode int) []*HttpJson {
|
|||
"apiVersion": "v1",
|
||||
},
|
||||
},
|
||||
&HttpJson{
|
||||
{
|
||||
client: &mockHTTPClient{responseBody: response, statusCode: statusCode},
|
||||
Servers: []string{
|
||||
"http://server3.example.com/metrics/",
|
||||
|
|
|
@ -60,7 +60,7 @@ scan:
|
|||
}
|
||||
irqid := strings.TrimRight(fields[0], ":")
|
||||
irq := NewIRQ(irqid)
|
||||
irqvals := fields[1:len(fields)]
|
||||
irqvals := fields[1:]
|
||||
for i := 0; i < cpucount; i++ {
|
||||
if i < len(irqvals) {
|
||||
irqval, err := strconv.ParseInt(irqvals[i], 10, 64)
|
||||
|
|
|
@ -19,31 +19,31 @@ NET_RX: 867028 225
|
|||
TASKLET: 205 0`
|
||||
f := bytes.NewBufferString(interruptStr)
|
||||
parsed := []IRQ{
|
||||
IRQ{
|
||||
{
|
||||
ID: "0", Type: "IO-APIC-edge", Device: "timer",
|
||||
Cpus: []int64{int64(134), int64(0)}, Total: int64(134),
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "1", Type: "IO-APIC-edge", Device: "i8042",
|
||||
Cpus: []int64{int64(7), int64(3)}, Total: int64(10),
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "NMI", Type: "Non-maskable interrupts",
|
||||
Cpus: []int64{int64(0), int64(0)}, Total: int64(0),
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "LOC", Type: "Local timer interrupts",
|
||||
Cpus: []int64{int64(2338608687), int64(2334309625)},
|
||||
Total: int64(4672918312),
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "MIS", Cpus: []int64{int64(0)}, Total: int64(0),
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "NET_RX", Cpus: []int64{int64(867028), int64(225)},
|
||||
Total: int64(867253),
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "TASKLET", Cpus: []int64{int64(205), int64(0)},
|
||||
Total: int64(205),
|
||||
},
|
||||
|
@ -88,91 +88,91 @@ func TestParseInterruptsBad(t *testing.T) {
|
|||
IPI6: 0 0 0 0 completion interrupts`
|
||||
f := bytes.NewBufferString(interruptStr)
|
||||
parsed := []IRQ{
|
||||
IRQ{
|
||||
{
|
||||
ID: "16", Type: "bcm2836-timer", Device: "0 Edge arch_timer",
|
||||
Cpus: []int64{0, 0, 0, 0},
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "17", Type: "bcm2836-timer", Device: "1 Edge arch_timer",
|
||||
Cpus: []int64{127224250, 118424219, 127224437, 117885416}, Total: 490758322,
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "21", Type: "bcm2836-pmu", Device: "9 Edge arm-pmu",
|
||||
Cpus: []int64{0, 0, 0, 0},
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "23", Type: "ARMCTRL-level", Device: "1 Edge 3f00b880.mailbox",
|
||||
Cpus: []int64{1549514, 0, 0, 0}, Total: 1549514,
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "24", Type: "ARMCTRL-level", Device: "2 Edge VCHIQ doorbell",
|
||||
Cpus: []int64{2, 0, 0, 0}, Total: 2,
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "46", Type: "ARMCTRL-level", Device: "48 Edge bcm2708_fb dma",
|
||||
Cpus: []int64{0, 0, 0, 0},
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "48", Type: "ARMCTRL-level", Device: "50 Edge DMA IRQ",
|
||||
Cpus: []int64{0, 0, 0, 0},
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "50", Type: "ARMCTRL-level", Device: "52 Edge DMA IRQ",
|
||||
Cpus: []int64{0, 0, 0, 0},
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "51", Type: "ARMCTRL-level", Device: "53 Edge DMA IRQ",
|
||||
Cpus: []int64{208, 0, 0, 0}, Total: 208,
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "54", Type: "ARMCTRL-level", Device: "56 Edge DMA IRQ",
|
||||
Cpus: []int64{883002, 0, 0, 0}, Total: 883002,
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "59", Type: "ARMCTRL-level", Device: "61 Edge bcm2835-auxirq",
|
||||
Cpus: []int64{0, 0, 0, 0},
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "62", Type: "ARMCTRL-level", Device: "64 Edge dwc_otg, dwc_otg_pcd, dwc_otg_hcd:usb1",
|
||||
Cpus: []int64{521451447, 0, 0, 0}, Total: 521451447,
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "86", Type: "ARMCTRL-level", Device: "88 Edge mmc0",
|
||||
Cpus: []int64{857597, 0, 0, 0}, Total: 857597,
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "87", Type: "ARMCTRL-level", Device: "89 Edge uart-pl011",
|
||||
Cpus: []int64{4938, 0, 0, 0}, Total: 4938,
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "92", Type: "ARMCTRL-level", Device: "94 Edge mmc1",
|
||||
Cpus: []int64{5669, 0, 0, 0}, Total: 5669,
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "IPI0", Type: "CPU wakeup interrupts",
|
||||
Cpus: []int64{0, 0, 0, 0},
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "IPI1", Type: "Timer broadcast interrupts",
|
||||
Cpus: []int64{0, 0, 0, 0},
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "IPI2", Type: "Rescheduling interrupts",
|
||||
Cpus: []int64{23564958, 23464876, 23531165, 23040826}, Total: 93601825,
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "IPI3", Type: "Function call interrupts",
|
||||
Cpus: []int64{148438, 639704, 644266, 588150}, Total: 2020558,
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "IPI4", Type: "CPU stop interrupts",
|
||||
Cpus: []int64{0, 0, 0, 0},
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "IPI5", Type: "IRQ work interrupts",
|
||||
Cpus: []int64{4348149, 1843985, 3819457, 1822877}, Total: 11834468,
|
||||
},
|
||||
IRQ{
|
||||
{
|
||||
ID: "IPI6", Type: "completion interrupts",
|
||||
Cpus: []int64{0, 0, 0, 0},
|
||||
},
|
||||
|
|
|
@ -28,7 +28,7 @@ func NewConnection(server string, privilege string) *Connection {
|
|||
|
||||
if inx1 > 0 {
|
||||
security := server[0:inx1]
|
||||
connstr = server[inx1+1 : len(server)]
|
||||
connstr = server[inx1+1:]
|
||||
up := strings.SplitN(security, ":", 2)
|
||||
conn.Username = up[0]
|
||||
conn.Password = up[1]
|
||||
|
|
|
@ -50,8 +50,8 @@ func TestIpset(t *testing.T) {
|
|||
add myset 3.4.5.6 packets 3 bytes 222
|
||||
`,
|
||||
tags: []map[string]string{
|
||||
map[string]string{"set": "myset", "rule": "1.2.3.4"},
|
||||
map[string]string{"set": "myset", "rule": "3.4.5.6"},
|
||||
{"set": "myset", "rule": "1.2.3.4"},
|
||||
{"set": "myset", "rule": "3.4.5.6"},
|
||||
},
|
||||
fields: [][]map[string]interface{}{
|
||||
{map[string]interface{}{"packets_total": uint64(1328), "bytes_total": uint64(79680)}},
|
||||
|
@ -66,8 +66,8 @@ func TestIpset(t *testing.T) {
|
|||
add myset 3.4.5.6 packets 3 bytes 222 "3rd IP"
|
||||
`,
|
||||
tags: []map[string]string{
|
||||
map[string]string{"set": "myset", "rule": "1.2.3.4"},
|
||||
map[string]string{"set": "myset", "rule": "3.4.5.6"},
|
||||
{"set": "myset", "rule": "1.2.3.4"},
|
||||
{"set": "myset", "rule": "3.4.5.6"},
|
||||
},
|
||||
fields: [][]map[string]interface{}{
|
||||
{map[string]interface{}{"packets_total": uint64(1328), "bytes_total": uint64(79680)}},
|
||||
|
|
|
@ -42,7 +42,7 @@ func TestIptables_Gather(t *testing.T) {
|
|||
pkts bytes target prot opt in out source destination
|
||||
57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* foobar */
|
||||
`},
|
||||
tags: []map[string]string{map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foobar"}},
|
||||
tags: []map[string]string{{"table": "filter", "chain": "INPUT", "ruleid": "foobar"}},
|
||||
fields: [][]map[string]interface{}{
|
||||
{map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}},
|
||||
},
|
||||
|
@ -98,9 +98,9 @@ func TestIptables_Gather(t *testing.T) {
|
|||
`,
|
||||
},
|
||||
tags: []map[string]string{
|
||||
map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foo"},
|
||||
map[string]string{"table": "filter", "chain": "FORWARD", "ruleid": "bar"},
|
||||
map[string]string{"table": "filter", "chain": "FORWARD", "ruleid": "foobar"},
|
||||
{"table": "filter", "chain": "INPUT", "ruleid": "foo"},
|
||||
{"table": "filter", "chain": "FORWARD", "ruleid": "bar"},
|
||||
{"table": "filter", "chain": "FORWARD", "ruleid": "foobar"},
|
||||
},
|
||||
fields: [][]map[string]interface{}{
|
||||
{map[string]interface{}{"pkts": uint64(200), "bytes": uint64(4520)}},
|
||||
|
@ -118,7 +118,7 @@ func TestIptables_Gather(t *testing.T) {
|
|||
100 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:80
|
||||
`},
|
||||
tags: []map[string]string{
|
||||
map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foobar"},
|
||||
{"table": "filter", "chain": "INPUT", "ruleid": "foobar"},
|
||||
},
|
||||
fields: [][]map[string]interface{}{
|
||||
{map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}},
|
||||
|
@ -134,8 +134,8 @@ func TestIptables_Gather(t *testing.T) {
|
|||
0 0 CLASSIFY all -- * * 1.3.5.7 0.0.0.0/0 /* test2 */ CLASSIFY set 1:4
|
||||
`},
|
||||
tags: []map[string]string{
|
||||
map[string]string{"table": "mangle", "chain": "SHAPER", "ruleid": "test"},
|
||||
map[string]string{"table": "mangle", "chain": "SHAPER", "ruleid": "test2"},
|
||||
{"table": "mangle", "chain": "SHAPER", "ruleid": "test"},
|
||||
{"table": "mangle", "chain": "SHAPER", "ruleid": "test2"},
|
||||
},
|
||||
fields: [][]map[string]interface{}{
|
||||
{map[string]interface{}{"pkts": uint64(0), "bytes": uint64(0)}},
|
||||
|
@ -163,7 +163,7 @@ func TestIptables_Gather(t *testing.T) {
|
|||
123 456 all -- eth0 * 0.0.0.0/0 0.0.0.0/0 /* all_recv */
|
||||
`},
|
||||
tags: []map[string]string{
|
||||
map[string]string{"table": "all_recv", "chain": "accountfwd", "ruleid": "all_recv"},
|
||||
{"table": "all_recv", "chain": "accountfwd", "ruleid": "all_recv"},
|
||||
},
|
||||
fields: [][]map[string]interface{}{
|
||||
{map[string]interface{}{"pkts": uint64(123), "bytes": uint64(456)}},
|
||||
|
|
|
@ -117,7 +117,7 @@ const invalidJSON = "I don't think this is JSON"
|
|||
|
||||
const empty = ""
|
||||
|
||||
var Servers = []Server{Server{Name: "as1", Host: "127.0.0.1", Port: "8080"}}
|
||||
var Servers = []Server{{Name: "as1", Host: "127.0.0.1", Port: "8080"}}
|
||||
var HeapMetric = Metric{Name: "heap_memory_usage",
|
||||
Mbean: "java.lang:type=Memory", Attribute: "HeapMemoryUsage"}
|
||||
var UsedHeapMetric = Metric{Name: "heap_memory_usage",
|
||||
|
|
|
@ -17,7 +17,7 @@ func TestJolokia2_makeReadRequests(t *testing.T) {
|
|||
Mbean: "test:foo=bar",
|
||||
},
|
||||
expected: []ReadRequest{
|
||||
ReadRequest{
|
||||
{
|
||||
Mbean: "test:foo=bar",
|
||||
Attributes: []string{},
|
||||
},
|
||||
|
@ -29,7 +29,7 @@ func TestJolokia2_makeReadRequests(t *testing.T) {
|
|||
Paths: []string{"biz"},
|
||||
},
|
||||
expected: []ReadRequest{
|
||||
ReadRequest{
|
||||
{
|
||||
Mbean: "test:foo=bar",
|
||||
Attributes: []string{"biz"},
|
||||
},
|
||||
|
@ -41,7 +41,7 @@ func TestJolokia2_makeReadRequests(t *testing.T) {
|
|||
Paths: []string{"baz", "biz"},
|
||||
},
|
||||
expected: []ReadRequest{
|
||||
ReadRequest{
|
||||
{
|
||||
Mbean: "test:foo=bar",
|
||||
Attributes: []string{"baz", "biz"},
|
||||
},
|
||||
|
@ -53,7 +53,7 @@ func TestJolokia2_makeReadRequests(t *testing.T) {
|
|||
Paths: []string{"biz/baz"},
|
||||
},
|
||||
expected: []ReadRequest{
|
||||
ReadRequest{
|
||||
{
|
||||
Mbean: "test:foo=bar",
|
||||
Attributes: []string{"biz"},
|
||||
Path: "baz",
|
||||
|
@ -66,7 +66,7 @@ func TestJolokia2_makeReadRequests(t *testing.T) {
|
|||
Paths: []string{"biz/baz/fiz/faz"},
|
||||
},
|
||||
expected: []ReadRequest{
|
||||
ReadRequest{
|
||||
{
|
||||
Mbean: "test:foo=bar",
|
||||
Attributes: []string{"biz"},
|
||||
Path: "baz/fiz/faz",
|
||||
|
@ -79,12 +79,12 @@ func TestJolokia2_makeReadRequests(t *testing.T) {
|
|||
Paths: []string{"baz/biz", "faz/fiz"},
|
||||
},
|
||||
expected: []ReadRequest{
|
||||
ReadRequest{
|
||||
{
|
||||
Mbean: "test:foo=bar",
|
||||
Attributes: []string{"baz"},
|
||||
Path: "biz",
|
||||
},
|
||||
ReadRequest{
|
||||
{
|
||||
Mbean: "test:foo=bar",
|
||||
Attributes: []string{"faz"},
|
||||
Path: "fiz",
|
||||
|
|
|
@ -748,7 +748,7 @@ func setupPlugin(t *testing.T, conf string) telegraf.Input {
|
|||
t.Fatalf("Unable to parse config! %v", err)
|
||||
}
|
||||
|
||||
for name, _ := range table.Fields {
|
||||
for name := range table.Fields {
|
||||
object := table.Fields[name]
|
||||
switch name {
|
||||
case "jolokia2_agent":
|
||||
|
|
|
@ -42,8 +42,8 @@ type Mesos struct {
|
|||
}
|
||||
|
||||
var allMetrics = map[Role][]string{
|
||||
MASTER: []string{"resources", "master", "system", "agents", "frameworks", "tasks", "messages", "evqueue", "registrar"},
|
||||
SLAVE: []string{"resources", "agent", "system", "executors", "tasks", "messages"},
|
||||
MASTER: {"resources", "master", "system", "agents", "frameworks", "tasks", "messages", "evqueue", "registrar"},
|
||||
SLAVE: {"resources", "agent", "system", "executors", "tasks", "messages"},
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
|
|
|
@ -53,7 +53,7 @@ func TestAddNonReplStats(t *testing.T) {
|
|||
d.AddDefaultStats()
|
||||
d.flush(&acc)
|
||||
|
||||
for key, _ := range DefaultStats {
|
||||
for key := range DefaultStats {
|
||||
assert.True(t, acc.HasInt64Field("mongodb", key))
|
||||
}
|
||||
}
|
||||
|
@ -74,7 +74,7 @@ func TestAddReplStats(t *testing.T) {
|
|||
d.AddDefaultStats()
|
||||
d.flush(&acc)
|
||||
|
||||
for key, _ := range MmapStats {
|
||||
for key := range MmapStats {
|
||||
assert.True(t, acc.HasInt64Field("mongodb", key))
|
||||
}
|
||||
}
|
||||
|
@ -106,7 +106,7 @@ func TestAddWiredTigerStats(t *testing.T) {
|
|||
d.AddDefaultStats()
|
||||
d.flush(&acc)
|
||||
|
||||
for key, _ := range WiredTigerStats {
|
||||
for key := range WiredTigerStats {
|
||||
assert.True(t, acc.HasFloatField("mongodb", key))
|
||||
}
|
||||
}
|
||||
|
@ -127,7 +127,7 @@ func TestAddShardStats(t *testing.T) {
|
|||
d.AddDefaultStats()
|
||||
d.flush(&acc)
|
||||
|
||||
for key, _ := range DefaultShardStats {
|
||||
for key := range DefaultShardStats {
|
||||
assert.True(t, acc.HasInt64Field("mongodb", key))
|
||||
}
|
||||
}
|
||||
|
@ -156,8 +156,8 @@ func TestAddShardHostStats(t *testing.T) {
|
|||
d.flush(&acc)
|
||||
|
||||
var hostsFound []string
|
||||
for host, _ := range hostStatLines {
|
||||
for key, _ := range ShardHostStats {
|
||||
for host := range hostStatLines {
|
||||
for key := range ShardHostStats {
|
||||
assert.True(t, acc.HasInt64Field("mongodb_shard_stats", key))
|
||||
}
|
||||
|
||||
|
|
|
@ -35,7 +35,7 @@ func TestAddDefaultStats(t *testing.T) {
|
|||
err = server.gatherData(&acc, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
for key, _ := range DefaultStats {
|
||||
for key := range DefaultStats {
|
||||
assert.True(t, acc.HasInt64Field("mongodb", key))
|
||||
}
|
||||
}
|
||||
|
|
|
@ -31,7 +31,7 @@ func TestNetStats(t *testing.T) {
|
|||
mps.On("NetIO").Return([]net.IOCountersStat{netio}, nil)
|
||||
|
||||
netprotos := []net.ProtoCountersStat{
|
||||
net.ProtoCountersStat{
|
||||
{
|
||||
Protocol: "Udp",
|
||||
Stats: map[string]int64{
|
||||
"InDatagrams": 4655,
|
||||
|
@ -42,16 +42,16 @@ func TestNetStats(t *testing.T) {
|
|||
mps.On("NetProto").Return(netprotos, nil)
|
||||
|
||||
netstats := []net.ConnectionStat{
|
||||
net.ConnectionStat{
|
||||
{
|
||||
Type: syscall.SOCK_DGRAM,
|
||||
},
|
||||
net.ConnectionStat{
|
||||
{
|
||||
Status: "ESTABLISHED",
|
||||
},
|
||||
net.ConnectionStat{
|
||||
{
|
||||
Status: "ESTABLISHED",
|
||||
},
|
||||
net.ConnectionStat{
|
||||
{
|
||||
Status: "CLOSE",
|
||||
},
|
||||
}
|
||||
|
|
|
@ -24,12 +24,12 @@ func TestReadsMetricsFromNSQ(t *testing.T) {
|
|||
|
||||
script := []instruction{
|
||||
// SUB
|
||||
instruction{0, nsq.FrameTypeResponse, []byte("OK")},
|
||||
{0, nsq.FrameTypeResponse, []byte("OK")},
|
||||
// IDENTIFY
|
||||
instruction{0, nsq.FrameTypeResponse, []byte("OK")},
|
||||
instruction{20 * time.Millisecond, nsq.FrameTypeMessage, frameMessage(msg)},
|
||||
{0, nsq.FrameTypeResponse, []byte("OK")},
|
||||
{20 * time.Millisecond, nsq.FrameTypeMessage, frameMessage(msg)},
|
||||
// needed to exit test
|
||||
instruction{100 * time.Millisecond, -1, []byte("exit")},
|
||||
{100 * time.Millisecond, -1, []byte("exit")},
|
||||
}
|
||||
|
||||
addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:4155")
|
||||
|
|
|
@ -18,19 +18,19 @@ var (
|
|||
measurement = "nvidia_smi"
|
||||
metrics = "fan.speed,memory.total,memory.used,memory.free,pstate,temperature.gpu,name,uuid,compute_mode,utilization.gpu,utilization.memory,index,power.draw"
|
||||
metricNames = [][]string{
|
||||
[]string{"fan_speed", "integer"},
|
||||
[]string{"memory_total", "integer"},
|
||||
[]string{"memory_used", "integer"},
|
||||
[]string{"memory_free", "integer"},
|
||||
[]string{"pstate", "tag"},
|
||||
[]string{"temperature_gpu", "integer"},
|
||||
[]string{"name", "tag"},
|
||||
[]string{"uuid", "tag"},
|
||||
[]string{"compute_mode", "tag"},
|
||||
[]string{"utilization_gpu", "integer"},
|
||||
[]string{"utilization_memory", "integer"},
|
||||
[]string{"index", "tag"},
|
||||
[]string{"power_draw", "float"},
|
||||
{"fan_speed", "integer"},
|
||||
{"memory_total", "integer"},
|
||||
{"memory_used", "integer"},
|
||||
{"memory_free", "integer"},
|
||||
{"pstate", "tag"},
|
||||
{"temperature_gpu", "integer"},
|
||||
{"name", "tag"},
|
||||
{"uuid", "tag"},
|
||||
{"compute_mode", "tag"},
|
||||
{"utilization_gpu", "integer"},
|
||||
{"utilization_memory", "integer"},
|
||||
{"index", "tag"},
|
||||
{"power_draw", "float"},
|
||||
}
|
||||
)
|
||||
|
||||
|
|
|
@ -72,11 +72,11 @@ type pfctlOutputStanza struct {
|
|||
}
|
||||
|
||||
var pfctlOutputStanzas = []*pfctlOutputStanza{
|
||||
&pfctlOutputStanza{
|
||||
{
|
||||
HeaderRE: regexp.MustCompile("^State Table"),
|
||||
ParseFunc: parseStateTable,
|
||||
},
|
||||
&pfctlOutputStanza{
|
||||
{
|
||||
HeaderRE: regexp.MustCompile("^Counters"),
|
||||
ParseFunc: parseCounterTable,
|
||||
},
|
||||
|
@ -127,10 +127,10 @@ type Entry struct {
|
|||
}
|
||||
|
||||
var StateTable = []*Entry{
|
||||
&Entry{"entries", "current entries", -1},
|
||||
&Entry{"searches", "searches", -1},
|
||||
&Entry{"inserts", "inserts", -1},
|
||||
&Entry{"removals", "removals", -1},
|
||||
{"entries", "current entries", -1},
|
||||
{"searches", "searches", -1},
|
||||
{"inserts", "inserts", -1},
|
||||
{"removals", "removals", -1},
|
||||
}
|
||||
|
||||
var stateTableRE = regexp.MustCompile(`^ (.*?)\s+(\d+)`)
|
||||
|
@ -140,21 +140,21 @@ func parseStateTable(lines []string, fields map[string]interface{}) error {
|
|||
}
|
||||
|
||||
var CounterTable = []*Entry{
|
||||
&Entry{"match", "match", -1},
|
||||
&Entry{"bad-offset", "bad-offset", -1},
|
||||
&Entry{"fragment", "fragment", -1},
|
||||
&Entry{"short", "short", -1},
|
||||
&Entry{"normalize", "normalize", -1},
|
||||
&Entry{"memory", "memory", -1},
|
||||
&Entry{"bad-timestamp", "bad-timestamp", -1},
|
||||
&Entry{"congestion", "congestion", -1},
|
||||
&Entry{"ip-option", "ip-option", -1},
|
||||
&Entry{"proto-cksum", "proto-cksum", -1},
|
||||
&Entry{"state-mismatch", "state-mismatch", -1},
|
||||
&Entry{"state-insert", "state-insert", -1},
|
||||
&Entry{"state-limit", "state-limit", -1},
|
||||
&Entry{"src-limit", "src-limit", -1},
|
||||
&Entry{"synproxy", "synproxy", -1},
|
||||
{"match", "match", -1},
|
||||
{"bad-offset", "bad-offset", -1},
|
||||
{"fragment", "fragment", -1},
|
||||
{"short", "short", -1},
|
||||
{"normalize", "normalize", -1},
|
||||
{"memory", "memory", -1},
|
||||
{"bad-timestamp", "bad-timestamp", -1},
|
||||
{"congestion", "congestion", -1},
|
||||
{"ip-option", "ip-option", -1},
|
||||
{"proto-cksum", "proto-cksum", -1},
|
||||
{"state-mismatch", "state-mismatch", -1},
|
||||
{"state-insert", "state-insert", -1},
|
||||
{"state-limit", "state-limit", -1},
|
||||
{"src-limit", "src-limit", -1},
|
||||
{"synproxy", "synproxy", -1},
|
||||
}
|
||||
|
||||
var counterTableRE = regexp.MustCompile(`^ (.*?)\s+(\d+)`)
|
||||
|
|
|
@ -23,13 +23,13 @@ func TestPfctlInvocation(t *testing.T) {
|
|||
|
||||
var testCases = []pfctlInvocationTestCase{
|
||||
// 0: no sudo
|
||||
pfctlInvocationTestCase{
|
||||
{
|
||||
config: PF{UseSudo: false},
|
||||
cmd: "fakepfctl",
|
||||
args: []string{"-s", "info"},
|
||||
},
|
||||
// 1: with sudo
|
||||
pfctlInvocationTestCase{
|
||||
{
|
||||
config: PF{UseSudo: true},
|
||||
cmd: "fakesudo",
|
||||
args: []string{"fakepfctl", "-s", "info"},
|
||||
|
@ -60,9 +60,9 @@ func TestPfMeasurements(t *testing.T) {
|
|||
|
||||
testCases := []pfTestCase{
|
||||
// 0: nil input should raise an error
|
||||
pfTestCase{TestInput: "", err: errParseHeader},
|
||||
{TestInput: "", err: errParseHeader},
|
||||
// 1: changes to pfctl output should raise an error
|
||||
pfTestCase{TestInput: `Status: Enabled for 161 days 21:24:45 Debug: Urgent
|
||||
{TestInput: `Status: Enabled for 161 days 21:24:45 Debug: Urgent
|
||||
|
||||
Interface Stats for re1 IPv4 IPv6
|
||||
Bytes In 2585823744614 1059233657221
|
||||
|
@ -99,7 +99,7 @@ Counters
|
|||
err: errMissingData("current entries"),
|
||||
},
|
||||
// 2: bad numbers should raise an error
|
||||
pfTestCase{TestInput: `Status: Enabled for 0 days 00:26:05 Debug: Urgent
|
||||
{TestInput: `Status: Enabled for 0 days 00:26:05 Debug: Urgent
|
||||
|
||||
State Table Total Rate
|
||||
current entries -23
|
||||
|
@ -125,7 +125,7 @@ Counters
|
|||
`,
|
||||
err: errMissingData("current entries"),
|
||||
},
|
||||
pfTestCase{TestInput: `Status: Enabled for 0 days 00:26:05 Debug: Urgent
|
||||
{TestInput: `Status: Enabled for 0 days 00:26:05 Debug: Urgent
|
||||
|
||||
State Table Total Rate
|
||||
current entries 2
|
||||
|
@ -150,7 +150,7 @@ Counters
|
|||
synproxy 0 0.0/s
|
||||
`,
|
||||
measurements: []measurementResult{
|
||||
measurementResult{
|
||||
{
|
||||
fields: map[string]interface{}{
|
||||
"entries": int64(2),
|
||||
"searches": int64(11325),
|
||||
|
@ -175,7 +175,7 @@ Counters
|
|||
},
|
||||
},
|
||||
},
|
||||
pfTestCase{TestInput: `Status: Enabled for 161 days 21:24:45 Debug: Urgent
|
||||
{TestInput: `Status: Enabled for 161 days 21:24:45 Debug: Urgent
|
||||
|
||||
Interface Stats for re1 IPv4 IPv6
|
||||
Bytes In 2585823744614 1059233657221
|
||||
|
@ -210,7 +210,7 @@ Counters
|
|||
synproxy 0 0.0/s
|
||||
`,
|
||||
measurements: []measurementResult{
|
||||
measurementResult{
|
||||
{
|
||||
fields: map[string]interface{}{
|
||||
"entries": int64(649),
|
||||
"searches": int64(18421725761),
|
||||
|
|
|
@ -61,26 +61,26 @@ func init() {
|
|||
|
||||
// BEGIN GO GENERATE CONTENT
|
||||
var mockedCommandResults = map[string]mockedCommandResult{
|
||||
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0": mockedCommandResult{stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.1": mockedCommandResult{stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.2": mockedCommandResult{stdout: "TEST::1.2\nanonymous#1 OBJECT-TYPE\n -- FROM\tTEST\n::= { iso(1) 0 testOID(0) 1 2 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x001.0.0.1.1": mockedCommandResult{stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1": mockedCommandResult{stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1.0": mockedCommandResult{stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.5": mockedCommandResult{stdout: "TEST::testTableEntry.5\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 5 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.2.3": mockedCommandResult{stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00.iso.2.3": mockedCommandResult{stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.999": mockedCommandResult{stdout: ".999\n [TRUNCATED]\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00TEST::server": mockedCommandResult{stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00TEST::server.0": mockedCommandResult{stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00TEST::testTable": mockedCommandResult{stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00TEST::connections": mockedCommandResult{stdout: "TEST::connections\nconnections OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tINTEGER\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 2 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00TEST::latency": mockedCommandResult{stdout: "TEST::latency\nlatency OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 3 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00TEST::description": mockedCommandResult{stdout: "TEST::description\ndescription OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 4 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00TEST::hostname": mockedCommandResult{stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00IF-MIB::ifPhysAddress.1": mockedCommandResult{stdout: "IF-MIB::ifPhysAddress.1\nifPhysAddress OBJECT-TYPE\n -- FROM\tIF-MIB\n -- TEXTUAL CONVENTION PhysAddress\n SYNTAX\tOCTET STRING\n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"The interface's address at its protocol sub-layer. For\n example, for an 802.x interface, this object normally\n contains a MAC address. The interface's media-specific MIB\n must define the bit and byte ordering and the format of the\n value of this object. For interfaces which do not have such\n an address (e.g., a serial line), this object should contain\n an octet string of zero length.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) interfaces(2) ifTable(2) ifEntry(1) ifPhysAddress(6) 1 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00BRIDGE-MIB::dot1dTpFdbAddress.1": mockedCommandResult{stdout: "BRIDGE-MIB::dot1dTpFdbAddress.1\ndot1dTpFdbAddress OBJECT-TYPE\n -- FROM\tBRIDGE-MIB\n -- TEXTUAL CONVENTION MacAddress\n SYNTAX\tOCTET STRING (6) \n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"A unicast MAC address for which the bridge has\n forwarding and/or filtering information.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) dot1dBridge(17) dot1dTp(4) dot1dTpFdbTable(3) dot1dTpFdbEntry(1) dot1dTpFdbAddress(1) 1 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00TCP-MIB::tcpConnectionLocalAddress.1": mockedCommandResult{stdout: "TCP-MIB::tcpConnectionLocalAddress.1\ntcpConnectionLocalAddress OBJECT-TYPE\n -- FROM\tTCP-MIB\n -- TEXTUAL CONVENTION InetAddress\n SYNTAX\tOCTET STRING (0..255) \n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n DESCRIPTION\t\"The local IP address for this TCP connection. The type\n of this address is determined by the value of\n tcpConnectionLocalAddressType.\n\n As this object is used in the index for the\n tcpConnectionTable, implementors should be\n careful not to create entries that would result in OIDs\n with more than 128 subidentifiers; otherwise the information\n cannot be accessed by using SNMPv1, SNMPv2c, or SNMPv3.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) tcp(6) tcpConnectionTable(19) tcpConnectionEntry(1) tcpConnectionLocalAddress(2) 1 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00TEST::testTable.1": mockedCommandResult{stdout: "TEST::testTableEntry\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) 1 }\n", stderr: "", exitError: false},
|
||||
"snmptable\x00-Ch\x00-Cl\x00-c\x00public\x00127.0.0.1\x00TEST::testTable": mockedCommandResult{stdout: "server connections latency description \nTEST::testTable: No entries\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0": {stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.1": {stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.2": {stdout: "TEST::1.2\nanonymous#1 OBJECT-TYPE\n -- FROM\tTEST\n::= { iso(1) 0 testOID(0) 1 2 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x001.0.0.1.1": {stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1": {stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1.0": {stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.5": {stdout: "TEST::testTableEntry.5\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 5 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.2.3": {stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false},
|
||||
"snmptranslate\x00-Td\x00-Ob\x00.iso.2.3": {stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false},
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.999": {stdout: ".999\n [TRUNCATED]\n", stderr: "", exitError: false},
"snmptranslate\x00-Td\x00-Ob\x00TEST::server": {stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false},
"snmptranslate\x00-Td\x00-Ob\x00TEST::server.0": {stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false},
"snmptranslate\x00-Td\x00-Ob\x00TEST::testTable": {stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n", stderr: "", exitError: false},
"snmptranslate\x00-Td\x00-Ob\x00TEST::connections": {stdout: "TEST::connections\nconnections OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tINTEGER\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 2 }\n", stderr: "", exitError: false},
"snmptranslate\x00-Td\x00-Ob\x00TEST::latency": {stdout: "TEST::latency\nlatency OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 3 }\n", stderr: "", exitError: false},
"snmptranslate\x00-Td\x00-Ob\x00TEST::description": {stdout: "TEST::description\ndescription OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 4 }\n", stderr: "", exitError: false},
"snmptranslate\x00-Td\x00-Ob\x00TEST::hostname": {stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false},
"snmptranslate\x00-Td\x00-Ob\x00IF-MIB::ifPhysAddress.1": {stdout: "IF-MIB::ifPhysAddress.1\nifPhysAddress OBJECT-TYPE\n -- FROM\tIF-MIB\n -- TEXTUAL CONVENTION PhysAddress\n SYNTAX\tOCTET STRING\n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"The interface's address at its protocol sub-layer. For\n example, for an 802.x interface, this object normally\n contains a MAC address. The interface's media-specific MIB\n must define the bit and byte ordering and the format of the\n value of this object. For interfaces which do not have such\n an address (e.g., a serial line), this object should contain\n an octet string of zero length.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) interfaces(2) ifTable(2) ifEntry(1) ifPhysAddress(6) 1 }\n", stderr: "", exitError: false},
"snmptranslate\x00-Td\x00-Ob\x00BRIDGE-MIB::dot1dTpFdbAddress.1": {stdout: "BRIDGE-MIB::dot1dTpFdbAddress.1\ndot1dTpFdbAddress OBJECT-TYPE\n -- FROM\tBRIDGE-MIB\n -- TEXTUAL CONVENTION MacAddress\n SYNTAX\tOCTET STRING (6) \n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"A unicast MAC address for which the bridge has\n forwarding and/or filtering information.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) dot1dBridge(17) dot1dTp(4) dot1dTpFdbTable(3) dot1dTpFdbEntry(1) dot1dTpFdbAddress(1) 1 }\n", stderr: "", exitError: false},
"snmptranslate\x00-Td\x00-Ob\x00TCP-MIB::tcpConnectionLocalAddress.1": {stdout: "TCP-MIB::tcpConnectionLocalAddress.1\ntcpConnectionLocalAddress OBJECT-TYPE\n -- FROM\tTCP-MIB\n -- TEXTUAL CONVENTION InetAddress\n SYNTAX\tOCTET STRING (0..255) \n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n DESCRIPTION\t\"The local IP address for this TCP connection. The type\n of this address is determined by the value of\n tcpConnectionLocalAddressType.\n\n As this object is used in the index for the\n tcpConnectionTable, implementors should be\n careful not to create entries that would result in OIDs\n with more than 128 subidentifiers; otherwise the information\n cannot be accessed by using SNMPv1, SNMPv2c, or SNMPv3.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) tcp(6) tcpConnectionTable(19) tcpConnectionEntry(1) tcpConnectionLocalAddress(2) 1 }\n", stderr: "", exitError: false},
"snmptranslate\x00-Td\x00TEST::testTable.1": {stdout: "TEST::testTableEntry\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) 1 }\n", stderr: "", exitError: false},
"snmptable\x00-Ch\x00-Cl\x00-c\x00public\x00127.0.0.1\x00TEST::testTable": {stdout: "server connections latency description \nTEST::testTable: No entries\n", stderr: "", exitError: false},
}

@ -721,7 +721,7 @@ func TestSnmpTranslateCache_miss(t *testing.T) {
func TestSnmpTranslateCache_hit(t *testing.T) {
snmpTranslateCaches = map[string]snmpTranslateCache{
"foo": snmpTranslateCache{
"foo": {
mibName: "a",
oidNum: "b",
oidText: "c",
@ -754,7 +754,7 @@ func TestSnmpTableCache_miss(t *testing.T) {
func TestSnmpTableCache_hit(t *testing.T) {
snmpTableCaches = map[string]snmpTableCache{
"foo": snmpTableCache{
"foo": {
mibName: "a",
oidNum: "b",
oidText: "c",

@ -874,21 +874,21 @@ func TestParse_DataDogTags(t *testing.T) {
}

testTags := map[string]map[string]string{
"my_counter": map[string]string{
"my_counter": {
"host": "localhost",
"environment": "prod",
"endpoint": "/:tenant?/oauth/ro",
},

"my_gauge": map[string]string{
"my_gauge": {
"live": "",
},

"my_set": map[string]string{
"my_set": {
"host": "localhost",
},

"my_timer": map[string]string{
"my_timer": {
"live": "",
"host": "localhost",
},

@ -34,7 +34,7 @@ func getTestCasesForRFC5425() []testCase5425 {
name: "1st/avg/ok",
data: []byte(`188 <29>1 2016-02-21T04:32:57+00:00 web1 someservice 2341 2 [origin][meta sequence="14125553" service="someservice"] "GET /v1/ok HTTP/1.1" 200 145 "-" "hacheck 0.9.0" 24306 127.0.0.1:40124 575`),
wantStrict: []testutil.Metric{
testutil.Metric{
{
Measurement: "syslog",
Fields: map[string]interface{}{
"version": uint16(1),
@ -58,7 +58,7 @@ func getTestCasesForRFC5425() []testCase5425 {
},
},
wantBestEffort: []testutil.Metric{
testutil.Metric{
{
Measurement: "syslog",
Fields: map[string]interface{}{
"version": uint16(1),
@ -86,7 +86,7 @@ func getTestCasesForRFC5425() []testCase5425 {
name: "1st/min/ok//2nd/min/ok",
data: []byte("16 <1>2 - - - - - -17 <4>11 - - - - - -"),
wantStrict: []testutil.Metric{
testutil.Metric{
{
Measurement: "syslog",
Fields: map[string]interface{}{
"version": uint16(2),
@ -99,7 +99,7 @@ func getTestCasesForRFC5425() []testCase5425 {
},
Time: defaultTime,
},
testutil.Metric{
{
Measurement: "syslog",
Fields: map[string]interface{}{
"version": uint16(11),
@ -114,7 +114,7 @@ func getTestCasesForRFC5425() []testCase5425 {
},
},
wantBestEffort: []testutil.Metric{
testutil.Metric{
{
Measurement: "syslog",
Fields: map[string]interface{}{
"version": uint16(2),
@ -127,7 +127,7 @@ func getTestCasesForRFC5425() []testCase5425 {
},
Time: defaultTime,
},
testutil.Metric{
{
Measurement: "syslog",
Fields: map[string]interface{}{
"version": uint16(11),
@ -146,7 +146,7 @@ func getTestCasesForRFC5425() []testCase5425 {
name: "1st/utf8/ok",
data: []byte("23 <1>1 - - - - - - hellø"),
wantStrict: []testutil.Metric{
testutil.Metric{
{
Measurement: "syslog",
Fields: map[string]interface{}{
"version": uint16(1),
@ -162,7 +162,7 @@ func getTestCasesForRFC5425() []testCase5425 {
},
},
wantBestEffort: []testutil.Metric{
testutil.Metric{
{
Measurement: "syslog",
Fields: map[string]interface{}{
"version": uint16(1),
@ -182,7 +182,7 @@ func getTestCasesForRFC5425() []testCase5425 {
name: "1st/nl/ok", // newline
data: []byte("28 <1>3 - - - - - - hello\nworld"),
wantStrict: []testutil.Metric{
testutil.Metric{
{
Measurement: "syslog",
Fields: map[string]interface{}{
"version": uint16(3),
@ -198,7 +198,7 @@ func getTestCasesForRFC5425() []testCase5425 {
},
},
wantBestEffort: []testutil.Metric{
testutil.Metric{
{
Measurement: "syslog",
Fields: map[string]interface{}{
"version": uint16(3),
@ -219,7 +219,7 @@ func getTestCasesForRFC5425() []testCase5425 {
data: []byte("16 <1>2"),
wantStrict: nil,
wantBestEffort: []testutil.Metric{
testutil.Metric{
{
Measurement: "syslog",
Fields: map[string]interface{}{
"version": uint16(2),
@ -239,7 +239,7 @@ func getTestCasesForRFC5425() []testCase5425 {
name: "1st/min/ok",
data: []byte("16 <1>1 - - - - - -"),
wantStrict: []testutil.Metric{
testutil.Metric{
{
Measurement: "syslog",
Fields: map[string]interface{}{
"version": uint16(1),
@ -254,7 +254,7 @@ func getTestCasesForRFC5425() []testCase5425 {
},
},
wantBestEffort: []testutil.Metric{
testutil.Metric{
{
Measurement: "syslog",
Fields: map[string]interface{}{
"version": uint16(1),
@ -274,7 +274,7 @@ func getTestCasesForRFC5425() []testCase5425 {
data: []byte("16 <1>217 <11>1 - - - - - -"),
wantStrict: nil,
wantBestEffort: []testutil.Metric{
testutil.Metric{
{
Measurement: "syslog",
Fields: map[string]interface{}{
"version": uint16(217),
@ -299,7 +299,7 @@ func getTestCasesForRFC5425() []testCase5425 {
name: "1st/max/ok",
data: []byte(fmt.Sprintf("8192 <%d>%d %s %s %s %s %s - %s", maxP, maxV, maxTS, maxH, maxA, maxPID, maxMID, message7681)),
wantStrict: []testutil.Metric{
testutil.Metric{
{
Measurement: "syslog",
Fields: map[string]interface{}{
"version": maxV,
@ -320,7 +320,7 @@ func getTestCasesForRFC5425() []testCase5425 {
},
},
wantBestEffort: []testutil.Metric{
testutil.Metric{
{
Measurement: "syslog",
Fields: map[string]interface{}{
"version": maxV,

@ -111,7 +111,7 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error {
if err != nil {
t.acc.AddError(fmt.Errorf("E! Error Glob %s failed to compile, %s", filepath, err))
}
for file, _ := range g.Match() {
for file := range g.Match() {
if _, ok := t.tailers[file]; ok {
// we're already tailing this file
continue

@ -113,16 +113,16 @@ MEMPOOL.vbc.sz_wanted 88 . Size requested
`

var parsedSmOutput = map[string]map[string]interface{}{
"MAIN": map[string]interface{}{
"MAIN": {
"uptime": uint64(895),
"cache_hit": uint64(95),
"cache_miss": uint64(5),
},
"MGT": map[string]interface{}{
"MGT": {
"uptime": uint64(896),
"child_start": uint64(1),
},
"MEMPOOL": map[string]interface{}{
"MEMPOOL": {
"vbc.live": uint64(0),
"vbc.pool": uint64(10),
"vbc.sz_wanted": uint64(88),

@ -382,7 +382,7 @@ func TestNewBinaryAnnotations(t *testing.T) {
name: "myservice",
},
want: []trace.BinaryAnnotation{
trace.BinaryAnnotation{
{
Host: "myhost",
ServiceName: "myservice",
Key: "mykey",
@ -424,7 +424,7 @@ func TestNewAnnotations(t *testing.T) {
name: "myservice",
},
want: []trace.Annotation{
trace.Annotation{
{
Host: "myhost",
ServiceName: "myservice",
Timestamp: time.Unix(0, 0).UTC(),

@ -113,7 +113,7 @@ func TestUnmarshalThrift(t *testing.T) {
Duration: addr(53106),
Annotations: []*zipkincore.Annotation{},
BinaryAnnotations: []*zipkincore.BinaryAnnotation{
&zipkincore.BinaryAnnotation{
{
Key: "lc",
AnnotationType: zipkincore.AnnotationType_STRING,
Value: []byte("trivial"),
@ -133,7 +133,7 @@ func TestUnmarshalThrift(t *testing.T) {
Duration: addr(50410),
Annotations: []*zipkincore.Annotation{},
BinaryAnnotations: []*zipkincore.BinaryAnnotation{
&zipkincore.BinaryAnnotation{
{
Key: "lc",
AnnotationType: zipkincore.AnnotationType_STRING,
Value: []byte("trivial"),
@ -151,7 +151,7 @@ func TestUnmarshalThrift(t *testing.T) {
Timestamp: addr(1498688360851318),
Duration: addr(103680),
Annotations: []*zipkincore.Annotation{
&zipkincore.Annotation{
{
Timestamp: 1498688360851325,
Value: "Starting child #0",
Host: &zipkincore.Endpoint{
@ -159,7 +159,7 @@ func TestUnmarshalThrift(t *testing.T) {
ServiceName: "trivial",
},
},
&zipkincore.Annotation{
{
Timestamp: 1498688360904545,
Value: "Starting child #1",
Host: &zipkincore.Endpoint{
@ -167,7 +167,7 @@ func TestUnmarshalThrift(t *testing.T) {
ServiceName: "trivial",
},
},
&zipkincore.Annotation{
{
Timestamp: 1498688360954992,
Value: "A Log",
Host: &zipkincore.Endpoint{
@ -177,7 +177,7 @@ func TestUnmarshalThrift(t *testing.T) {
},
},
BinaryAnnotations: []*zipkincore.BinaryAnnotation{
&zipkincore.BinaryAnnotation{
{
Key: "lc",
AnnotationType: zipkincore.AnnotationType_STRING,
Value: []byte("trivial"),

@ -108,7 +108,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
},
},
want: []testutil.Metric{
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"id": "8090652509916334619",
@ -122,7 +122,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
},
Time: time.Unix(0, 1498688360851331000).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"id": "8090652509916334619",
@ -139,7 +139,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
},
Time: time.Unix(0, 1498688360851331000).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"id": "103618986556047333",
@ -153,7 +153,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
},
Time: time.Unix(0, 1498688360904552000).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"id": "103618986556047333",
@ -170,7 +170,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
},
Time: time.Unix(0, 1498688360904552000).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"id": "22964302721410078",
@ -184,7 +184,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
},
Time: time.Unix(0, 1498688360851318000).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"service_name": "trivial",
@ -200,7 +200,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
},
Time: time.Unix(0, 1498688360851318000).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"service_name": "trivial",
@ -216,7 +216,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
},
Time: time.Unix(0, 1498688360851318000).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"parent_id": "22964302721410078",
@ -232,7 +232,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
},
Time: time.Unix(0, 1498688360851318000).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"trace_id": "2505404965370368069",
@ -283,7 +283,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
},
},
want: []testutil.Metric{
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"id": "6802735349851856000",
@ -297,7 +297,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
},
Time: time.Unix(1, 0).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "cs",

@ -27,7 +27,7 @@ func TestZipkinPlugin(t *testing.T) {
datafile: "testdata/threespans.dat",
contentType: "application/x-thrift",
want: []testutil.Metric{
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"id": "7047c59776af8a1b",
@ -41,7 +41,7 @@ func TestZipkinPlugin(t *testing.T) {
},
Time: time.Unix(0, 1498688360851331000).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"id": "7047c59776af8a1b",
@ -58,7 +58,7 @@ func TestZipkinPlugin(t *testing.T) {
},
Time: time.Unix(0, 1498688360851331000).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"id": "17020eb55a8bfe5",
@ -72,7 +72,7 @@ func TestZipkinPlugin(t *testing.T) {
},
Time: time.Unix(0, 1498688360904552000).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"id": "17020eb55a8bfe5",
@ -89,7 +89,7 @@ func TestZipkinPlugin(t *testing.T) {
},
Time: time.Unix(0, 1498688360904552000).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"id": "5195e96239641e",
@ -103,7 +103,7 @@ func TestZipkinPlugin(t *testing.T) {
},
Time: time.Unix(0, 1498688360851318000).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"service_name": "trivial",
@ -119,7 +119,7 @@ func TestZipkinPlugin(t *testing.T) {
},
Time: time.Unix(0, 1498688360851318000).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"service_name": "trivial",
@ -135,7 +135,7 @@ func TestZipkinPlugin(t *testing.T) {
},
Time: time.Unix(0, 1498688360851318000).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"parent_id": "5195e96239641e",
@ -151,7 +151,7 @@ func TestZipkinPlugin(t *testing.T) {
},
Time: time.Unix(0, 1498688360851318000).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"trace_id": "22c4fc8ab3669045",
@ -176,7 +176,7 @@ func TestZipkinPlugin(t *testing.T) {
datafile: "testdata/distributed_trace_sample.dat",
contentType: "application/x-thrift",
want: []testutil.Metric{
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"id": "5e682bc21ce99c80",
@ -190,7 +190,7 @@ func TestZipkinPlugin(t *testing.T) {
},
Time: time.Unix(0, 1433330263415871*int64(time.Microsecond)).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "cs",
@ -206,7 +206,7 @@ func TestZipkinPlugin(t *testing.T) {
},
Time: time.Unix(0, 1433330263415871*int64(time.Microsecond)).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "cr",
@ -486,7 +486,7 @@ func TestZipkinPlugin(t *testing.T) {
},
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "ss",
@ -502,7 +502,7 @@ func TestZipkinPlugin(t *testing.T) {
},
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "Demo2Application",
@ -519,7 +519,7 @@ func TestZipkinPlugin(t *testing.T) {
},
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "hi",
@ -536,7 +536,7 @@ func TestZipkinPlugin(t *testing.T) {
},
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
},
testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "192.168.0.8:test:8010",

@ -402,7 +402,7 @@ func translate(m telegraf.Metric, prefix string) (*azureMonitorMetric, error) {
Namespace: ns,
DimensionNames: dimensionNames,
Series: []*azureMonitorSeries{
&azureMonitorSeries{
{
DimensionValues: dimensionValues,
Min: min,
Max: max,

@ -29,7 +29,7 @@ func TestBuildDimensions(t *testing.T) {

tagKeys := make([]string, len(testPoint.Tags()))
i := 0
for k, _ := range testPoint.Tags() {
for k := range testPoint.Tags() {
tagKeys[i] = k
i += 1
}

@ -165,7 +165,7 @@ func escapeObject(m map[string]interface{}) (string, error) {
// We find all keys and sort them first because iterating a map in go is
// randomized and we need consistent output for our unit tests.
keys := make([]string, 0, len(m))
for k, _ := range m {
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)

@ -85,11 +85,11 @@ func TestBuildTags(t *testing.T) {
}{
{
[]*telegraf.Tag{
&telegraf.Tag{
{
Key: "one",
Value: "two",
},
&telegraf.Tag{
{
Key: "three",
Value: "four",
},
@ -98,7 +98,7 @@ func TestBuildTags(t *testing.T) {
},
{
[]*telegraf.Tag{
&telegraf.Tag{
{
Key: "aaa",
Value: "bbb",
},

@ -244,7 +244,7 @@ func TestUDP_WriteWithRealConn(t *testing.T) {
go func() {
defer wg.Done()
var total int
for _, _ = range metrics {
for range metrics {
n, _, err := conn.ReadFrom(buf[total:])
if err != nil {
break

@ -154,7 +154,7 @@ func (p *PrometheusClient) Start() error {
}

registry := prometheus.NewRegistry()
for collector, _ := range defaultCollectors {
for collector := range defaultCollectors {
switch collector {
case "gocollector":
registry.Register(prometheus.NewGoCollector())
@ -236,7 +236,7 @@ func (p *PrometheusClient) Expire() {
for name, family := range p.fam {
for key, sample := range family.Samples {
if p.ExpirationInterval.Duration != 0 && now.After(sample.Expiration) {
for k, _ := range sample.Labels {
for k := range sample.Labels {
family.LabelSet[k]--
}
delete(family.Samples, key)
@ -323,7 +323,7 @@ func CreateSampleID(tags map[string]string) SampleID {

func addSample(fam *MetricFamily, sample *Sample, sampleID SampleID) {

for k, _ := range sample.Labels {
for k := range sample.Labels {
fam.LabelSet[k]++
}

@ -33,7 +33,7 @@ type testCase struct {

var singleMetric = testCase{
[]api.ValueList{
api.ValueList{
{
Identifier: api.Identifier{
Host: "xyzzy",
Plugin: "cpu",
@ -48,7 +48,7 @@ var singleMetric = testCase{
},
},
[]metricData{
metricData{
{
"cpu_value",
map[string]string{
"type_instance": "user",
@ -65,7 +65,7 @@ var singleMetric = testCase{

var multiMetric = testCase{
[]api.ValueList{
api.ValueList{
{
Identifier: api.Identifier{
Host: "xyzzy",
Plugin: "cpu",
@ -81,7 +81,7 @@ var multiMetric = testCase{
},
},
[]metricData{
metricData{
{
"cpu_0",
map[string]string{
"type_instance": "user",
@ -93,7 +93,7 @@ var multiMetric = testCase{
"value": float64(42),
},
},
metricData{
{
"cpu_1",
map[string]string{
"type_instance": "user",

File diff suppressed because it is too large
@ -53,7 +53,7 @@ func TestFieldConversions(t *testing.T) {
name: "Should change existing field to lowercase",
plugin: &Strings{
Lowercase: []converter{
converter{
{
Field: "request",
},
},
@ -68,7 +68,7 @@ func TestFieldConversions(t *testing.T) {
name: "Should change existing field to uppercase",
plugin: &Strings{
Uppercase: []converter{
converter{
{
Field: "request",
},
},
@ -83,7 +83,7 @@ func TestFieldConversions(t *testing.T) {
name: "Should add new lowercase field",
plugin: &Strings{
Lowercase: []converter{
converter{
{
Field: "request",
Dest: "lowercase_request",
},
@ -103,7 +103,7 @@ func TestFieldConversions(t *testing.T) {
name: "Should trim from both sides",
plugin: &Strings{
Trim: []converter{
converter{
{
Field: "request",
Cutset: "/w",
},
@ -119,13 +119,13 @@ func TestFieldConversions(t *testing.T) {
name: "Should trim from both sides and make lowercase",
plugin: &Strings{
Trim: []converter{
converter{
{
Field: "request",
Cutset: "/w",
},
},
Lowercase: []converter{
converter{
{
Field: "request",
},
},
@ -140,7 +140,7 @@ func TestFieldConversions(t *testing.T) {
name: "Should trim from left side",
plugin: &Strings{
TrimLeft: []converter{
converter{
{
Field: "request",
Cutset: "/w",
},
@ -156,7 +156,7 @@ func TestFieldConversions(t *testing.T) {
name: "Should trim from right side",
plugin: &Strings{
TrimRight: []converter{
converter{
{
Field: "request",
Cutset: "/w",
},
@ -172,7 +172,7 @@ func TestFieldConversions(t *testing.T) {
name: "Should trim prefix '/mixed'",
plugin: &Strings{
TrimPrefix: []converter{
converter{
{
Field: "request",
Prefix: "/mixed",
},
@ -188,7 +188,7 @@ func TestFieldConversions(t *testing.T) {
name: "Should trim suffix '-1D&to=now'",
plugin: &Strings{
TrimSuffix: []converter{
converter{
{
Field: "request",
Suffix: "-1D&to=now",
},
@ -204,7 +204,7 @@ func TestFieldConversions(t *testing.T) {
name: "Trim without cutset removes whitespace",
plugin: &Strings{
Trim: []converter{
converter{
{
Field: "whitespace",
},
},
@ -219,7 +219,7 @@ func TestFieldConversions(t *testing.T) {
name: "Trim left without cutset removes whitespace",
plugin: &Strings{
TrimLeft: []converter{
converter{
{
Field: "whitespace",
},
},
@ -234,7 +234,7 @@ func TestFieldConversions(t *testing.T) {
name: "Trim right without cutset removes whitespace",
plugin: &Strings{
TrimRight: []converter{
converter{
{
Field: "whitespace",
},
},
@ -249,7 +249,7 @@ func TestFieldConversions(t *testing.T) {
name: "No change if field missing",
plugin: &Strings{
Lowercase: []converter{
converter{
{
Field: "xyzzy",
Suffix: "-1D&to=now",
},
@ -281,7 +281,7 @@ func TestTagConversions(t *testing.T) {
name: "Should change existing tag to lowercase",
plugin: &Strings{
Lowercase: []converter{
converter{
{
Tag: "s-computername",
},
},
@ -300,7 +300,7 @@ func TestTagConversions(t *testing.T) {
name: "Should add new lowercase tag",
plugin: &Strings{
Lowercase: []converter{
converter{
{
Tag: "s-computername",
Dest: "s-computername_lowercase",
},
@ -324,7 +324,7 @@ func TestTagConversions(t *testing.T) {
name: "Should add new uppercase tag",
plugin: &Strings{
Uppercase: []converter{
converter{
{
Tag: "s-computername",
Dest: "s-computername_uppercase",
},
@ -365,7 +365,7 @@ func TestMeasurementConversions(t *testing.T) {
name: "lowercase measurement",
plugin: &Strings{
Lowercase: []converter{
converter{
{
Measurement: "IIS_log",
},
},
@ -388,19 +388,19 @@ func TestMeasurementConversions(t *testing.T) {
func TestMultipleConversions(t *testing.T) {
plugin := &Strings{
Lowercase: []converter{
converter{
{
Tag: "s-computername",
},
converter{
{
Field: "request",
},
converter{
{
Field: "cs-host",
Dest: "cs-host_lowercase",
},
},
Uppercase: []converter{
converter{
{
Tag: "verb",
},
},
@ -428,18 +428,18 @@ func TestMultipleConversions(t *testing.T) {
func TestReadmeExample(t *testing.T) {
plugin := &Strings{
Lowercase: []converter{
converter{
{
Tag: "uri_stem",
},
},
TrimPrefix: []converter{
converter{
{
Tag: "uri_stem",
Prefix: "/api/",
},
},
Uppercase: []converter{
converter{
{
Field: "cs-host",
Dest: "cs-host_normalised",
},
@ -492,7 +492,7 @@ func newMetric(name string) telegraf.Metric {
func TestMeasurementReplace(t *testing.T) {
plugin := &Strings{
Replace: []converter{
converter{
{
Old: "_",
New: "-",
Measurement: "*",
@ -513,7 +513,7 @@ func TestMeasurementReplace(t *testing.T) {
func TestMeasurementCharDeletion(t *testing.T) {
plugin := &Strings{
Replace: []converter{
converter{
{
Old: "foo",
New: "",
Measurement: "*",

@ -405,7 +405,7 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr
}
// Divide by the number of recorded measurements collected for every field
noMeasurementsFound := true // Canary to check if no field with values was found, so we can return nil
for k, _ := range mean {
for k := range mean {
if meanCounters[k] == 0 {
mean[k] = 0
continue

@ -178,11 +178,11 @@ func TestTopkMeanAddAggregateFields(t *testing.T) {
// Generate the answer
chng := fieldList(field{"a_topk_aggregate", float64(28.044)})
changeSet := map[int]metricChange{
0: metricChange{newFields: chng},
1: metricChange{newFields: chng},
2: metricChange{newFields: chng},
3: metricChange{newFields: chng},
4: metricChange{newFields: chng},
0: {newFields: chng},
1: {newFields: chng},
2: {newFields: chng},
3: {newFields: chng},
4: {newFields: chng},
}
answer := generateAns(input, changeSet)

@ -208,11 +208,11 @@ func TestTopkSumAddAggregateFields(t *testing.T) {
// Generate the answer
chng := fieldList(field{"a_topk_aggregate", float64(140.22)})
changeSet := map[int]metricChange{
0: metricChange{newFields: chng},
1: metricChange{newFields: chng},
2: metricChange{newFields: chng},
3: metricChange{newFields: chng},
4: metricChange{newFields: chng},
0: {newFields: chng},
1: {newFields: chng},
2: {newFields: chng},
3: {newFields: chng},
4: {newFields: chng},
}
answer := generateAns(input, changeSet)

@ -238,11 +238,11 @@ func TestTopkMaxAddAggregateFields(t *testing.T) {
// Generate the answer
chng := fieldList(field{"a_topk_aggregate", float64(50.5)})
changeSet := map[int]metricChange{
0: metricChange{newFields: chng},
1: metricChange{newFields: chng},
2: metricChange{newFields: chng},
3: metricChange{newFields: chng},
4: metricChange{newFields: chng},
0: {newFields: chng},
1: {newFields: chng},
2: {newFields: chng},
3: {newFields: chng},
4: {newFields: chng},
}
answer := generateAns(input, changeSet)

@ -268,11 +268,11 @@ func TestTopkMinAddAggregateFields(t *testing.T) {
// Generate the answer
chng := fieldList(field{"a_topk_aggregate", float64(0.3)})
changeSet := map[int]metricChange{
0: metricChange{newFields: chng},
1: metricChange{newFields: chng},
2: metricChange{newFields: chng},
3: metricChange{newFields: chng},
4: metricChange{newFields: chng},
0: {newFields: chng},
1: {newFields: chng},
2: {newFields: chng},
3: {newFields: chng},
4: {newFields: chng},
}
answer := generateAns(input, changeSet)

@ -297,10 +297,10 @@ func TestTopkGroupby1(t *testing.T) {

// Generate the answer
changeSet := map[int]metricChange{
2: metricChange{newFields: fieldList(field{"value_topk_aggregate", float64(74.18)})},
3: metricChange{newFields: fieldList(field{"value_topk_aggregate", float64(72)})},
4: metricChange{newFields: fieldList(field{"value_topk_aggregate", float64(163.22)})},
5: metricChange{newFields: fieldList(field{"value_topk_aggregate", float64(163.22)})},
2: {newFields: fieldList(field{"value_topk_aggregate", float64(74.18)})},
3: {newFields: fieldList(field{"value_topk_aggregate", float64(72)})},
4: {newFields: fieldList(field{"value_topk_aggregate", float64(163.22)})},
5: {newFields: fieldList(field{"value_topk_aggregate", float64(163.22)})},
}
answer := generateAns(input, changeSet)

@ -326,11 +326,11 @@ func TestTopkGroupby2(t *testing.T) {
chng2 := fieldList(field{"value_topk_aggregate", float64(72)})
chng3 := fieldList(field{"value_topk_aggregate", float64(81.61)})
changeSet := map[int]metricChange{
1: metricChange{newFields: chng1},
2: metricChange{newFields: chng1},
3: metricChange{newFields: chng2},
4: metricChange{newFields: chng3},
5: metricChange{newFields: chng3},
1: {newFields: chng1},
2: {newFields: chng1},
3: {newFields: chng2},
4: {newFields: chng3},
5: {newFields: chng3},
}
answer := generateAns(input, changeSet)

@ -354,8 +354,8 @@ func TestTopkGroupby3(t *testing.T) {
// Generate the answer
chng := fieldList(field{"value_topk_aggregate", float64(75.3)})
changeSet := map[int]metricChange{
4: metricChange{newFields: chng},
5: metricChange{newFields: chng},
4: {newFields: chng},
5: {newFields: chng},
}
answer := generateAns(input, changeSet)

@ -381,10 +381,10 @@ func TestTopkGroupbyFields1(t *testing.T) {

// Generate the answer
changeSet := map[int]metricChange{
0: metricChange{newFields: fieldList(field{"A_topk_aggregate", float64(95.36)})},
1: metricChange{newFields: fieldList(field{"A_topk_aggregate", float64(39.01)})},
2: metricChange{newFields: fieldList(field{"A_topk_aggregate", float64(39.01)})},
5: metricChange{newFields: fieldList(field{"A_topk_aggregate", float64(29.45)})},
0: {newFields: fieldList(field{"A_topk_aggregate", float64(95.36)})},
1: {newFields: fieldList(field{"A_topk_aggregate", float64(39.01)})},
2: {newFields: fieldList(field{"A_topk_aggregate", float64(39.01)})},
5: {newFields: fieldList(field{"A_topk_aggregate", float64(29.45)})},
}
answer := generateAns(input, changeSet)

@ -409,10 +409,10 @@ func TestTopkGroupbyFields2(t *testing.T) {

// Generate the answer
changeSet := map[int]metricChange{
0: metricChange{newFields: fieldList(field{"C_topk_aggregate", float64(72.41)})},
2: metricChange{newFields: fieldList(field{"B_topk_aggregate", float64(60.96)})},
4: metricChange{newFields: fieldList(field{"B_topk_aggregate", float64(81.55)}, field{"C_topk_aggregate", float64(49.96)})},
5: metricChange{newFields: fieldList(field{"C_topk_aggregate", float64(49.96)})},
0: {newFields: fieldList(field{"C_topk_aggregate", float64(72.41)})},
2: {newFields: fieldList(field{"B_topk_aggregate", float64(60.96)})},
4: {newFields: fieldList(field{"B_topk_aggregate", float64(81.55)}, field{"C_topk_aggregate", float64(49.96)})},
5: {newFields: fieldList(field{"C_topk_aggregate", float64(49.96)})},
}
answer := generateAns(input, changeSet)

@ -438,9 +438,9 @@ func TestTopkGroupbyMetricName1(t *testing.T) {
// Generate the answer
chng := fieldList(field{"value_topk_aggregate", float64(235.22000000000003)})
changeSet := map[int]metricChange{
3: metricChange{newFields: chng},
4: metricChange{newFields: chng},
5: metricChange{newFields: chng},
3: {newFields: chng},
4: {newFields: chng},
5: {newFields: chng},
}
answer := generateAns(input, changeSet)

@ -465,10 +465,10 @@ func TestTopkGroupbyMetricName2(t *testing.T) {

// Generate the answer
changeSet := map[int]metricChange{
0: metricChange{newFields: fieldList(field{"A_topk_aggregate", float64(95.36)})},
1: metricChange{newFields: fieldList(field{"A_topk_aggregate", float64(78.02)}, field{"value_topk_aggregate", float64(133.61)})},
2: metricChange{newFields: fieldList(field{"A_topk_aggregate", float64(78.02)}, field{"value_topk_aggregate", float64(133.61)})},
4: metricChange{newFields: fieldList(field{"value_topk_aggregate", float64(87.92)})},
0: {newFields: fieldList(field{"A_topk_aggregate", float64(95.36)})},
1: {newFields: fieldList(field{"A_topk_aggregate", float64(78.02)}, field{"value_topk_aggregate", float64(133.61)})},
2: {newFields: fieldList(field{"A_topk_aggregate", float64(78.02)}, field{"value_topk_aggregate", float64(133.61)})},
4: {newFields: fieldList(field{"value_topk_aggregate", float64(87.92)})},
}
answer := generateAns(input, changeSet)

@ -493,9 +493,9 @@ func TestTopkBottomk(t *testing.T) {

// Generate the answer
changeSet := map[int]metricChange{
0: metricChange{},
1: metricChange{},
3: metricChange{},
0: {},
1: {},
3: {},
}
answer := generateAns(input, changeSet)

@ -520,10 +520,10 @@ func TestTopkGroupByKeyTag(t *testing.T) {

// Generate the answer
changeSet := map[int]metricChange{
2: metricChange{newTags: tagList(tag{"gbt", "metric1&tag1=TWO&tag3=SIX&"})},
3: metricChange{newTags: tagList(tag{"gbt", "metric2&tag1=ONE&tag3=THREE&"})},
4: metricChange{newTags: tagList(tag{"gbt", "metric2&tag1=TWO&tag3=SEVEN&"})},
5: metricChange{newTags: tagList(tag{"gbt", "metric2&tag1=TWO&tag3=SEVEN&"})},
2: {newTags: tagList(tag{"gbt", "metric1&tag1=TWO&tag3=SIX&"})},
3: {newTags: tagList(tag{"gbt", "metric2&tag1=ONE&tag3=THREE&"})},
4: {newTags: tagList(tag{"gbt", "metric2&tag1=TWO&tag3=SEVEN&"})},
5: {newTags: tagList(tag{"gbt", "metric2&tag1=TWO&tag3=SEVEN&"})},
}
answer := generateAns(input, changeSet)

@ -237,7 +237,7 @@ func (a *Accumulator) NFields() int {
defer a.Unlock()
counter := 0
for _, pt := range a.Metrics {
for _, _ = range pt.Fields {
for range pt.Fields {
counter++
}
}
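
The hunks above all apply the same two mechanical rewrites performed by gofmt -s: eliding the repeated element type in composite literals and dropping the unused blank identifier from range clauses. The following is a minimal illustrative sketch of both rewrites, not part of this commit; the type and field names are made up for demonstration.

package main

import "fmt"

type tagFilter struct {
	Name string
}

func main() {
	// Before simplification the element type is repeated inside the literal
	// and the blank identifier is spelled out even though it is unused:
	//   filters := []tagFilter{tagFilter{Name: "badtag"}}
	//   for i, _ := range filters { fmt.Println(filters[i].Name) }

	// After `gofmt -s` the element type is inferred from the slice type,
	// and the range clause keeps only the index.
	filters := []tagFilter{{Name: "badtag"}}
	for i := range filters {
		fmt.Println(filters[i].Name)
	}
}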