Merge pull request #1 from kelwang/jenkins-gollar-changes
Jenkins gollar changes
commit 8ff50e4327
Gopkg.lock
@@ -465,6 +465,12 @@
  packages = ["."]
  revision = "95032a82bc518f77982ea72343cc1ade730072f0"

[[projects]]
  name = "github.com/kelwang/gojenkins"
  packages = ["."]
  revision = "4ea2f2d0a3e1350cf32d33b31ad175a2521425de"
  version = "v1.0.1"

[[projects]]
  branch = "master"
  name = "github.com/kr/logfmt"

@@ -968,6 +974,6 @@
[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
- inputs-digest = "024194b983d91b9500fe97e0aa0ddb5fe725030cb51ddfb034e386cae1098370"
+ inputs-digest = "ef689441bcd85892bf7b2818fd5def10c2d59112ad13cdf44512efb1f2e48c6a"
  solver-name = "gps-cdcl"
  solver-version = 1

Gopkg.toml
@@ -108,6 +108,10 @@
  name = "github.com/kballard/go-shellquote"
  branch = "master"

[[constraint]]
  name = "github.com/kelwang/gojenkins"
  version = "=v1.0.1"

[[constraint]]
  name = "github.com/matttproud/golang_protobuf_extensions"
  version = "1.0.1"

plugins/inputs/all/all.go
@@ -42,6 +42,7 @@ import (
    _ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
    _ "github.com/influxdata/telegraf/plugins/inputs/ipset"
    _ "github.com/influxdata/telegraf/plugins/inputs/iptables"
    _ "github.com/influxdata/telegraf/plugins/inputs/jenkins"
    _ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
    _ "github.com/influxdata/telegraf/plugins/inputs/jolokia2"
    _ "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry"
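
The new jenkins line follows the same registry pattern as the surrounding inputs: the blank import is there only for its side effect of running the package's init() function (see the bottom of jenkins.go below), which registers the plugin under the name "jenkins".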

plugins/inputs/jenkins/README.md
@@ -0,0 +1,79 @@
# Jenkins Plugin

The jenkins plugin gathers information about the nodes and jobs running in a Jenkins instance.

This plugin does not require any extra plugin on the Jenkins server; it makes use of the Jenkins API to retrieve all the information needed.
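
Concretely, the plugin only hits a handful of standard Jenkins JSON endpoints (these paths mirror the requests issued by jenkins.go below; host and job names are placeholders):

```
GET /api/json                      # connectivity check when the client is created
GET /computer/api/json             # node and monitor data
GET /job/<name>/api/json           # job detail, applied recursively to sub jobs
GET /job/<name>/<number>/api/json  # detail for a job's last build
```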

### Configuration:

```toml
url = "http://my-jenkins-instance:8080"
# username = "admin"
# password = "admin"
## Set response_timeout
response_timeout = "5s"

## Optional SSL Config
# ssl_ca = "/path/to/cafile"
# ssl_cert = "/path/to/certfile"
# ssl_key = "/path/to/keyfile"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false

## Job & build filter
# max_build_age = "1h"
## jenkins can have unlimited layers of sub jobs
## this config will limit the layers to pull; the default value 0 means
## unlimited pulling until there are no more sub jobs
# max_subjob_depth = 0
## in workflow-multibranch-plugin, each branch will be created as a sub job
## this config will limit the plugin to fetch only the latest
## sub jobs in each layer
## empty will use default value 10
# max_subjob_per_layer = 10
# job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*" ]

## Node filter
# node_exclude = [ "node1", "node2" ]

## Worker pool for jenkins plugin only
## empty this field will use default value 30
# max_connections = 30
```
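
In an actual telegraf.conf these options live under the plugin's own config table. A minimal sketch (the `[[inputs.jenkins]]` table name follows from the `inputs.Add("jenkins", ...)` registration in jenkins.go; URL and credentials are placeholders):

```toml
[[inputs.jenkins]]
  url = "http://my-jenkins-instance:8080"
  # username = "admin"
  # password = "admin"
  response_timeout = "5s"
```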

### Measurements & Fields:

- jenkins_node
  - disk_available
  - temp_available
  - memory_available
  - memory_total
  - swap_available
  - swap_total
  - response_time

- jenkins_job
  - duration
  - result_code (0 = SUCCESS, 1 = FAILURE, 2 = NOT_BUILT, 3 = UNSTABLE, 4 = ABORTED)
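
The result_code values mirror the `mapResultCode` switch in jenkins.go below; any other result string is recorded as -1.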

### Tags:

- jenkins_node
  - arch
  - disk_path
  - temp_path
  - node_name
  - status ("online", "offline")

- jenkins_job
  - job_name
  - result

### Example Output:

```
$ ./telegraf --config telegraf.conf --input-filter jenkins --test
jenkins_node,arch=Linux\ (amd64),disk_path=/var/jenkins_home,temp_path=/tmp,host=myhost,node_name=master swap_total=4294963200,memory_available=586711040,memory_total=6089498624,status=online,response_time=1000i,disk_available=152392036352,temp_available=152392036352,swap_available=3503263744 1516031535000000000
jenkins_job,host=myhost,job_name=JOB1,result=SUCCESS duration=2831i,result_code=0i 1516026630000000000
jenkins_job,host=myhost,job_name=JOB2,result=SUCCESS duration=2285i,result_code=0i 1516027230000000000
```

plugins/inputs/jenkins/jenkins.go
@@ -0,0 +1,444 @@
package jenkins

import (
    "fmt"
    "log"
    "net/http"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/filter"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/internal/tls"
    "github.com/influxdata/telegraf/plugins/inputs"
    "github.com/kelwang/gojenkins"
)

// Jenkins plugin gathers information about the nodes and jobs running in a jenkins instance.
type Jenkins struct {
    URL      string
    Username string
    Password string
    // HTTP Timeout specified as a string - 3s, 1m, 1h
    ResponseTimeout internal.Duration

    tls.ClientConfig
    instance *gojenkins.Jenkins

    MaxConnections    int               `toml:"max_connections"`
    MaxBuildAge       internal.Duration `toml:"max_build_age"`
    MaxSubJobDepth    int               `toml:"max_subjob_depth"`
    MaxSubJobPerLayer int               `toml:"max_subjob_per_layer"`
    JobExclude        []string          `toml:"job_exclude"`
    jobFilter         filter.Filter

    NodeExclude []string `toml:"node_exclude"`
    nodeFilter  filter.Filter

    semaphore chan struct{}
}

type byBuildNumber []gojenkins.JobBuild

const sampleConfig = `
url = "http://my-jenkins-instance:8080"
# username = "admin"
# password = "admin"
## Set response_timeout
response_timeout = "5s"

## Optional SSL Config
# ssl_ca = "/path/to/cafile"
# ssl_cert = "/path/to/certfile"
# ssl_key = "/path/to/keyfile"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false

## Job & build filter
# max_build_age = "1h"
## jenkins can have unlimited layers of sub jobs
## this config will limit the layers to pull; the default value 0 means
## unlimited pulling until there are no more sub jobs
# max_subjob_depth = 0
## in workflow-multibranch-plugin, each branch will be created as a sub job
## this config will limit the plugin to fetch only the latest
## sub jobs in each layer
## empty will use default value 10
# max_subjob_per_layer = 10
# job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*" ]

## Node filter
# node_exclude = [ "node1", "node2" ]

## Worker pool for jenkins plugin only
## empty this field will use default value 30
# max_connections = 30
`

// measurement
const (
    measurementNode = "jenkins_node"
    measurementJob  = "jenkins_job"
)

func badFormatErr(url string, field interface{}, want string, fieldName string) error {
    return fmt.Errorf("error bad format[%s]: fieldName: %s, want %s, got %T", url, fieldName, want, field)
}

// SampleConfig implements telegraf.Input interface
func (j *Jenkins) SampleConfig() string {
    return sampleConfig
}

// Description implements telegraf.Input interface
func (j *Jenkins) Description() string {
    return "Read jobs and cluster metrics from Jenkins instances"
}

// Gather implements telegraf.Input interface
func (j *Jenkins) Gather(acc telegraf.Accumulator) error {
    if j.instance == nil {
        client, te := j.initClient()
        if te != nil {
            return te
        }
        if te = j.newInstance(client); te != nil {
            return te
        }
    }

    j.gatherNodesData(acc)
    j.gatherJobs(acc)

    return nil
}

func (j *Jenkins) initClient() (*http.Client, error) {
    tlsCfg, err := j.ClientConfig.TLSConfig()
    if err != nil {
        return nil, fmt.Errorf("error parsing jenkins config[%s]: %v", j.URL, err)
    }
    return &http.Client{
        Transport: &http.Transport{
            TLSClientConfig: tlsCfg,
            MaxIdleConns:    j.MaxConnections,
        },
        Timeout: j.ResponseTimeout.Duration,
    }, nil
}

// separate the client as a dependency so it can be mocked with the httptest client
func (j *Jenkins) newInstance(client *http.Client) error {
    // create instance
    var err error
    j.instance, err = gojenkins.CreateJenkins(client, j.URL, j.Username, j.Password).Init()
    if err != nil {
        return fmt.Errorf("error connecting to jenkins instance[%s]: %v", j.URL, err)
    }

    // init job filter
    j.jobFilter, err = filter.Compile(j.JobExclude)
    if err != nil {
        return fmt.Errorf("error compiling job filters[%s]: %v", j.URL, err)
    }

    // init node filter
    j.nodeFilter, err = filter.Compile(j.NodeExclude)
    if err != nil {
        return fmt.Errorf("error compiling node filters[%s]: %v", j.URL, err)
    }

    // init tcp pool with default value
    if j.MaxConnections <= 0 {
        j.MaxConnections = 30
    }

    // default sub jobs that can be acquired
    if j.MaxSubJobPerLayer <= 0 {
        j.MaxSubJobPerLayer = 10
    }

    j.semaphore = make(chan struct{}, j.MaxConnections)

    return nil
}

func (j *Jenkins) gatherNodeData(node *gojenkins.Node, url string, acc telegraf.Accumulator) error {
    tags := map[string]string{}
    fields := make(map[string]interface{})

    info := node.Raw

    // detect the parsing error, since the gojenkins lib won't do it
    if info == nil || info.DisplayName == "" {
        return fmt.Errorf("error empty node name[%s]", url)
    }

    tags["node_name"] = info.DisplayName
    var ok bool
    // filter out excluded node_name
    if j.nodeFilter != nil && j.nodeFilter.Match(tags["node_name"]) {
        return nil
    }

    if info.MonitorData.Hudson_NodeMonitors_ArchitectureMonitor == nil {
        return fmt.Errorf("error empty monitor data[%s]", url)
    }
    tags["arch"], ok = info.MonitorData.Hudson_NodeMonitors_ArchitectureMonitor.(string)
    if !ok {
        return badFormatErr(url, info.MonitorData.Hudson_NodeMonitors_ArchitectureMonitor, "string", "hudson.node_monitors.ArchitectureMonitor")
    }

    tags["status"] = "online"
    if info.Offline {
        tags["status"] = "offline"
    }
    fields["response_time"] = info.MonitorData.Hudson_NodeMonitors_ResponseTimeMonitor.Average
    if diskSpaceMonitor := info.MonitorData.Hudson_NodeMonitors_DiskSpaceMonitor; diskSpaceMonitor != nil {
        diskSpaceMonitorRoute := "hudson.node_monitors.DiskSpaceMonitor"
        var diskSpace map[string]interface{}
        if diskSpace, ok = diskSpaceMonitor.(map[string]interface{}); !ok {
            return badFormatErr(url, diskSpaceMonitor, "map[string]interface{}", diskSpaceMonitorRoute)
        }
        if tags["disk_path"], ok = diskSpace["path"].(string); !ok {
            return badFormatErr(url, diskSpace["path"], "string", diskSpaceMonitorRoute+".path")
        }
        if fields["disk_available"], ok = diskSpace["size"].(float64); !ok {
            return badFormatErr(url, diskSpace["size"], "float64", diskSpaceMonitorRoute+".size")
        }
    }

    if tempSpaceMonitor := info.MonitorData.Hudson_NodeMonitors_TemporarySpaceMonitor; tempSpaceMonitor != nil {
        tempSpaceMonitorRoute := "hudson.node_monitors.TemporarySpaceMonitor"
        var tempSpace map[string]interface{}
        if tempSpace, ok = tempSpaceMonitor.(map[string]interface{}); !ok {
            return badFormatErr(url, tempSpaceMonitor, "map[string]interface{}", tempSpaceMonitorRoute)
        }
        if tags["temp_path"], ok = tempSpace["path"].(string); !ok {
            return badFormatErr(url, tempSpace["path"], "string", tempSpaceMonitorRoute+".path")
        }
        if fields["temp_available"], ok = tempSpace["size"].(float64); !ok {
            return badFormatErr(url, tempSpace["size"], "float64", tempSpaceMonitorRoute+".size")
        }
    }

    if swapSpaceMonitor := info.MonitorData.Hudson_NodeMonitors_SwapSpaceMonitor; swapSpaceMonitor != nil {
        swapSpaceMonitorRouter := "hudson.node_monitors.SwapSpaceMonitor"
        var swapSpace map[string]interface{}
        if swapSpace, ok = swapSpaceMonitor.(map[string]interface{}); !ok {
            return badFormatErr(url, swapSpaceMonitor, "map[string]interface{}", swapSpaceMonitorRouter)
        }
        if fields["swap_available"], ok = swapSpace["availableSwapSpace"].(float64); !ok {
            return badFormatErr(url, swapSpace["availableSwapSpace"], "float64", swapSpaceMonitorRouter+".availableSwapSpace")
        }
        if fields["swap_total"], ok = swapSpace["totalSwapSpace"].(float64); !ok {
            return badFormatErr(url, swapSpace["totalSwapSpace"], "float64", swapSpaceMonitorRouter+".totalSwapSpace")
        }
        if fields["memory_available"], ok = swapSpace["availablePhysicalMemory"].(float64); !ok {
            return badFormatErr(url, swapSpace["availablePhysicalMemory"], "float64", swapSpaceMonitorRouter+".availablePhysicalMemory")
        }
        if fields["memory_total"], ok = swapSpace["totalPhysicalMemory"].(float64); !ok {
            return badFormatErr(url, swapSpace["totalPhysicalMemory"], "float64", swapSpaceMonitorRouter+".totalPhysicalMemory")
        }
    }
    acc.AddFields(measurementNode, fields, tags)

    return nil
}

func (j *Jenkins) gatherNodesData(acc telegraf.Accumulator) {
    var nodes []*gojenkins.Node
    var err error
    err = j.doGet(func() error {
        nodes, err = j.instance.GetAllNodes()
        return err
    })

    url := j.URL + "/computer/api/json"
    // since the gojenkins lib will never return an error here,
    // also report an error when len(nodes) is 0
    if err != nil || len(nodes) == 0 {
        acc.AddError(fmt.Errorf("error retrieving nodes[%s]: %v", url, err))
        return
    }
    // get node data
    for _, node := range nodes {
        te := j.gatherNodeData(node, url, acc)
        if te == nil {
            continue
        }
        acc.AddError(te)
    }
}

func (j *Jenkins) gatherJobs(acc telegraf.Accumulator) {
    jobs, err := j.instance.GetAllJobNames()
    if err != nil {
        acc.AddError(fmt.Errorf("error retrieving jobs[%s]: %v", j.URL, err))
        return
    }
    var wg sync.WaitGroup
    for _, job := range jobs {
        wg.Add(1)
        go func(name string, wg *sync.WaitGroup, acc telegraf.Accumulator) {
            defer wg.Done()
            if te := j.getJobDetail(jobRequest{
                name:    name,
                parents: []string{},
                layer:   0,
            }, wg, acc); te != nil {
                acc.AddError(te)
            }
        }(job.Name, &wg, acc)
    }
    wg.Wait()
}
// wrap the tcp request with doGet
// block tcp request if buffered channel is full
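// example usage (as in gatherNodesData above):
//   err = j.doGet(func() error {
//       nodes, err = j.instance.GetAllNodes()
//       return err
//   })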
func (j *Jenkins) doGet(tcp func() error) error {
    j.semaphore <- struct{}{}
    if err := tcp(); err != nil {
        if err == gojenkins.ErrSessionExpired {
            // ignore the error here, since config parsing should be finished.
            client, _ := j.initClient()
            // on SessionExpired, use a goroutine to create a new session
            go j.newInstance(client)
        }
        <-j.semaphore
        return err
    }
    <-j.semaphore
    return nil
}

func (j *Jenkins) getJobDetail(sj jobRequest, wg *sync.WaitGroup, acc telegraf.Accumulator) error {
    if j.MaxSubJobDepth > 0 && sj.layer == j.MaxSubJobDepth {
        return nil
    }
    // filter out excluded job.
    if j.jobFilter != nil && j.jobFilter.Match(sj.hierarchyName()) {
        return nil
    }
    url := j.URL + "/job/" + strings.Join(sj.combined(), "/job/") + "/api/json"
    var jobDetail *gojenkins.Job
    var err error
    err = j.doGet(func() error {
        jobDetail, err = j.instance.GetJob(sj.name, sj.parents...)
        return err
    })
    if err != nil {
        return fmt.Errorf("error retrieving inner jobs[%s]: %v", url, err)
    }

    for k, innerJob := range jobDetail.Raw.Jobs {
        if k < len(jobDetail.Raw.Jobs)-j.MaxSubJobPerLayer-1 {
            continue
        }
        wg.Add(1)
        // schedule tcp fetch for inner jobs
        go func(innerJob gojenkins.InnerJob, sj jobRequest, wg *sync.WaitGroup, acc telegraf.Accumulator) {
            defer wg.Done()
            if te := j.getJobDetail(jobRequest{
                name:    innerJob.Name,
                parents: sj.combined(),
                layer:   sj.layer + 1,
            }, wg, acc); te != nil {
                acc.AddError(te)
            }
        }(innerJob, sj, wg, acc)
    }

    // collect build info
    number := jobDetail.Raw.LastBuild.Number
    if number < 1 {
        // no build info
        return nil
    }
    baseURL := "/job/" + strings.Join(sj.combined(), "/job/") + "/" + strconv.Itoa(int(number))
    // jobDetail.GetBuild is not working, doing the poll directly
    build := &gojenkins.Build{
        Jenkins: j.instance,
        Depth:   1,
        Base:    baseURL,
        Raw:     new(gojenkins.BuildResponse),
    }
    var status int
    err = j.doGet(func() error {
        status, err = build.Poll()
        return err
    })
    if err != nil || status != 200 {
        if err == nil && status != 200 {
            err = fmt.Errorf("status code %d", status)
        }
        return fmt.Errorf("error retrieving inner jobs[%s]: %v", j.URL+baseURL+"/api/json", err)
    }

    if build.Raw.Building {
        log.Printf("D! Ignore running build on %s, build %v", sj.name, number)
        return nil
    }

    // stop if the build is too old
    if (j.MaxBuildAge != internal.Duration{Duration: 0}) {
        buildAgo := time.Since(build.GetTimestamp())
        if buildAgo.Seconds() > j.MaxBuildAge.Duration.Seconds() {
            log.Printf("D! Job %s build %v too old (%s ago), skipping to next job", sj.name, number, buildAgo)
            return nil
        }
    }

    gatherJobBuild(sj, build, acc)
    return nil
}

type jobRequest struct {
    name    string
    parents []string
    layer   int
}

func (sj jobRequest) combined() []string {
    return append(sj.parents, sj.name)
}

func (sj jobRequest) hierarchyName() string {
    return strings.Join(sj.combined(), "/")
}

func gatherJobBuild(sj jobRequest, build *gojenkins.Build, acc telegraf.Accumulator) {
    tags := map[string]string{"job_name": sj.hierarchyName(), "result": build.GetResult()}
    fields := make(map[string]interface{})
    fields["duration"] = build.GetDuration()
    fields["result_code"] = mapResultCode(build.GetResult())

    acc.AddFields(measurementJob, fields, tags, build.GetTimestamp())
}

// perform status mapping
func mapResultCode(s string) int {
    switch strings.ToLower(s) {
    case "success":
        return 0
    case "failure":
        return 1
    case "not_built":
        return 2
    case "unstable":
        return 3
    case "aborted":
        return 4
    }
    return -1
}

func init() {
    inputs.Add("jenkins", func() telegraf.Input {
        return &Jenkins{}
    })
}

plugins/inputs/jenkins/jenkins_test.go
@@ -0,0 +1,636 @@
package jenkins

import (
    "encoding/json"
    "net/http"
    "net/http/httptest"
    "sort"
    "strings"
    "testing"
    "time"

    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/testutil"
    "github.com/kelwang/gojenkins"
)
func TestJobRequest(t *testing.T) {
    tests := []struct {
        input  jobRequest
        output string
    }{
        {
            jobRequest{},
            "",
        },
        {
            jobRequest{
                name:    "1",
                parents: []string{"3", "2"},
            },
            "3/2/1",
        },
    }
    for _, test := range tests {
        output := test.input.hierarchyName()
        if output != test.output {
            t.Errorf("Expected %s, got %s\n", test.output, output)
        }
    }
}

func TestResultCode(t *testing.T) {
    tests := []struct {
        input  string
        output int
    }{
        {"SUCCESS", 0},
        {"Failure", 1},
        {"NOT_BUILT", 2},
        {"UNSTABLE", 3},
        {"ABORTED", 4},
    }
    for _, test := range tests {
        output := mapResultCode(test.input)
        if output != test.output {
            t.Errorf("Expected %d, got %d\n", test.output, output)
        }
    }
}
type mockHandler struct {
    // responseMap maps a request path to a response interface;
    // we will output the serialized response in json when serving http
    // example: '/computer/api/json': *gojenkins.Computers
    responseMap map[string]interface{}
}

func (h mockHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    o, ok := h.responseMap[r.URL.Path]
    if !ok {
        w.WriteHeader(http.StatusNotFound)
        return
    }

    b, err := json.Marshal(o)
    if err != nil {
        w.WriteHeader(http.StatusInternalServerError)
        return
    }
    w.Write(b)
}
// copied the embedded struct from the gojenkins lib
type monitorData struct {
    Hudson_NodeMonitors_ArchitectureMonitor interface{} `json:"hudson.node_monitors.ArchitectureMonitor"`
    Hudson_NodeMonitors_ClockMonitor        interface{} `json:"hudson.node_monitors.ClockMonitor"`
    Hudson_NodeMonitors_DiskSpaceMonitor    interface{} `json:"hudson.node_monitors.DiskSpaceMonitor"`
    Hudson_NodeMonitors_ResponseTimeMonitor struct {
        Average int64 `json:"average"`
    } `json:"hudson.node_monitors.ResponseTimeMonitor"`
    Hudson_NodeMonitors_SwapSpaceMonitor      interface{} `json:"hudson.node_monitors.SwapSpaceMonitor"`
    Hudson_NodeMonitors_TemporarySpaceMonitor interface{} `json:"hudson.node_monitors.TemporarySpaceMonitor"`
}

func TestGatherNodeData(t *testing.T) {
    tests := []struct {
        name    string
        input   mockHandler
        output  *testutil.Accumulator
        wantErr bool
    }{
        {
            name: "bad endpoint",
            input: mockHandler{
                responseMap: map[string]interface{}{
                    "/api/json":          struct{}{},
                    "/computer/api/json": nil,
                },
            },
            wantErr: true,
        },
        {
            name: "bad node data",
            input: mockHandler{
                responseMap: map[string]interface{}{
                    "/api/json": struct{}{},
                    "/computer/api/json": gojenkins.Computers{
                        Computers: []*gojenkins.NodeResponse{
                            {},
                            {},
                            {},
                        },
                    },
                },
            },
            wantErr: true,
        },
        {
            name: "bad empty monitor data",
            input: mockHandler{
                responseMap: map[string]interface{}{
                    "/api/json": struct{}{},
                    "/computer/api/json": gojenkins.Computers{
                        Computers: []*gojenkins.NodeResponse{
                            {DisplayName: "master"},
                            {DisplayName: "node1"},
                        },
                    },
                },
            },
            wantErr: true,
        },
        {
            name: "bad monitor data format",
            input: mockHandler{
                responseMap: map[string]interface{}{
                    "/api/json": struct{}{},
                    "/computer/api/json": gojenkins.Computers{
                        Computers: []*gojenkins.NodeResponse{
                            {DisplayName: "master", MonitorData: monitorData{
                                Hudson_NodeMonitors_ArchitectureMonitor: 1,
                            }},
                        },
                    },
                },
            },
            wantErr: true,
        },
        {
            name: "filtered nodes",
            input: mockHandler{
                responseMap: map[string]interface{}{
                    "/api/json": struct{}{},
                    "/computer/api/json": gojenkins.Computers{
                        Computers: []*gojenkins.NodeResponse{
                            {DisplayName: "ignore-1"},
                            {DisplayName: "ignore-2"},
                        },
                    },
                },
            },
        },
        {
            name: "normal data collection",
            input: mockHandler{
                responseMap: map[string]interface{}{
                    "/api/json": struct{}{},
                    "/computer/api/json": gojenkins.Computers{
                        Computers: []*gojenkins.NodeResponse{
                            {
                                DisplayName: "master",
                                MonitorData: monitorData{
                                    Hudson_NodeMonitors_ArchitectureMonitor: "linux",
                                    Hudson_NodeMonitors_ResponseTimeMonitor: struct {
                                        Average int64 `json:"average"`
                                    }{
                                        Average: 10032,
                                    },
                                    Hudson_NodeMonitors_DiskSpaceMonitor: map[string]interface{}{
                                        "path": "/path/1",
                                        "size": 123,
                                    },
                                    Hudson_NodeMonitors_TemporarySpaceMonitor: map[string]interface{}{
                                        "path": "/path/2",
                                        "size": 245,
                                    },
                                    Hudson_NodeMonitors_SwapSpaceMonitor: map[string]interface{}{
                                        "availableSwapSpace":      212,
                                        "totalSwapSpace":          500,
                                        "availablePhysicalMemory": 101,
                                        "totalPhysicalMemory":     500,
                                    },
                                },
                                Offline: false,
                            },
                        },
                    },
                },
            },
            output: &testutil.Accumulator{
                Metrics: []*testutil.Metric{
                    {
                        Tags: map[string]string{
                            "node_name": "master",
                            "arch":      "linux",
                            "status":    "online",
                            "disk_path": "/path/1",
                            "temp_path": "/path/2",
                        },
                        Fields: map[string]interface{}{
                            "response_time":    int64(10032),
                            "disk_available":   float64(123),
                            "temp_available":   float64(245),
                            "swap_available":   float64(212),
                            "swap_total":       float64(500),
                            "memory_available": float64(101),
                            "memory_total":     float64(500),
                        },
                    },
                },
            },
        },
    }
    for _, test := range tests {
        ts := httptest.NewServer(test.input)
        defer ts.Close()
        j := &Jenkins{
            URL:             ts.URL,
            ResponseTimeout: internal.Duration{Duration: time.Microsecond},
            NodeExclude:     []string{"ignore-1", "ignore-2"},
        }
        te := j.newInstance(&http.Client{Transport: &http.Transport{}})
        acc := new(testutil.Accumulator)
        j.gatherNodesData(acc)
        if err := acc.FirstError(); err != nil {
            te = err
        }

        if !test.wantErr && te != nil {
            t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error())
        } else if test.wantErr && te == nil {
            t.Fatalf("%s: expected err, got nil", test.name)
        }
        if test.output == nil && len(acc.Metrics) > 0 {
            t.Fatalf("%s: collected extra data", test.name)
        } else if test.output != nil && len(test.output.Metrics) > 0 {
            for k, m := range test.output.Metrics[0].Tags {
                if acc.Metrics[0].Tags[k] != m {
                    t.Fatalf("%s: tag %s mismatch, expected %s, got %s\n", test.name, k, m, acc.Metrics[0].Tags[k])
                }
            }
            for k, m := range test.output.Metrics[0].Fields {
                if acc.Metrics[0].Fields[k] != m {
                    t.Fatalf("%s: field %s mismatch, expected %v(%T), got %v(%T)\n", test.name, k, m, m, acc.Metrics[0].Fields[k], acc.Metrics[0].Fields[k])
                }
            }
        }
    }
}

func TestNewInstance(t *testing.T) {
    mh := mockHandler{
        responseMap: map[string]interface{}{
            "/api/json": struct{}{},
        },
    }
    ts := httptest.NewServer(mh)
    defer ts.Close()
    mockClient := &http.Client{Transport: &http.Transport{}}
    tests := []struct {
        // name of the test
        name    string
        input   *Jenkins
        output  *Jenkins
        wantErr bool
    }{
        {
            name: "bad jenkins config",
            input: &Jenkins{
                URL:             "http://a bad url",
                ResponseTimeout: internal.Duration{Duration: time.Microsecond},
            },
            wantErr: true,
        },
        {
            name: "has filter",
            input: &Jenkins{
                URL:             ts.URL,
                ResponseTimeout: internal.Duration{Duration: time.Microsecond},
                JobExclude:      []string{"job1", "job2"},
                NodeExclude:     []string{"node1", "node2"},
            },
        },
        {
            name: "default config",
            input: &Jenkins{
                URL:             ts.URL,
                ResponseTimeout: internal.Duration{Duration: time.Microsecond},
            },
            output: &Jenkins{
                MaxConnections:    30,
                MaxSubJobPerLayer: 10,
            },
        },
    }
    for _, test := range tests {
        te := test.input.newInstance(mockClient)
        if !test.wantErr && te != nil {
            t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error())
        } else if test.wantErr && te == nil {
            t.Fatalf("%s: expected err, got nil", test.name)
        }
        if test.output != nil {
            if test.input.instance == nil {
                t.Fatalf("%s: failed, jenkins instance shouldn't be nil", test.name)
            }
            if test.input.MaxConnections != test.output.MaxConnections {
                t.Fatalf("%s: different MaxConnections, expected %d, got %d\n", test.name, test.output.MaxConnections, test.input.MaxConnections)
            }
        }

    }
}

func TestGatherJobs(t *testing.T) {
    tests := []struct {
        name    string
        input   mockHandler
        output  *testutil.Accumulator
        wantErr bool
    }{
        {
            name:  "empty job",
            input: mockHandler{},
        },
        {
            name: "bad inner jobs",
            input: mockHandler{
                responseMap: map[string]interface{}{
                    "/api/json": &gojenkins.JobResponse{
                        Jobs: []gojenkins.InnerJob{
                            {Name: "job1"},
                        },
                    },
                },
            },
            wantErr: true,
        },
        {
            name: "jobs has no build",
            input: mockHandler{
                responseMap: map[string]interface{}{
                    "/api/json": &gojenkins.JobResponse{
                        Jobs: []gojenkins.InnerJob{
                            {Name: "job1"},
                        },
                    },
                    "/job/job1/api/json": &gojenkins.JobResponse{},
                },
            },
        },
        {
            name: "bad build info",
            input: mockHandler{
                responseMap: map[string]interface{}{
                    "/api/json": &gojenkins.JobResponse{
                        Jobs: []gojenkins.InnerJob{
                            {Name: "job1"},
                        },
                    },
                    "/job/job1/api/json": &gojenkins.JobResponse{
                        LastBuild: gojenkins.JobBuild{
                            Number: 1,
                        },
                    },
                },
            },
            wantErr: true,
        },
        {
            name: "ignore building job",
            input: mockHandler{
                responseMap: map[string]interface{}{
                    "/api/json": &gojenkins.JobResponse{
                        Jobs: []gojenkins.InnerJob{
                            {Name: "job1"},
                        },
                    },
                    "/job/job1/api/json": &gojenkins.JobResponse{
                        LastBuild: gojenkins.JobBuild{
                            Number: 1,
                        },
                    },
                    "/job/job1/1/api/json": &gojenkins.BuildResponse{
                        Building: true,
                    },
                },
            },
        },
        {
            name: "ignore old build",
            input: mockHandler{
                responseMap: map[string]interface{}{
                    "/api/json": &gojenkins.JobResponse{
                        Jobs: []gojenkins.InnerJob{
                            {Name: "job1"},
                        },
                    },
                    "/job/job1/api/json": &gojenkins.JobResponse{
                        LastBuild: gojenkins.JobBuild{
                            Number: 2,
                        },
                    },
                    "/job/job1/2/api/json": &gojenkins.BuildResponse{
                        Building:  false,
                        Timestamp: 100,
                    },
                },
            },
        },
        {
            name: "gather metrics",
            input: mockHandler{
                responseMap: map[string]interface{}{
                    "/api/json": &gojenkins.JobResponse{
                        Jobs: []gojenkins.InnerJob{
                            {Name: "job1"},
                            {Name: "job2"},
                        },
                    },
                    "/job/job1/api/json": &gojenkins.JobResponse{
                        LastBuild: gojenkins.JobBuild{
                            Number: 3,
                        },
                    },
                    "/job/job2/api/json": &gojenkins.JobResponse{
                        LastBuild: gojenkins.JobBuild{
                            Number: 1,
                        },
                    },
                    "/job/job1/3/api/json": &gojenkins.BuildResponse{
                        Building:  false,
                        Result:    "SUCCESS",
                        Duration:  25558,
                        Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000,
                    },
                    "/job/job2/1/api/json": &gojenkins.BuildResponse{
                        Building:  false,
                        Result:    "FAILURE",
                        Duration:  1558,
                        Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000,
                    },
                },
            },
            output: &testutil.Accumulator{
                Metrics: []*testutil.Metric{
                    {
                        Tags: map[string]string{
                            "job_name": "job1",
                            "result":   "SUCCESS",
                        },
                        Fields: map[string]interface{}{
                            "duration":    int64(25558),
                            "result_code": 0,
                        },
                    },
                    {
                        Tags: map[string]string{
                            "job_name": "job2",
                            "result":   "FAILURE",
                        },
                        Fields: map[string]interface{}{
                            "duration":    int64(1558),
                            "result_code": 1,
                        },
                    },
                },
            },
        },
        {
            name: "gather sub jobs, jobs filter",
            input: mockHandler{
                responseMap: map[string]interface{}{
                    "/api/json": &gojenkins.JobResponse{
                        Jobs: []gojenkins.InnerJob{
                            {Name: "apps"},
                            {Name: "ignore-1"},
                        },
                    },
                    "/job/apps/api/json": &gojenkins.JobResponse{
                        Jobs: []gojenkins.InnerJob{
                            {Name: "k8s-cloud"},
                            {Name: "chronograf"},
                            {Name: "ignore-all"},
                        },
                    },
                    "/job/apps/job/ignore-all/api/json": &gojenkins.JobResponse{
                        Jobs: []gojenkins.InnerJob{
                            {Name: "1"},
                            {Name: "2"},
                        },
                    },
                    "/job/apps/job/chronograf/api/json": &gojenkins.JobResponse{
                        LastBuild: gojenkins.JobBuild{
                            Number: 1,
                        },
                    },
                    "/job/apps/job/k8s-cloud/api/json": &gojenkins.JobResponse{
                        Jobs: []gojenkins.InnerJob{
                            {Name: "PR-100"},
                            {Name: "PR-101"},
                            {Name: "PR-ignore2"},
                        },
                    },
                    "/job/apps/job/k8s-cloud/job/PR-100/api/json": &gojenkins.JobResponse{
                        LastBuild: gojenkins.JobBuild{
                            Number: 1,
                        },
                    },
                    "/job/apps/job/k8s-cloud/job/PR-101/api/json": &gojenkins.JobResponse{
                        LastBuild: gojenkins.JobBuild{
                            Number: 4,
                        },
                    },
                    "/job/apps/job/chronograf/1/api/json": &gojenkins.BuildResponse{
                        Building:  false,
                        Result:    "FAILURE",
                        Duration:  1558,
                        Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000,
                    },
                    "/job/apps/job/k8s-cloud/job/PR-101/4/api/json": &gojenkins.BuildResponse{
                        Building:  false,
                        Result:    "SUCCESS",
                        Duration:  76558,
                        Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000,
                    },
                    "/job/apps/job/k8s-cloud/job/PR-100/1/api/json": &gojenkins.BuildResponse{
                        Building:  false,
                        Result:    "SUCCESS",
                        Duration:  91558,
                        Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000,
                    },
                },
            },
            output: &testutil.Accumulator{
                Metrics: []*testutil.Metric{
                    {
                        Tags: map[string]string{
                            "job_name": "apps/chronograf",
                            "result":   "FAILURE",
                        },
                        Fields: map[string]interface{}{
                            "duration":    int64(1558),
                            "result_code": 1,
                        },
                    },
                    {
                        Tags: map[string]string{
                            "job_name": "apps/k8s-cloud/PR-100",
                            "result":   "SUCCESS",
                        },
                        Fields: map[string]interface{}{
                            "duration":    int64(91558),
                            "result_code": 0,
                        },
                    },
                    {
                        Tags: map[string]string{
                            "job_name": "apps/k8s-cloud/PR-101",
                            "result":   "SUCCESS",
                        },
                        Fields: map[string]interface{}{
                            "duration":    int64(76558),
                            "result_code": 0,
                        },
                    },
                },
            },
        },
    }
    for _, test := range tests {
        ts := httptest.NewServer(test.input)
        defer ts.Close()
        j := &Jenkins{
            URL:             ts.URL,
            MaxBuildAge:     internal.Duration{Duration: time.Hour},
            ResponseTimeout: internal.Duration{Duration: time.Microsecond},
            JobExclude: []string{
                "ignore-1",
                "apps/ignore-all/*",
                "apps/k8s-cloud/PR-ignore2",
            },
        }
        te := j.newInstance(&http.Client{Transport: &http.Transport{}})
        acc := new(testutil.Accumulator)
        j.gatherJobs(acc)
        if err := acc.FirstError(); err != nil {
            te = err
        }
        if !test.wantErr && te != nil {
            t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error())
        } else if test.wantErr && te == nil {
            t.Fatalf("%s: expected err, got nil", test.name)
        }

        if test.output != nil && len(test.output.Metrics) > 0 {
            // sort metrics by job_name so the comparison below is deterministic
            sort.Slice(acc.Metrics, func(i, j int) bool {
                return strings.Compare(acc.Metrics[i].Tags["job_name"], acc.Metrics[j].Tags["job_name"]) < 0
            })
            for i := range test.output.Metrics {
                for k, m := range test.output.Metrics[i].Tags {
                    if acc.Metrics[i].Tags[k] != m {
                        t.Fatalf("%s: tag %s mismatch, expected %s, got %s\n", test.name, k, m, acc.Metrics[i].Tags[k])
                    }
                }
                for k, m := range test.output.Metrics[i].Fields {
                    if acc.Metrics[i].Fields[k] != m {
                        t.Fatalf("%s: field %s mismatch, expected %v(%T), got %v(%T)\n", test.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k])
                    }
                }
            }

        }
    }
}