add requested changes

This commit is contained in:
Kelvin Wang 2018-06-22 09:20:26 -07:00
parent 9c85c05fcb
commit 73eaa057d1
3 changed files with 864 additions and 275 deletions

View File

@ -7,38 +7,38 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API
### Configuration: ### Configuration:
```toml ```toml
url = "http://my-jenkins-instance:8080" url = "http://my-jenkins-instance:8080"
username = "admin" # username = "admin"
password = "admin" # password = "admin"
## Set response_timeout ## Set response_timeout
response_timeout = "5s" response_timeout = "5s"
## Optional SSL Config ## Optional SSL Config
# ssl_ca = /path/to/cafile # ssl_ca = /path/to/cafile
# ssl_cert = /path/to/certfile # ssl_cert = /path/to/certfile
# ssl_key = /path/to/keyfile # ssl_key = /path/to/keyfile
## Use SSL but skip chain & host verification ## Use SSL but skip chain & host verification
# insecure_skip_verify = false # insecure_skip_verify = false
## Job & build filter ## Job & build filter
# max_build_age = "1h" # max_build_age = "1h"
## jenkins can have unlimited layer of sub jobs ## jenkins can have unlimited layer of sub jobs
## this config will limit the layers of pull, default value 0 means ## this config will limit the layers of pull, default value 0 means
## unlimited pulling until no more sub jobs ## unlimited pulling until no more sub jobs
# max_sub_jobs_layer = 0 # max_subjob_depth = 0
## in workflow-multibranch-plugin, each branch will be created as a sub job ## in workflow-multibranch-plugin, each branch will be created as a sub job
## this config will limit to call only the latest branches ## this config will limit to call only the latest branches
## sub jobs fetch in each layer ## sub jobs fetch in each layer
# empty will use default value 10 # empty will use default value 10
# newest_sub_jobs_each_layer = 10 # max_subjob_per_layer = 10
# job_exclude = [ "MyJob", "MyOtherJob" ] # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"]
## Node filter ## Node filter
# node_exlude = [ "node1", "node2" ] # node_exclude = [ "node1", "node2" ]
## Worker pool for jenkins plugin only ## Worker pool for jenkins plugin only
# empty this field will use default value 30 # empty this field will use default value 30
# max_tcp_concurrent_connections = 30 # max_connections = 30
``` ```
### Measurements & Fields: ### Measurements & Fields:

View File

@ -4,13 +4,13 @@ import (
"fmt" "fmt"
"log" "log"
"net/http" "net/http"
"reflect"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
@ -28,23 +28,23 @@ type Jenkins struct {
tls.ClientConfig tls.ClientConfig
instance *gojenkins.Jenkins instance *gojenkins.Jenkins
MaxTCPConcurrentConnections int `toml:"max_tcp_concurrent_connections"` MaxConnections int `toml:"max_connections"`
MaxBuildAge internal.Duration `toml:"max_build_age"` MaxBuildAge internal.Duration `toml:"max_build_age"`
MaxSubJobsLayer int `toml:"max_sub_jobs_layer"` MaxSubJobDepth int `toml:"max_subjob_depth"`
NewestSubJobsEachLayer int `toml:"newest_sub_jobs_each_layer"` MaxSubJobPerLayer int `toml:"max_subjob_per_layer"`
JobExclude []string `toml:"job_exclude"` JobExclude []string `toml:"job_exclude"`
jobFilter map[string]bool jobFilter filter.Filter
NodeExclude []string `toml:"node_exclude"` NodeExclude []string `toml:"node_exclude"`
nodeFilter map[string]bool nodeFilter filter.Filter
} }
type byBuildNumber []gojenkins.JobBuild type byBuildNumber []gojenkins.JobBuild
const sampleConfig = ` const sampleConfig = `
url = "http://my-jenkins-instance:8080" url = "http://my-jenkins-instance:8080"
username = "admin" # username = "admin"
password = "admin" # password = "admin"
## Set response_timeout ## Set response_timeout
response_timeout = "5s" response_timeout = "5s"
@ -60,20 +60,20 @@ response_timeout = "5s"
## jenkins can have unlimited layer of sub jobs ## jenkins can have unlimited layer of sub jobs
## this config will limit the layers of pull, default value 0 means ## this config will limit the layers of pull, default value 0 means
## unlimited pulling until no more sub jobs ## unlimited pulling until no more sub jobs
# max_sub_jobs_layer = 0 # max_subjob_depth = 0
## in workflow-multibranch-plugin, each branch will be created as a sub job ## in workflow-multibranch-plugin, each branch will be created as a sub job
## this config will limit to call only the latest branches ## this config will limit to call only the latest branches
## sub jobs fetch in each layer ## sub jobs fetch in each layer
# empty will use default value 10 # empty will use default value 10
# newest_sub_jobs_each_layer = 10 # max_subjob_per_layer = 10
# job_exclude = [ "MyJob", "MyOtherJob" ] # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"]
## Node filter ## Node filter
# node_exlude = [ "node1", "node2" ] # node_exclude = [ "node1", "node2" ]
## Worker pool for jenkins plugin only ## Worker pool for jenkins plugin only
# empty this field will use default value 30 # empty this field will use default value 30
# max_tcp_concurrent_connections = 30 # max_connections = 30
` `
// measurement // measurement
@ -82,53 +82,46 @@ const (
measurementJob = "jenkins_job" measurementJob = "jenkins_job"
) )
type typedErr struct { // Error base type of error.
level int type Error struct {
err error err error
reference string reference string
url string url string
} }
// const of the error level, default 0 to be the errLevel func newError(err error, ref, url string) *Error {
const ( return &Error{
errLevel int = iota
continueLevel
infoLevel
)
func wrapErr(e typedErr, err error, ref string) *typedErr {
return &typedErr{
level: e.level,
err: err, err: err,
reference: ref, reference: ref,
url: e.url, url: url,
} }
} }
func (e *typedErr) Error() string { func (e *Error) Error() string {
if e == nil { if e == nil {
return "" return ""
} }
return fmt.Sprintf("error "+e.reference+"[%s]: %v", e.url, e.err) return fmt.Sprintf("error %s[%s]: %v", e.reference, e.url, e.err)
} }
func badFormatErr(te typedErr, field interface{}, want string, fieldName string) *typedErr { func badFormatErr(url string, field interface{}, want string, fieldName string) *Error {
return &typedErr{ return &Error{
level: te.level, err: fmt.Errorf("fieldName: %s, want %s, got %T", fieldName, want, field),
err: fmt.Errorf("fieldName: %s, want %s, got %s", fieldName, want, reflect.TypeOf(field).String()),
reference: errBadFormat, reference: errBadFormat,
url: te.url, url: url,
} }
} }
// err references // err references
const ( const (
errParseConfig = "parse jenkins config" errParseConfig = "parse jenkins config"
errJobFilterCompile = "compile job filters"
errNodeFilterCompile = "compile node filters"
errConnectJenkins = "connect jenkins instance" errConnectJenkins = "connect jenkins instance"
errInitJenkins = "init jenkins instance" errInitJenkins = "init jenkins instance"
errRetrieveNode = "retrieving nodes" errRetrieveNode = "retrieving nodes"
errRetrieveJobs = "retrieving jobs" errRetrieveJobs = "retrieving jobs"
errReadNodeInfo = "reading node info" errEmptyNodeName = "empty node name"
errEmptyMonitorData = "empty monitor data" errEmptyMonitorData = "empty monitor data"
errBadFormat = "bad format" errBadFormat = "bad format"
errRetrieveInnerJobs = "retrieving inner jobs" errRetrieveInnerJobs = "retrieving inner jobs"
@ -147,113 +140,111 @@ func (j *Jenkins) Description() string {
// Gather implements telegraf.Input interface // Gather implements telegraf.Input interface
func (j *Jenkins) Gather(acc telegraf.Accumulator) error { func (j *Jenkins) Gather(acc telegraf.Accumulator) error {
var err error
te := typedErr{
url: j.URL,
}
if j.instance == nil { if j.instance == nil {
if tErr := j.initJenkins(te); tErr != nil { client, te := j.initClient()
return tErr if te != nil {
return te
}
if te = j.newInstance(client); te != nil {
return te
} }
} }
nodes, err := j.instance.GetAllNodes() j.gatherNodesData(acc)
if err != nil { j.gatherJobs(acc)
return wrapErr(te, err, errRetrieveNode)
}
jobs, err := j.instance.GetAllJobNames()
if err != nil {
return wrapErr(te, err, errRetrieveJobs)
}
j.gatherNodesData(nodes, acc)
j.gatherJobs(jobs, acc)
return nil return nil
} }
func (j *Jenkins) initJenkins(te typedErr) *typedErr { func (j *Jenkins) initClient() (*http.Client, *Error) {
// create instance
tlsCfg, err := j.ClientConfig.TLSConfig() tlsCfg, err := j.ClientConfig.TLSConfig()
if err != nil { if err != nil {
return wrapErr(te, err, errParseConfig) return nil, newError(err, errParseConfig, j.URL)
} }
return &http.Client{
client := &http.Client{
Transport: &http.Transport{ Transport: &http.Transport{
TLSClientConfig: tlsCfg, TLSClientConfig: tlsCfg,
}, },
Timeout: j.ResponseTimeout.Duration, Timeout: j.ResponseTimeout.Duration,
} }, nil
}
// separate the client as a dependency to use httptest Client for mocking
func (j *Jenkins) newInstance(client *http.Client) *Error {
// create instance
var err error
j.instance, err = gojenkins.CreateJenkins(client, j.URL, j.Username, j.Password).Init() j.instance, err = gojenkins.CreateJenkins(client, j.URL, j.Username, j.Password).Init()
if err != nil { if err != nil {
return wrapErr(te, err, errConnectJenkins) return newError(err, errConnectJenkins, j.URL)
}
_, err = j.instance.Init()
if err != nil {
return wrapErr(te, err, errConnectJenkins)
} }
// init job filter // init job filter
j.jobFilter = make(map[string]bool) j.jobFilter, err = filter.Compile(j.JobExclude)
for _, name := range j.JobExclude { if err != nil {
j.jobFilter[name] = false return newError(err, errJobFilterCompile, j.URL)
} }
// init node filter // init node filter
j.nodeFilter = make(map[string]bool) j.nodeFilter, err = filter.Compile(j.NodeExclude)
for _, name := range j.NodeExclude { if err != nil {
j.nodeFilter[name] = false return newError(err, errNodeFilterCompile, j.URL)
} }
// init tcp pool with default value // init tcp pool with default value
if j.MaxTCPConcurrentConnections <= 0 { if j.MaxConnections <= 0 {
j.MaxTCPConcurrentConnections = 30 j.MaxConnections = 30
} }
// default sub jobs can be acquired // default sub jobs can be acquired
if j.NewestSubJobsEachLayer <= 0 { if j.MaxSubJobPerLayer <= 0 {
j.NewestSubJobsEachLayer = 10 j.MaxSubJobPerLayer = 10
} }
return nil return nil
} }
func (j *Jenkins) gatherNodeData(node *gojenkins.Node, te typedErr, fields map[string]interface{}, tags map[string]string) *typedErr { func (j *Jenkins) gatherNodeData(node *gojenkins.Node, url string, acc telegraf.Accumulator) *Error {
tags["node_name"] = node.Raw.DisplayName tags := map[string]string{}
var ok bool fields := make(map[string]interface{})
if _, ok = j.nodeFilter[tags["node_name"]]; ok {
(&te).level = continueLevel
return &te
}
info := node.Raw info := node.Raw
// detect the parsing error, since gojenkins lib won't do it
if info == nil || info.DisplayName == "" {
return newError(nil, errEmptyNodeName, url)
}
tags["node_name"] = info.DisplayName
var ok bool
// filter out excluded node_name
if j.nodeFilter != nil && j.nodeFilter.Match(tags["node_name"]) {
return nil
}
if info.MonitorData.Hudson_NodeMonitors_ArchitectureMonitor == nil { if info.MonitorData.Hudson_NodeMonitors_ArchitectureMonitor == nil {
return wrapErr(te, fmt.Errorf("check your permission"), errEmptyMonitorData) return newError(fmt.Errorf("maybe check your permission"), errEmptyMonitorData, url)
} }
tags["arch"], ok = info.MonitorData.Hudson_NodeMonitors_ArchitectureMonitor.(string) tags["arch"], ok = info.MonitorData.Hudson_NodeMonitors_ArchitectureMonitor.(string)
if !ok { if !ok {
return badFormatErr(te, info.MonitorData.Hudson_NodeMonitors_ArchitectureMonitor, "string", "hudson.node_monitors.ArchitectureMonitor") return badFormatErr(url, info.MonitorData.Hudson_NodeMonitors_ArchitectureMonitor, "string", "hudson.node_monitors.ArchitectureMonitor")
} }
tags["status"] = "online" tags["status"] = "online"
if node.Raw.Offline { if info.Offline {
tags["status"] = "offline" tags["status"] = "offline"
} }
fields["response_time"] = info.MonitorData.Hudson_NodeMonitors_ResponseTimeMonitor.Average fields["response_time"] = info.MonitorData.Hudson_NodeMonitors_ResponseTimeMonitor.Average
if diskSpaceMonitor := info.MonitorData.Hudson_NodeMonitors_DiskSpaceMonitor; diskSpaceMonitor != nil { if diskSpaceMonitor := info.MonitorData.Hudson_NodeMonitors_DiskSpaceMonitor; diskSpaceMonitor != nil {
diskSpaceMonitorRoute := "hudson.node_monitors.DiskSpaceMonitor" diskSpaceMonitorRoute := "hudson.node_monitors.DiskSpaceMonitor"
var diskSpace map[string]interface{} var diskSpace map[string]interface{}
if diskSpace, ok = diskSpaceMonitor.(map[string]interface{}); !ok { if diskSpace, ok = diskSpaceMonitor.(map[string]interface{}); !ok {
return badFormatErr(te, diskSpaceMonitor, "map[string]interface{}", diskSpaceMonitorRoute) return badFormatErr(url, diskSpaceMonitor, "map[string]interface{}", diskSpaceMonitorRoute)
} }
if tags["disk_path"], ok = diskSpace["path"].(string); !ok { if tags["disk_path"], ok = diskSpace["path"].(string); !ok {
return badFormatErr(te, diskSpace["path"], "string", diskSpaceMonitorRoute+".path") return badFormatErr(url, diskSpace["path"], "string", diskSpaceMonitorRoute+".path")
} }
if fields["disk_available"], ok = diskSpace["size"].(float64); !ok { if fields["disk_available"], ok = diskSpace["size"].(float64); !ok {
return badFormatErr(te, diskSpace["size"], "float64", diskSpaceMonitorRoute+".size") return badFormatErr(url, diskSpace["size"], "float64", diskSpaceMonitorRoute+".size")
} }
} }
@ -261,13 +252,13 @@ func (j *Jenkins) gatherNodeData(node *gojenkins.Node, te typedErr, fields map[s
tempSpaceMonitorRoute := "hudson.node_monitors.TemporarySpaceMonitor" tempSpaceMonitorRoute := "hudson.node_monitors.TemporarySpaceMonitor"
var tempSpace map[string]interface{} var tempSpace map[string]interface{}
if tempSpace, ok = tempSpaceMonitor.(map[string]interface{}); !ok { if tempSpace, ok = tempSpaceMonitor.(map[string]interface{}); !ok {
return badFormatErr(te, tempSpaceMonitor, "map[string]interface{}", tempSpaceMonitorRoute) return badFormatErr(url, tempSpaceMonitor, "map[string]interface{}", tempSpaceMonitorRoute)
} }
if tags["temp_path"], ok = tempSpace["path"].(string); !ok { if tags["temp_path"], ok = tempSpace["path"].(string); !ok {
return badFormatErr(te, tempSpace["path"], "string", tempSpaceMonitorRoute+".path") return badFormatErr(url, tempSpace["path"], "string", tempSpaceMonitorRoute+".path")
} }
if fields["temp_available"], ok = tempSpace["size"].(float64); !ok { if fields["temp_available"], ok = tempSpace["size"].(float64); !ok {
return badFormatErr(te, tempSpace["size"], "float64", tempSpaceMonitorRoute+".size") return badFormatErr(url, tempSpace["size"], "float64", tempSpaceMonitorRoute+".size")
} }
} }
@ -275,110 +266,99 @@ func (j *Jenkins) gatherNodeData(node *gojenkins.Node, te typedErr, fields map[s
swapSpaceMonitorRouter := "hudson.node_monitors.SwapSpaceMonitor" swapSpaceMonitorRouter := "hudson.node_monitors.SwapSpaceMonitor"
var swapSpace map[string]interface{} var swapSpace map[string]interface{}
if swapSpace, ok = swapSpaceMonitor.(map[string]interface{}); !ok { if swapSpace, ok = swapSpaceMonitor.(map[string]interface{}); !ok {
return badFormatErr(te, swapSpaceMonitor, "map[string]interface{}", swapSpaceMonitorRouter) return badFormatErr(url, swapSpaceMonitor, "map[string]interface{}", swapSpaceMonitorRouter)
} }
if fields["swap_available"], ok = swapSpace["availableSwapSpace"].(float64); !ok { if fields["swap_available"], ok = swapSpace["availableSwapSpace"].(float64); !ok {
return badFormatErr(te, swapSpace["availableSwapSpace"], "float64", swapSpaceMonitorRouter+".availableSwapSpace") return badFormatErr(url, swapSpace["availableSwapSpace"], "float64", swapSpaceMonitorRouter+".availableSwapSpace")
} }
if fields["swap_total"], ok = swapSpace["totalSwapSpace"].(float64); !ok { if fields["swap_total"], ok = swapSpace["totalSwapSpace"].(float64); !ok {
return badFormatErr(te, swapSpace["totalSwapSpace"], "float64", swapSpaceMonitorRouter+".totalSwapSpace") return badFormatErr(url, swapSpace["totalSwapSpace"], "float64", swapSpaceMonitorRouter+".totalSwapSpace")
} }
if fields["memory_available"], ok = swapSpace["availablePhysicalMemory"].(float64); !ok { if fields["memory_available"], ok = swapSpace["availablePhysicalMemory"].(float64); !ok {
return badFormatErr(te, swapSpace["availablePhysicalMemory"], "float64", swapSpaceMonitorRouter+".availablePhysicalMemory") return badFormatErr(url, swapSpace["availablePhysicalMemory"], "float64", swapSpaceMonitorRouter+".availablePhysicalMemory")
} }
if fields["memory_total"], ok = swapSpace["totalPhysicalMemory"].(float64); !ok { if fields["memory_total"], ok = swapSpace["totalPhysicalMemory"].(float64); !ok {
return badFormatErr(te, swapSpace["totalPhysicalMemory"], "float64", swapSpaceMonitorRouter+".totalPhysicalMemory") return badFormatErr(url, swapSpace["totalPhysicalMemory"], "float64", swapSpaceMonitorRouter+".totalPhysicalMemory")
} }
} }
acc.AddFields(measurementNode, fields, tags)
return nil return nil
} }
func (j *Jenkins) gatherNodesData(nodes []*gojenkins.Node, acc telegraf.Accumulator) { func (j *Jenkins) gatherNodesData(acc telegraf.Accumulator) {
nodes, err := j.instance.GetAllNodes()
tags := map[string]string{} url := j.URL + "/computer/api/json"
fields := make(map[string]interface{}) // since gojenkins lib will never return error
baseTe := typedErr{ // returns error for len(nodes) is 0
url: j.URL + "/computer/api/json", if err != nil || len(nodes) == 0 {
acc.AddError(newError(err, errRetrieveNode, url))
return
} }
// get node data // get node data
for _, node := range nodes { for _, node := range nodes {
te := j.gatherNodeData(node, baseTe, fields, tags) te := j.gatherNodeData(node, url, acc)
if te == nil { if te == nil {
acc.AddFields(measurementNode, fields, tags)
continue continue
} }
switch te.level {
case continueLevel:
continue
default:
acc.AddError(te) acc.AddError(te)
} }
}
} }
func (j *Jenkins) gatherJobs(jobNames []gojenkins.InnerJob, acc telegraf.Accumulator) { func (j *Jenkins) gatherJobs(acc telegraf.Accumulator) {
jobsC := make(chan srcJob, j.MaxTCPConcurrentConnections) jobs, err := j.instance.GetAllJobNames()
errC := make(chan *typedErr) if err != nil {
acc.AddError(newError(err, errRetrieveJobs, j.URL))
return
}
jobsC := make(chan jobRequest, j.MaxConnections)
var wg sync.WaitGroup var wg sync.WaitGroup
for _, job := range jobNames { for _, job := range jobs {
wg.Add(1) wg.Add(1)
go func(job gojenkins.InnerJob) { go func(name string) {
jobsC <- srcJob{ jobsC <- jobRequest{
name: job.Name, name: name,
parents: []string{}, parents: []string{},
layer: 0, layer: 0,
} }
}(job) }(job.Name)
} }
for i := 0; i < j.MaxTCPConcurrentConnections; i++ { for i := 0; i < j.MaxConnections; i++ {
go j.getJobDetail(jobsC, errC, &wg, acc) go func(jobsC chan jobRequest, acc telegraf.Accumulator, wg *sync.WaitGroup) {
} for sj := range jobsC {
if te := j.getJobDetail(sj, jobsC, wg, acc); te != nil {
go func() {
wg.Wait()
close(errC)
}()
select {
case te := <-errC:
if te != nil {
acc.AddError(te) acc.AddError(te)
} }
} }
}(jobsC, acc, &wg)
}
wg.Wait()
} }
func (j *Jenkins) getJobDetail(jobsC chan srcJob, errC chan<- *typedErr, wg *sync.WaitGroup, acc telegraf.Accumulator) { func (j *Jenkins) getJobDetail(sj jobRequest, jobsC chan<- jobRequest, wg *sync.WaitGroup, acc telegraf.Accumulator) *Error {
for sj := range jobsC { defer wg.Done()
if j.MaxSubJobsLayer > 0 && sj.layer == j.MaxSubJobsLayer { if j.MaxSubJobDepth > 0 && sj.layer == j.MaxSubJobDepth {
wg.Done() return nil
continue
} }
// exclude filter // filter out excluded job.
if _, ok := j.jobFilter[sj.name]; ok { if j.jobFilter != nil && j.jobFilter.Match(sj.hierarchyName()) {
wg.Done() return nil
continue
}
te := &typedErr{
url: j.URL + "/job/" + strings.Join(sj.combined(), "/job/") + "/api/json",
} }
url := j.URL + "/job/" + strings.Join(sj.combined(), "/job/") + "/api/json"
jobDetail, err := j.instance.GetJob(sj.name, sj.parents...) jobDetail, err := j.instance.GetJob(sj.name, sj.parents...)
if err != nil { if err != nil {
go func(te typedErr, err error) { return newError(err, errRetrieveInnerJobs, url)
errC <- wrapErr(te, err, errRetrieveInnerJobs)
}(*te, err)
return
} }
for k, innerJob := range jobDetail.Raw.Jobs { for k, innerJob := range jobDetail.Raw.Jobs {
if k < len(jobDetail.Raw.Jobs)-j.NewestSubJobsEachLayer-1 { if k < len(jobDetail.Raw.Jobs)-j.MaxSubJobPerLayer-1 {
continue continue
} }
wg.Add(1) wg.Add(1)
// schedule tcp fetch for inner jobs // schedule tcp fetch for inner jobs
go func(innerJob gojenkins.InnerJob, sj srcJob) { go func(innerJob gojenkins.InnerJob, sj jobRequest) {
jobsC <- srcJob{ jobsC <- jobRequest{
name: innerJob.Name, name: innerJob.Name,
parents: sj.combined(), parents: sj.combined(),
layer: sj.layer + 1, layer: sj.layer + 1,
@ -390,8 +370,7 @@ func (j *Jenkins) getJobDetail(jobsC chan srcJob, errC chan<- *typedErr, wg *syn
number := jobDetail.Raw.LastBuild.Number number := jobDetail.Raw.LastBuild.Number
if number < 1 { if number < 1 {
// no build info // no build info
wg.Done() return nil
continue
} }
baseURL := "/job/" + strings.Join(sj.combined(), "/job/") + "/" + strconv.Itoa(int(number)) baseURL := "/job/" + strings.Join(sj.combined(), "/job/") + "/" + strconv.Itoa(int(number))
// jobDetail.GetBuild is not working, doing poll directly // jobDetail.GetBuild is not working, doing poll directly
@ -406,51 +385,43 @@ func (j *Jenkins) getJobDetail(jobsC chan srcJob, errC chan<- *typedErr, wg *syn
if err == nil && status != 200 { if err == nil && status != 200 {
err = fmt.Errorf("status code %d", status) err = fmt.Errorf("status code %d", status)
} }
te.url = j.URL + baseURL + "/api/json" return newError(err, errRetrieveLatestBuild, j.URL+baseURL+"/api/json")
go func(te typedErr, err error) {
errC <- wrapErr(te, err, errRetrieveLatestBuild)
}(*te, err)
return
} }
if build.Raw.Building { if build.Raw.Building {
log.Printf("D! Ignore running build on %s, build %v", sj.name, number) log.Printf("D! Ignore running build on %s, build %v", sj.name, number)
wg.Done() return nil
continue
} }
// stop if build is too old // stop if build is too old
if (j.MaxBuildAge != internal.Duration{Duration: 0}) { if (j.MaxBuildAge != internal.Duration{Duration: 0}) {
buildSecAgo := time.Now().Sub(build.GetTimestamp()).Seconds() buildAgo := time.Now().Sub(build.GetTimestamp())
if time.Now().Sub(build.GetTimestamp()).Seconds() > j.MaxBuildAge.Duration.Seconds() { if buildAgo.Seconds() > j.MaxBuildAge.Duration.Seconds() {
log.Printf("D! Job %s build %v too old (%v seconds ago), skipping to next job", sj.name, number, buildSecAgo) log.Printf("D! Job %s build %v too old (%s ago), skipping to next job", sj.name, number, buildAgo)
wg.Done() return nil
continue
} }
} }
gatherJobBuild(sj, build, acc) gatherJobBuild(sj, build, acc)
wg.Done() return nil
}
} }
type srcJob struct { type jobRequest struct {
name string name string
parents []string parents []string
layer int layer int
} }
func (sj srcJob) combined() []string { func (sj jobRequest) combined() []string {
return append(sj.parents, sj.name) return append(sj.parents, sj.name)
} }
func (sj srcJob) hierarchyName() string { func (sj jobRequest) hierarchyName() string {
return strings.Join(sj.combined(), "/") return strings.Join(sj.combined(), "/")
} }
func gatherJobBuild(sj srcJob, build *gojenkins.Build, acc telegraf.Accumulator) { func gatherJobBuild(sj jobRequest, build *gojenkins.Build, acc telegraf.Accumulator) {
tags := map[string]string{"job_name": sj.hierarchyName(), "result": build.GetResult()} tags := map[string]string{"job_name": sj.hierarchyName(), "result": build.GetResult()}
fields := make(map[string]interface{}) fields := make(map[string]interface{})
fields["duration"] = build.GetDuration() fields["duration"] = build.GetDuration()

View File

@ -1,13 +1,23 @@
package jenkins package jenkins
import ( import (
"encoding/json"
"errors" "errors"
"net/http"
"net/http/httptest"
"sort"
"strings"
"testing" "testing"
"time"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/testutil"
"github.com/kelwang/gojenkins"
) )
func TestErr(t *testing.T) { func TestErr(t *testing.T) {
tests := []struct { tests := []struct {
err *typedErr err *Error
output string output string
}{ }{
{ {
@ -15,7 +25,7 @@ func TestErr(t *testing.T) {
"", "",
}, },
{ {
&typedErr{ &Error{
reference: errConnectJenkins, reference: errConnectJenkins,
url: "http://badurl.com", url: "http://badurl.com",
err: errors.New("unknown error"), err: errors.New("unknown error"),
@ -23,20 +33,12 @@ func TestErr(t *testing.T) {
"error connect jenkins instance[http://badurl.com]: unknown error", "error connect jenkins instance[http://badurl.com]: unknown error",
}, },
{ {
wrapErr(typedErr{ newError(errors.New("2"), errEmptyMonitorData, "http://badurl.com"),
reference: errConnectJenkins,
url: "http://badurl.com",
err: errors.New("unknown error"),
}, errors.New("2"), errEmptyMonitorData),
"error empty monitor data[http://badurl.com]: 2", "error empty monitor data[http://badurl.com]: 2",
}, },
{ {
badFormatErr(typedErr{ badFormatErr("http://badurl.com", 20.12, "string", "arch"),
reference: errConnectJenkins, "error bad format[http://badurl.com]: fieldName: arch, want string, got float64",
url: "http://badurl.com",
err: errors.New("unknown error"),
}, "20", "float64", "arch"),
"error bad format[http://badurl.com]: fieldName: arch, want float64, got string",
}, },
} }
for _, test := range tests { for _, test := range tests {
@ -47,17 +49,17 @@ func TestErr(t *testing.T) {
} }
} }
func TestSrcJob(t *testing.T) { func TestJobRequest(t *testing.T) {
tests := []struct { tests := []struct {
input srcJob input jobRequest
output string output string
}{ }{
{ {
srcJob{}, jobRequest{},
"", "",
}, },
{ {
srcJob{ jobRequest{
name: "1", name: "1",
parents: []string{"3", "2"}, parents: []string{"3", "2"},
}, },
@ -90,3 +92,619 @@ func TestResultCode(t *testing.T) {
} }
} }
} }
type mockHandler struct {
// responseMap is the path to response interface
// we will output the serialized response in json when serving http
// example '/computer/api/json': *gojenkins.
responseMap map[string]interface{}
}
func (h mockHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
o, ok := h.responseMap[r.URL.Path]
if !ok {
w.WriteHeader(http.StatusNotFound)
return
}
b, err := json.Marshal(o)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
return
}
w.Write(b)
}
// copied the embed struct from gojenkins lib
type monitorData struct {
Hudson_NodeMonitors_ArchitectureMonitor interface{} `json:"hudson.node_monitors.ArchitectureMonitor"`
Hudson_NodeMonitors_ClockMonitor interface{} `json:"hudson.node_monitors.ClockMonitor"`
Hudson_NodeMonitors_DiskSpaceMonitor interface{} `json:"hudson.node_monitors.DiskSpaceMonitor"`
Hudson_NodeMonitors_ResponseTimeMonitor struct {
Average int64 `json:"average"`
} `json:"hudson.node_monitors.ResponseTimeMonitor"`
Hudson_NodeMonitors_SwapSpaceMonitor interface{} `json:"hudson.node_monitors.SwapSpaceMonitor"`
Hudson_NodeMonitors_TemporarySpaceMonitor interface{} `json:"hudson.node_monitors.TemporarySpaceMonitor"`
}
func TestGatherNodeData(t *testing.T) {
tests := []struct {
name string
input mockHandler
output *testutil.Accumulator
oe *Error
}{
{
name: "bad endpoint",
input: mockHandler{
responseMap: map[string]interface{}{
"/api/json": struct{}{},
"/computer/api/json": nil,
},
},
oe: &Error{
reference: errRetrieveNode,
},
},
{
name: "bad node data",
input: mockHandler{
responseMap: map[string]interface{}{
"/api/json": struct{}{},
"/computer/api/json": gojenkins.Computers{
Computers: []*gojenkins.NodeResponse{
{},
{},
{},
},
},
},
},
oe: &Error{
reference: errEmptyNodeName,
},
},
{
name: "bad empty monitor data",
input: mockHandler{
responseMap: map[string]interface{}{
"/api/json": struct{}{},
"/computer/api/json": gojenkins.Computers{
Computers: []*gojenkins.NodeResponse{
{DisplayName: "master"},
{DisplayName: "node1"},
},
},
},
},
oe: &Error{
reference: errEmptyMonitorData,
},
},
{
name: "bad monitor data format",
input: mockHandler{
responseMap: map[string]interface{}{
"/api/json": struct{}{},
"/computer/api/json": gojenkins.Computers{
Computers: []*gojenkins.NodeResponse{
{DisplayName: "master", MonitorData: monitorData{
Hudson_NodeMonitors_ArchitectureMonitor: 1,
}},
},
},
},
},
oe: &Error{
reference: errBadFormat,
},
},
{
name: "filtered nodes",
input: mockHandler{
responseMap: map[string]interface{}{
"/api/json": struct{}{},
"/computer/api/json": gojenkins.Computers{
Computers: []*gojenkins.NodeResponse{
{DisplayName: "ignore-1"},
{DisplayName: "ignore-2"},
},
},
},
},
},
{
name: "normal data collection",
input: mockHandler{
responseMap: map[string]interface{}{
"/api/json": struct{}{},
"/computer/api/json": gojenkins.Computers{
Computers: []*gojenkins.NodeResponse{
{
DisplayName: "master",
MonitorData: monitorData{
Hudson_NodeMonitors_ArchitectureMonitor: "linux",
Hudson_NodeMonitors_ResponseTimeMonitor: struct {
Average int64 `json:"average"`
}{
Average: 10032,
},
Hudson_NodeMonitors_DiskSpaceMonitor: map[string]interface{}{
"path": "/path/1",
"size": 123,
},
Hudson_NodeMonitors_TemporarySpaceMonitor: map[string]interface{}{
"path": "/path/2",
"size": 245,
},
Hudson_NodeMonitors_SwapSpaceMonitor: map[string]interface{}{
"availableSwapSpace": 212,
"totalSwapSpace": 500,
"availablePhysicalMemory": 101,
"totalPhysicalMemory": 500,
},
},
Offline: false,
},
},
},
},
},
output: &testutil.Accumulator{
Metrics: []*testutil.Metric{
{
Tags: map[string]string{
"node_name": "master",
"arch": "linux",
"status": "online",
"disk_path": "/path/1",
"temp_path": "/path/2",
},
Fields: map[string]interface{}{
"response_time": int64(10032),
"disk_available": float64(123),
"temp_available": float64(245),
"swap_available": float64(212),
"swap_total": float64(500),
"memory_available": float64(101),
"memory_total": float64(500),
},
},
},
},
},
}
for _, test := range tests {
ts := httptest.NewServer(test.input)
defer ts.Close()
j := &Jenkins{
URL: ts.URL,
ResponseTimeout: internal.Duration{Duration: time.Microsecond},
NodeExclude: []string{"ignore-1", "ignore-2"},
}
te := j.newInstance(ts.Client())
acc := new(testutil.Accumulator)
j.gatherNodesData(acc)
if err := acc.FirstError(); err != nil {
te = err.(*Error)
}
if test.oe == nil && te != nil {
t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error())
} else if test.oe != nil {
test.oe.url = ts.URL + "/computer/api/json"
if te == nil {
t.Fatalf("%s: want err: %s, got nil", test.name, test.oe.Error())
}
if test.oe.reference != te.reference {
t.Fatalf("%s: bad error msg Expected %s, got %s\n", test.name, test.oe.reference, te.reference)
}
if test.oe.url != te.url {
t.Fatalf("%s: bad error url Expected %s, got %s\n", test.name, test.oe.url, te.url)
}
}
if test.output == nil && len(acc.Metrics) > 0 {
t.Fatalf("%s: collected extra data", test.name)
} else if test.output != nil && len(test.output.Metrics) > 0 {
for k, m := range test.output.Metrics[0].Tags {
if acc.Metrics[0].Tags[k] != m {
t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", test.name, k, m, acc.Metrics[0].Tags[k])
}
}
for k, m := range test.output.Metrics[0].Fields {
if acc.Metrics[0].Fields[k] != m {
t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", test.name, k, m, m, acc.Metrics[0].Fields[k], acc.Metrics[0].Fields[k])
}
}
}
}
}
// TestNewInstance verifies that newInstance validates the configured URL,
// probes the Jenkins endpoint, and fills in defaults (MaxConnections,
// MaxSubJobPerLayer) when they are left unset.
func TestNewInstance(t *testing.T) {
	mh := mockHandler{
		responseMap: map[string]interface{}{
			"/api/json": struct{}{},
		},
	}
	ts := httptest.NewServer(mh)
	defer ts.Close()
	mockClient := ts.Client()
	tests := []struct {
		// name of the test
		name   string
		input  *Jenkins
		output *Jenkins
		oe     *Error
	}{
		{
			name: "bad jenkins config",
			input: &Jenkins{
				URL:             "http://a bad url",
				ResponseTimeout: internal.Duration{Duration: time.Microsecond},
			},
			oe: &Error{
				url:       "http://a bad url",
				reference: errConnectJenkins,
			},
		},
		{
			name: "has filter",
			input: &Jenkins{
				URL:             ts.URL,
				ResponseTimeout: internal.Duration{Duration: time.Microsecond},
				JobExclude:      []string{"job1", "job2"},
				NodeExclude:     []string{"node1", "node2"},
			},
		},
		{
			name: "default config",
			input: &Jenkins{
				URL:             ts.URL,
				ResponseTimeout: internal.Duration{Duration: time.Microsecond},
			},
			output: &Jenkins{
				MaxConnections:    30,
				MaxSubJobPerLayer: 10,
			},
		},
	}
	for _, test := range tests {
		te := test.input.newInstance(mockClient)
		if test.oe == nil && te != nil {
			t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error())
		} else if test.oe != nil {
			// te may be nil when the expected error did not occur;
			// check before dereferencing its fields.
			if te == nil {
				t.Fatalf("%s: want err: %s, got nil", test.name, test.oe.Error())
			}
			if test.oe.reference != te.reference {
				t.Fatalf("%s: bad error msg Expected %s, got %s\n", test.name, test.oe.reference, te.reference)
			}
			if test.oe.url != te.url {
				t.Fatalf("%s: bad error url Expected %s, got %s\n", test.name, test.oe.url, te.url)
			}
		}
		if test.output != nil {
			// te is nil on the success path here, so it must not be
			// dereferenced in the failure message.
			if test.input.instance == nil {
				t.Fatalf("%s: jenkins instance shouldn't be nil", test.name)
			}
			if test.input.MaxConnections != test.output.MaxConnections {
				t.Fatalf("%s: different MaxConnections Expected %d, got %d\n", test.name, test.output.MaxConnections, test.input.MaxConnections)
			}
			if test.input.MaxSubJobPerLayer != test.output.MaxSubJobPerLayer {
				t.Fatalf("%s: different MaxSubJobPerLayer Expected %d, got %d\n", test.name, test.output.MaxSubJobPerLayer, test.input.MaxSubJobPerLayer)
			}
		}
	}
}
// TestGatherJobs exercises gatherJobs against a mock Jenkins server:
// error propagation for unreachable inner jobs and builds, skipping of
// in-progress and too-old builds, and metric collection for plain jobs,
// nested sub jobs, and the job_exclude filter.
func TestGatherJobs(t *testing.T) {
	tests := []struct {
		name   string
		input  mockHandler
		output *testutil.Accumulator
		oe     *Error
	}{
		{
			name:  "empty job",
			input: mockHandler{},
		},
		{
			name: "bad inner jobs",
			input: mockHandler{
				responseMap: map[string]interface{}{
					"/api/json": &gojenkins.JobResponse{
						Jobs: []gojenkins.InnerJob{
							{Name: "job1"},
						},
					},
				},
			},
			oe: &Error{
				reference: errRetrieveInnerJobs,
				url:       "/job/job1/api/json",
			},
		},
		{
			name: "jobs has no build",
			input: mockHandler{
				responseMap: map[string]interface{}{
					"/api/json": &gojenkins.JobResponse{
						Jobs: []gojenkins.InnerJob{
							{Name: "job1"},
						},
					},
					"/job/job1/api/json": &gojenkins.JobResponse{},
				},
			},
		},
		{
			name: "bad build info",
			input: mockHandler{
				responseMap: map[string]interface{}{
					"/api/json": &gojenkins.JobResponse{
						Jobs: []gojenkins.InnerJob{
							{Name: "job1"},
						},
					},
					"/job/job1/api/json": &gojenkins.JobResponse{
						LastBuild: gojenkins.JobBuild{
							Number: 1,
						},
					},
				},
			},
			oe: &Error{
				url:       "/job/job1/1/api/json",
				reference: errRetrieveLatestBuild,
			},
		},
		{
			name: "ignore building job",
			input: mockHandler{
				responseMap: map[string]interface{}{
					"/api/json": &gojenkins.JobResponse{
						Jobs: []gojenkins.InnerJob{
							{Name: "job1"},
						},
					},
					"/job/job1/api/json": &gojenkins.JobResponse{
						LastBuild: gojenkins.JobBuild{
							Number: 1,
						},
					},
					"/job/job1/1/api/json": &gojenkins.BuildResponse{
						Building: true,
					},
				},
			},
		},
		{
			name: "ignore old build",
			input: mockHandler{
				responseMap: map[string]interface{}{
					"/api/json": &gojenkins.JobResponse{
						Jobs: []gojenkins.InnerJob{
							{Name: "job1"},
						},
					},
					"/job/job1/api/json": &gojenkins.JobResponse{
						LastBuild: gojenkins.JobBuild{
							Number: 2,
						},
					},
					"/job/job1/2/api/json": &gojenkins.BuildResponse{
						Building:  false,
						Timestamp: 100,
					},
				},
			},
		},
		{
			name: "gather metrics",
			input: mockHandler{
				responseMap: map[string]interface{}{
					"/api/json": &gojenkins.JobResponse{
						Jobs: []gojenkins.InnerJob{
							{Name: "job1"},
							{Name: "job2"},
						},
					},
					"/job/job1/api/json": &gojenkins.JobResponse{
						LastBuild: gojenkins.JobBuild{
							Number: 3,
						},
					},
					"/job/job2/api/json": &gojenkins.JobResponse{
						LastBuild: gojenkins.JobBuild{
							Number: 1,
						},
					},
					"/job/job1/3/api/json": &gojenkins.BuildResponse{
						Building:  false,
						Result:    "SUCCESS",
						Duration:  25558,
						Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000,
					},
					"/job/job2/1/api/json": &gojenkins.BuildResponse{
						Building:  false,
						Result:    "FAILURE",
						Duration:  1558,
						Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000,
					},
				},
			},
			output: &testutil.Accumulator{
				Metrics: []*testutil.Metric{
					{
						Tags: map[string]string{
							"job_name": "job1",
							"result":   "SUCCESS",
						},
						Fields: map[string]interface{}{
							"duration":    int64(25558),
							"result_code": 0,
						},
					},
					{
						Tags: map[string]string{
							"job_name": "job2",
							"result":   "FAILURE",
						},
						Fields: map[string]interface{}{
							"duration":    int64(1558),
							"result_code": 1,
						},
					},
				},
			},
		},
		{
			name: "gather sub jobs, jobs filter",
			input: mockHandler{
				responseMap: map[string]interface{}{
					"/api/json": &gojenkins.JobResponse{
						Jobs: []gojenkins.InnerJob{
							{Name: "apps"},
							{Name: "ignore-1"},
						},
					},
					"/job/apps/api/json": &gojenkins.JobResponse{
						Jobs: []gojenkins.InnerJob{
							{Name: "k8s-cloud"},
							{Name: "chronograf"},
							{Name: "ignore-all"},
						},
					},
					"/job/apps/job/ignore-all/api/json": &gojenkins.JobResponse{
						Jobs: []gojenkins.InnerJob{
							{Name: "1"},
							{Name: "2"},
						},
					},
					"/job/apps/job/chronograf/api/json": &gojenkins.JobResponse{
						LastBuild: gojenkins.JobBuild{
							Number: 1,
						},
					},
					"/job/apps/job/k8s-cloud/api/json": &gojenkins.JobResponse{
						Jobs: []gojenkins.InnerJob{
							{Name: "PR-100"},
							{Name: "PR-101"},
							{Name: "PR-ignore2"},
						},
					},
					"/job/apps/job/k8s-cloud/job/PR-100/api/json": &gojenkins.JobResponse{
						LastBuild: gojenkins.JobBuild{
							Number: 1,
						},
					},
					"/job/apps/job/k8s-cloud/job/PR-101/api/json": &gojenkins.JobResponse{
						LastBuild: gojenkins.JobBuild{
							Number: 4,
						},
					},
					"/job/apps/job/chronograf/1/api/json": &gojenkins.BuildResponse{
						Building:  false,
						Result:    "FAILURE",
						Duration:  1558,
						Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000,
					},
					"/job/apps/job/k8s-cloud/job/PR-101/4/api/json": &gojenkins.BuildResponse{
						Building:  false,
						Result:    "SUCCESS",
						Duration:  76558,
						Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000,
					},
					"/job/apps/job/k8s-cloud/job/PR-100/1/api/json": &gojenkins.BuildResponse{
						Building:  false,
						Result:    "SUCCESS",
						Duration:  91558,
						Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000,
					},
				},
			},
			output: &testutil.Accumulator{
				Metrics: []*testutil.Metric{
					{
						Tags: map[string]string{
							"job_name": "apps/chronograf",
							"result":   "FAILURE",
						},
						Fields: map[string]interface{}{
							"duration":    int64(1558),
							"result_code": 1,
						},
					},
					{
						Tags: map[string]string{
							"job_name": "apps/k8s-cloud/PR-100",
							"result":   "SUCCESS",
						},
						Fields: map[string]interface{}{
							"duration":    int64(91558),
							"result_code": 0,
						},
					},
					{
						Tags: map[string]string{
							"job_name": "apps/k8s-cloud/PR-101",
							"result":   "SUCCESS",
						},
						Fields: map[string]interface{}{
							"duration":    int64(76558),
							"result_code": 0,
						},
					},
				},
			},
		},
	}
	for _, test := range tests {
		ts := httptest.NewServer(test.input)
		// NOTE: deferred in a loop, so all servers close only when the
		// test function returns; acceptable for a bounded test table.
		defer ts.Close()
		j := &Jenkins{
			URL:             ts.URL,
			MaxBuildAge:     internal.Duration{Duration: time.Hour},
			ResponseTimeout: internal.Duration{Duration: time.Microsecond},
			JobExclude: []string{
				"ignore-1",
				"apps/ignore-all/*",
				"apps/k8s-cloud/PR-ignore2",
			},
		}
		te := j.newInstance(ts.Client())
		acc := new(testutil.Accumulator)
		j.gatherJobs(acc)
		if err := acc.FirstError(); err != nil {
			te = err.(*Error)
		}
		if test.oe == nil && te != nil {
			t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error())
		} else if test.oe != nil {
			test.oe.url = ts.URL + test.oe.url
			if te == nil {
				t.Fatalf("%s: want err: %s, got nil", test.name, test.oe.Error())
			}
			if test.oe.reference != te.reference {
				t.Fatalf("%s: bad error msg Expected %s, got %s\n", test.name, test.oe.reference, te.reference)
			}
			if test.oe.url != te.url {
				t.Fatalf("%s: bad error url Expected %s, got %s\n", test.name, test.oe.url, te.url)
			}
		}
		if test.output != nil && len(test.output.Metrics) > 0 {
			// fail cleanly instead of panicking on index-out-of-range
			// when fewer metrics than expected were gathered
			if len(acc.Metrics) < len(test.output.Metrics) {
				t.Fatalf("%s: metrics count unmatch Expected %d, got %d\n", test.name, len(test.output.Metrics), len(acc.Metrics))
			}
			// sort metrics by job_name so expected and actual align
			sort.Slice(acc.Metrics, func(i, j int) bool {
				return strings.Compare(acc.Metrics[i].Tags["job_name"], acc.Metrics[j].Tags["job_name"]) < 0
			})
			for i := range test.output.Metrics {
				for k, m := range test.output.Metrics[i].Tags {
					if acc.Metrics[i].Tags[k] != m {
						t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", test.name, k, m, acc.Metrics[i].Tags[k])
					}
				}
				for k, m := range test.output.Metrics[i].Fields {
					// the got-type arg must use index i (was a copy-paste
					// bug printing the type of metric 0)
					if acc.Metrics[i].Fields[k] != m {
						t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", test.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k])
					}
				}
			}
		}
	}
}