From 3b0cee346c319e98b3fcd24c7f965b562c8ee58e Mon Sep 17 00:00:00 2001
From: kelwang <8237958+kelwang@users.noreply.github.com>
Date: Mon, 5 Nov 2018 17:19:08 -0500
Subject: [PATCH] Add jenkins input plugin (#4289)

---
 Godeps                                 |   0
 plugins/inputs/all/all.go              |   1 +
 plugins/inputs/jenkins/README.md       |  96 ++++
 plugins/inputs/jenkins/client.go       | 156 +++++++
 plugins/inputs/jenkins/jenkins.go      | 443 ++++++++++++++++++
 plugins/inputs/jenkins/jenkins_test.go | 615 +++++++++++++++++++++++++
 6 files changed, 1311 insertions(+)
 create mode 100644 Godeps
 create mode 100644 plugins/inputs/jenkins/README.md
 create mode 100644 plugins/inputs/jenkins/client.go
 create mode 100644 plugins/inputs/jenkins/jenkins.go
 create mode 100644 plugins/inputs/jenkins/jenkins_test.go

diff --git a/Godeps b/Godeps
new file mode 100644
index 000000000..e69de29bb
diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index c64fec0a7..cfdc12ad2 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -52,6 +52,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/ipset"
 	_ "github.com/influxdata/telegraf/plugins/inputs/iptables"
 	_ "github.com/influxdata/telegraf/plugins/inputs/ipvs"
+	_ "github.com/influxdata/telegraf/plugins/inputs/jenkins"
 	_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
 	_ "github.com/influxdata/telegraf/plugins/inputs/jolokia2"
 	_ "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry"
diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md
new file mode 100644
index 000000000..16afcaa7c
--- /dev/null
+++ b/plugins/inputs/jenkins/README.md
@@ -0,0 +1,96 @@
+# Jenkins Plugin
+
+The jenkins plugin gathers information about the nodes and jobs running in a Jenkins instance.
+
+This plugin does not require any additional plugin to be installed on the Jenkins server; it uses the Jenkins API to retrieve all the information needed.
+
+### Configuration:
+
+```toml
+  ## The Jenkins URL
+  url = "http://my-jenkins-instance:8080"
+  # username = "admin"
+  # password = "admin"
+
+  ## Set response_timeout
+  response_timeout = "5s"
+
+  ## Optional SSL Config
+  # ssl_ca = "/path/to/cafile"
+  # ssl_cert = "/path/to/certfile"
+  # ssl_key = "/path/to/keyfile"
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
+
+  ## Optional Max Job Build Age filter
+  ## Default 1 hour; builds older than max_build_age are ignored
+  # max_build_age = "1h"
+
+  ## Optional Sub Job Depth filter
+  ## Jenkins can have unlimited layers of sub jobs
+  ## This config limits the layers of pulling; the default value 0 means
+  ## unlimited pulling until there are no more sub jobs
+  # max_subjob_depth = 0
+
+  ## Optional Sub Job Per Layer
+  ## In the workflow-multibranch-plugin, each branch is created as a sub job.
+  ## This config limits gathering to only the latest branches in each layer;
+  ## empty will use the default value of 10
+  # max_subjob_per_layer = 10
+
+  ## Jobs to exclude from gathering
+  # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*" ]
+
+  ## Nodes to exclude from gathering
+  # node_exclude = [ "node1", "node2" ]
+
+  ## Worker pool for the jenkins plugin only
+  ## Leaving this field empty will use the default value of 5
+  # max_connections = 5
+```
+
+### Metrics:
+
+- jenkins_node
+  - tags:
+    - arch
+    - disk_path
+    - temp_path
+    - node_name
+    - status ("online", "offline")
+  - fields:
+    - disk_available
+    - temp_available
+    - memory_available
+    - memory_total
+    - swap_available
+    - swap_total
+    - response_time
+
+- jenkins_job
+  - tags:
+    - name
+    - parents
+    - result
+  - fields:
+    - duration
+    - result_code (0 = SUCCESS, 1 = FAILURE, 2 = NOT_BUILT, 3 = UNSTABLE, 4 = ABORTED)
+
+### Sample Queries:
+
+```
+SELECT mean("memory_available") AS "mean_memory_available", mean("memory_total") AS "mean_memory_total", mean("temp_available") AS "mean_temp_available" FROM "jenkins_node" WHERE time > now() - 15m GROUP BY time(:interval:) FILL(null)
+```
+
+```
+SELECT mean("duration") AS "mean_duration" FROM "jenkins_job" WHERE time > now() - 24h GROUP BY time(:interval:) FILL(null)
+```
+
+### Example Output:
+
+```
+$ ./telegraf --config telegraf.conf --input-filter jenkins --test
+jenkins_node,arch=Linux\ (amd64),disk_path=/var/jenkins_home,temp_path=/tmp,host=myhost,node_name=master,status=online swap_total=4294963200,memory_available=586711040,memory_total=6089498624,response_time=1000i,disk_available=152392036352,temp_available=152392036352,swap_available=3503263744 1516031535000000000
+jenkins_job,host=myhost,name=JOB1,parents=apps/br1,result=SUCCESS duration=2831i,result_code=0i 1516026630000000000
+jenkins_job,host=myhost,name=JOB2,parents=apps/br2,result=SUCCESS duration=2285i,result_code=0i 1516027230000000000
+```
\ No newline at end of file
diff --git a/plugins/inputs/jenkins/client.go b/plugins/inputs/jenkins/client.go
new file mode 100644
index 000000000..284b5eccf
--- /dev/null
+++ b/plugins/inputs/jenkins/client.go
@@ -0,0 +1,156 @@
+package jenkins
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+)
+
+type client struct {
+	baseURL       string
+	httpClient    *http.Client
+	username      string
+	password      string
+	sessionCookie *http.Cookie
+	semaphore     chan struct{}
+}
+
+func newClient(httpClient *http.Client, url, username, password string, maxConnections int) *client {
+	return &client{
+		baseURL:    url,
+		httpClient: httpClient,
+		username:   username,
+		password:   password,
+		semaphore:  make(chan struct{}, maxConnections),
+	}
+}
+
+func (c *client) init() error {
+	// get session cookie
+	req, err := http.NewRequest("GET", c.baseURL, nil)
+	if err != nil {
+		return err
+	}
+	if c.username != "" && c.password != "" {
+		// set auth
+		req.SetBasicAuth(c.username, c.password)
+	}
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	for _, cc := range resp.Cookies() {
+		if strings.Contains(cc.Name, "JSESSIONID") {
+			c.sessionCookie = cc
+			break
+		}
+	}
+	// first api fetch
+	if err := c.doGet(context.Background(), jobPath, new(jobResponse)); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (c *client) doGet(ctx context.Context, url string, v interface{}) error {
+	req, err := createGetRequest(c.baseURL+url, c.username, c.password, c.sessionCookie)
+	if err != nil {
+		return err
+	}
+	select {
+	case c.semaphore <- struct{}{}:
+		break
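+	// if the context is cancelled before a connection slot frees up, give up
+	// and return the context error instead of blocking indefinitely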
+ case <-ctx.Done(): + return ctx.Err() + } + resp, err := c.httpClient.Do(req.WithContext(ctx)) + if err != nil { + <-c.semaphore + return err + } + defer func() { + resp.Body.Close() + <-c.semaphore + }() + // Clear invalid token if unauthorized + if resp.StatusCode == http.StatusUnauthorized { + c.sessionCookie = nil + return APIError{ + URL: url, + StatusCode: resp.StatusCode, + Title: resp.Status, + } + } + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return APIError{ + URL: url, + StatusCode: resp.StatusCode, + Title: resp.Status, + } + } + if resp.StatusCode == http.StatusNoContent { + return APIError{ + URL: url, + StatusCode: resp.StatusCode, + Title: resp.Status, + } + } + if err = json.NewDecoder(resp.Body).Decode(v); err != nil { + return err + } + return nil +} + +type APIError struct { + URL string + StatusCode int + Title string + Description string +} + +func (e APIError) Error() string { + if e.Description != "" { + return fmt.Sprintf("[%s] %s: %s", e.URL, e.Title, e.Description) + } + return fmt.Sprintf("[%s] %s", e.URL, e.Title) +} + +func createGetRequest(url string, username, password string, sessionCookie *http.Cookie) (*http.Request, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + if sessionCookie != nil { + req.AddCookie(sessionCookie) + } else if username != "" && password != "" { + req.SetBasicAuth(username, password) + } + req.Header.Add("Accept", "application/json") + return req, nil +} + +func (c *client) getJobs(ctx context.Context, jr *jobRequest) (js *jobResponse, err error) { + js = new(jobResponse) + url := jobPath + if jr != nil { + url = jr.URL() + } + err = c.doGet(ctx, url, js) + return js, err +} + +func (c *client) getBuild(ctx context.Context, jr jobRequest, number int64) (b *buildResponse, err error) { + b = new(buildResponse) + url := jr.buildURL(number) + err = c.doGet(ctx, url, b) + return b, err +} + +func (c *client) getAllNodes(ctx context.Context) (nodeResp *nodeResponse, err error) { + nodeResp = new(nodeResponse) + err = c.doGet(ctx, nodePath, nodeResp) + return nodeResp, err +} diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go new file mode 100644 index 000000000..b052b22a9 --- /dev/null +++ b/plugins/inputs/jenkins/jenkins.go @@ -0,0 +1,443 @@ +package jenkins + +import ( + "context" + "errors" + "fmt" + "log" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Jenkins plugin gathers information about the nodes and jobs running in a jenkins instance. 
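+// Node data is read from the /computer/api/json endpoint and job/build data
+// from the per-job /api/json endpoints.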
+type Jenkins struct { + URL string + Username string + Password string + // HTTP Timeout specified as a string - 3s, 1m, 1h + ResponseTimeout internal.Duration + + tls.ClientConfig + client *client + + MaxConnections int `toml:"max_connections"` + MaxBuildAge internal.Duration `toml:"max_build_age"` + MaxSubJobDepth int `toml:"max_subjob_depth"` + MaxSubJobPerLayer int `toml:"max_subjob_per_layer"` + JobExclude []string `toml:"job_exclude"` + jobFilter filter.Filter + + NodeExclude []string `toml:"node_exclude"` + nodeFilter filter.Filter + + semaphore chan struct{} +} + +const sampleConfig = ` + ## The Jenkins URL + url = "http://my-jenkins-instance:8080" + # username = "admin" + # password = "admin" + + ## Set response_timeout + response_timeout = "5s" + + ## Optional SSL Config + # ssl_ca = /path/to/cafile + # ssl_cert = /path/to/certfile + # ssl_key = /path/to/keyfile + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false + + ## Optional Max Job Build Age filter + ## Default 1 hour, ignore builds older than max_build_age + # max_build_age = "1h" + + ## Optional Sub Job Depth filter + ## Jenkins can have unlimited layer of sub jobs + ## This config will limit the layers of pulling, default value 0 means + ## unlimited pulling until no more sub jobs + # max_subjob_depth = 0 + + ## Optional Sub Job Per Layer + ## In workflow-multibranch-plugin, each branch will be created as a sub job. + ## This config will limit to call only the lasted branches in each layer, + ## empty will use default value 10 + # max_subjob_per_layer = 10 + + ## Jobs to exclude from gathering + # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"] + + ## Nodes to exclude from gathering + # node_exclude = [ "node1", "node2" ] + + ## Worker pool for jenkins plugin only + ## Empty this field will use default value 5 + # max_connections = 5 +` + +// measurement +const ( + measurementNode = "jenkins_node" + measurementJob = "jenkins_job" +) + +// SampleConfig implements telegraf.Input interface +func (j *Jenkins) SampleConfig() string { + return sampleConfig +} + +// Description implements telegraf.Input interface +func (j *Jenkins) Description() string { + return "Read jobs and cluster metrics from Jenkins instances" +} + +// Gather implements telegraf.Input interface +func (j *Jenkins) Gather(acc telegraf.Accumulator) error { + if j.client == nil { + client, err := j.newHTTPClient() + if err != nil { + return err + } + if err = j.initialize(client); err != nil { + return err + } + } + + j.gatherNodesData(acc) + j.gatherJobs(acc) + + return nil +} + +func (j *Jenkins) newHTTPClient() (*http.Client, error) { + tlsCfg, err := j.ClientConfig.TLSConfig() + if err != nil { + return nil, fmt.Errorf("error parse jenkins config[%s]: %v", j.URL, err) + } + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + MaxIdleConns: j.MaxConnections, + }, + Timeout: j.ResponseTimeout.Duration, + }, nil +} + +// seperate the client as dependency to use httptest Client for mocking +func (j *Jenkins) initialize(client *http.Client) error { + var err error + + // init job filter + j.jobFilter, err = filter.Compile(j.JobExclude) + if err != nil { + return fmt.Errorf("error compile job filters[%s]: %v", j.URL, err) + } + + // init node filter + j.nodeFilter, err = filter.Compile(j.NodeExclude) + if err != nil { + return fmt.Errorf("error compile node filters[%s]: %v", j.URL, err) + } + + // init tcp pool with default value + if j.MaxConnections <= 0 { + j.MaxConnections = 5 + } + + // 
default sub jobs can be acquired + if j.MaxSubJobPerLayer <= 0 { + j.MaxSubJobPerLayer = 10 + } + + j.semaphore = make(chan struct{}, j.MaxConnections) + + j.client = newClient(client, j.URL, j.Username, j.Password, j.MaxConnections) + + return j.client.init() +} + +func (j *Jenkins) gatherNodeData(n node, acc telegraf.Accumulator) error { + + tags := map[string]string{} + if n.DisplayName == "" { + return fmt.Errorf("error empty node name") + } + + tags["node_name"] = n.DisplayName + // filter out excluded node_name + if j.nodeFilter != nil && j.nodeFilter.Match(tags["node_name"]) { + return nil + } + + tags["arch"] = n.MonitorData.HudsonNodeMonitorsArchitectureMonitor + + tags["status"] = "online" + if n.Offline { + tags["status"] = "offline" + } + monitorData := n.MonitorData + if monitorData.HudsonNodeMonitorsArchitectureMonitor == "" { + return errors.New("empty monitor data, please check your permission") + } + tags["disk_path"] = monitorData.HudsonNodeMonitorsDiskSpaceMonitor.Path + tags["temp_path"] = monitorData.HudsonNodeMonitorsTemporarySpaceMonitor.Path + + fields := map[string]interface{}{ + "response_time": monitorData.HudsonNodeMonitorsResponseTimeMonitor.Average, + "disk_available": monitorData.HudsonNodeMonitorsDiskSpaceMonitor.Size, + "temp_available": monitorData.HudsonNodeMonitorsTemporarySpaceMonitor.Size, + "swap_available": monitorData.HudsonNodeMonitorsSwapSpaceMonitor.SwapAvailable, + "memory_available": monitorData.HudsonNodeMonitorsSwapSpaceMonitor.MemoryAvailable, + "swap_total": monitorData.HudsonNodeMonitorsSwapSpaceMonitor.SwapTotal, + "memory_total": monitorData.HudsonNodeMonitorsSwapSpaceMonitor.MemoryTotal, + } + acc.AddFields(measurementNode, fields, tags) + + return nil +} + +func (j *Jenkins) gatherNodesData(acc telegraf.Accumulator) { + + nodeResp, err := j.client.getAllNodes(context.Background()) + if err != nil { + acc.AddError(err) + return + } + // get node data + for _, node := range nodeResp.Computers { + err = j.gatherNodeData(node, acc) + if err == nil { + continue + } + acc.AddError(err) + } +} + +func (j *Jenkins) gatherJobs(acc telegraf.Accumulator) { + js, err := j.client.getJobs(context.Background(), nil) + if err != nil { + acc.AddError(err) + return + } + var wg sync.WaitGroup + for _, job := range js.Jobs { + wg.Add(1) + go func(name string, wg *sync.WaitGroup, acc telegraf.Accumulator) { + defer wg.Done() + if err := j.getJobDetail(jobRequest{ + name: name, + parents: []string{}, + layer: 0, + }, acc); err != nil { + acc.AddError(err) + } + }(job.Name, &wg, acc) + } + wg.Wait() +} + +// wrap the tcp request with doGet +// block tcp request if buffered channel is full +func (j *Jenkins) doGet(tcp func() error) error { + j.semaphore <- struct{}{} + if err := tcp(); err != nil { + <-j.semaphore + return err + } + <-j.semaphore + return nil +} + +func (j *Jenkins) getJobDetail(jr jobRequest, acc telegraf.Accumulator) error { + if j.MaxSubJobDepth > 0 && jr.layer == j.MaxSubJobDepth { + return nil + } + // filter out excluded job. 
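+	// the exclude filter is matched against the job's full hierarchy name,
+	// i.e. its parent names and its own name joined with "/"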
+ if j.jobFilter != nil && j.jobFilter.Match(jr.hierarchyName()) { + return nil + } + + js, err := j.client.getJobs(context.Background(), &jr) + if err != nil { + return err + } + + var wg sync.WaitGroup + for k, ij := range js.Jobs { + if k < len(js.Jobs)-j.MaxSubJobPerLayer-1 { + continue + } + wg.Add(1) + // schedule tcp fetch for inner jobs + go func(ij innerJob, jr jobRequest, acc telegraf.Accumulator) { + defer wg.Done() + if err := j.getJobDetail(jobRequest{ + name: ij.Name, + parents: jr.combined(), + layer: jr.layer + 1, + }, acc); err != nil { + acc.AddError(err) + } + }(ij, jr, acc) + } + wg.Wait() + + // collect build info + number := js.LastBuild.Number + if number < 1 { + // no build info + return nil + } + build, err := j.client.getBuild(context.Background(), jr, number) + if err != nil { + return err + } + + if build.Building { + log.Printf("D! Ignore running build on %s, build %v", jr.name, number) + return nil + } + + // stop if build is too old + // Higher up in gatherJobs + cutoff := time.Now().Add(-1 * j.MaxBuildAge.Duration) + + // Here we just test + if build.GetTimestamp().Before(cutoff) { + return nil + } + + gatherJobBuild(jr, build, acc) + return nil +} + +type nodeResponse struct { + Computers []node `json:"computer"` +} + +type node struct { + DisplayName string `json:"displayName"` + Offline bool `json:"offline"` + MonitorData monitorData `json:"monitorData"` +} + +type monitorData struct { + HudsonNodeMonitorsArchitectureMonitor string `json:"hudson.node_monitors.ArchitectureMonitor"` + HudsonNodeMonitorsDiskSpaceMonitor nodeSpaceMonitor `json:"hudson.node_monitors.DiskSpaceMonitor"` + HudsonNodeMonitorsResponseTimeMonitor struct { + Average int64 `json:"average"` + } `json:"hudson.node_monitors.ResponseTimeMonitor"` + HudsonNodeMonitorsSwapSpaceMonitor struct { + SwapAvailable float64 `json:"availableSwapSpace"` + SwapTotal float64 `json:"totalSwapSpace"` + MemoryAvailable float64 `json:"availablePhysicalMemory"` + MemoryTotal float64 `json:"totalPhysicalMemory"` + } `json:"hudson.node_monitors.SwapSpaceMonitor"` + HudsonNodeMonitorsTemporarySpaceMonitor nodeSpaceMonitor `json:"hudson.node_monitors.TemporarySpaceMonitor"` +} + +type nodeSpaceMonitor struct { + Path string `json:"path"` + Size float64 `json:"size"` +} + +type jobResponse struct { + LastBuild jobBuild `json:"lastBuild"` + Jobs []innerJob `json:"jobs"` + Name string `json:"name"` +} + +type innerJob struct { + Name string `json:"name"` + URL string `json:"url"` + Color string `json:"color"` +} + +type jobBuild struct { + Number int64 + URL string +} + +type buildResponse struct { + Building bool `json:"building"` + Duration int64 `json:"duration"` + Result string `json:"result"` + Timestamp int64 `json:"timestamp"` +} + +func (b *buildResponse) GetTimestamp() time.Time { + return time.Unix(0, int64(b.Timestamp)*int64(time.Millisecond)) +} + +const ( + nodePath = "/computer/api/json" + jobPath = "/api/json" +) + +type jobRequest struct { + name string + parents []string + layer int +} + +func (jr jobRequest) combined() []string { + return append(jr.parents, jr.name) +} + +func (jr jobRequest) URL() string { + return "/job/" + strings.Join(jr.combined(), "/job/") + jobPath +} + +func (jr jobRequest) buildURL(number int64) string { + return "/job/" + strings.Join(jr.combined(), "/job/") + "/" + strconv.Itoa(int(number)) + jobPath +} + +func (jr jobRequest) hierarchyName() string { + return strings.Join(jr.combined(), "/") +} + +func (jr jobRequest) parentsString() string { + return 
strings.Join(jr.parents, "/") +} + +func gatherJobBuild(jr jobRequest, b *buildResponse, acc telegraf.Accumulator) { + tags := map[string]string{"name": jr.name, "parents": jr.parentsString(), "result": b.Result} + fields := make(map[string]interface{}) + fields["duration"] = b.Duration + fields["result_code"] = mapResultCode(b.Result) + + acc.AddFields(measurementJob, fields, tags, b.GetTimestamp()) +} + +// perform status mapping +func mapResultCode(s string) int { + switch strings.ToLower(s) { + case "success": + return 0 + case "failure": + return 1 + case "not_built": + return 2 + case "unstable": + return 3 + case "aborted": + return 4 + } + return -1 +} + +func init() { + inputs.Add("jenkins", func() telegraf.Input { + return &Jenkins{} + }) +} diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go new file mode 100644 index 000000000..7724fc0e3 --- /dev/null +++ b/plugins/inputs/jenkins/jenkins_test.go @@ -0,0 +1,615 @@ +package jenkins + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "sort" + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" +) + +func TestJobRequest(t *testing.T) { + tests := []struct { + input jobRequest + output string + }{ + { + jobRequest{}, + "", + }, + { + jobRequest{ + name: "1", + parents: []string{"3", "2"}, + }, + "3/2/1", + }, + } + for _, test := range tests { + output := test.input.hierarchyName() + if output != test.output { + t.Errorf("Expected %s, got %s\n", test.output, output) + } + } +} + +func TestResultCode(t *testing.T) { + tests := []struct { + input string + output int + }{ + {"SUCCESS", 0}, + {"Failure", 1}, + {"NOT_BUILT", 2}, + {"UNSTABLE", 3}, + {"ABORTED", 4}, + } + for _, test := range tests { + output := mapResultCode(test.input) + if output != test.output { + t.Errorf("Expected %d, got %d\n", test.output, output) + } + } +} + +type mockHandler struct { + // responseMap is the path to repsonse interface + // we will ouput the serialized response in json when serving http + // example '/computer/api/json': *gojenkins. 
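+	// each value is marshalled to JSON in ServeHTTP below; unknown paths
+	// return 404 and empty payloads return 204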
+ responseMap map[string]interface{} +} + +func (h mockHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + o, ok := h.responseMap[r.URL.Path] + if !ok { + w.WriteHeader(http.StatusNotFound) + return + } + + b, err := json.Marshal(o) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + if len(b) == 0 { + w.WriteHeader(http.StatusNoContent) + return + } + w.Write(b) +} + +func TestGatherNodeData(t *testing.T) { + tests := []struct { + name string + input mockHandler + output *testutil.Accumulator + wantErr bool + }{ + { + name: "bad node data", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": struct{}{}, + "/computer/api/json": nodeResponse{ + Computers: []node{ + {}, + {}, + {}, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "bad empty monitor data", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": struct{}{}, + "/computer/api/json": nodeResponse{ + Computers: []node{ + {DisplayName: "master"}, + {DisplayName: "node1"}, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "filtered nodes", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": struct{}{}, + "/computer/api/json": nodeResponse{ + Computers: []node{ + {DisplayName: "ignore-1"}, + {DisplayName: "ignore-2"}, + }, + }, + }, + }, + }, + + { + name: "normal data collection", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": struct{}{}, + "/computer/api/json": nodeResponse{ + Computers: []node{ + { + DisplayName: "master", + MonitorData: monitorData{ + HudsonNodeMonitorsArchitectureMonitor: "linux", + HudsonNodeMonitorsResponseTimeMonitor: struct { + Average int64 `json:"average"` + }{ + Average: 10032, + }, + HudsonNodeMonitorsDiskSpaceMonitor: nodeSpaceMonitor{ + Path: "/path/1", + Size: 123, + }, + HudsonNodeMonitorsTemporarySpaceMonitor: nodeSpaceMonitor{ + Path: "/path/2", + Size: 245, + }, + HudsonNodeMonitorsSwapSpaceMonitor: struct { + SwapAvailable float64 `json:"availableSwapSpace"` + SwapTotal float64 `json:"totalSwapSpace"` + MemoryAvailable float64 `json:"availablePhysicalMemory"` + MemoryTotal float64 `json:"totalPhysicalMemory"` + }{ + SwapAvailable: 212, + SwapTotal: 500, + MemoryAvailable: 101, + MemoryTotal: 500, + }, + }, + Offline: false, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "node_name": "master", + "arch": "linux", + "status": "online", + "disk_path": "/path/1", + "temp_path": "/path/2", + }, + Fields: map[string]interface{}{ + "response_time": int64(10032), + "disk_available": float64(123), + "temp_available": float64(245), + "swap_available": float64(212), + "swap_total": float64(500), + "memory_available": float64(101), + "memory_total": float64(500), + }, + }, + }, + }, + }, + } + for _, test := range tests { + ts := httptest.NewServer(test.input) + defer ts.Close() + j := &Jenkins{ + URL: ts.URL, + ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + NodeExclude: []string{"ignore-1", "ignore-2"}, + } + te := j.initialize(&http.Client{Transport: &http.Transport{}}) + acc := new(testutil.Accumulator) + j.gatherNodesData(acc) + if err := acc.FirstError(); err != nil { + te = err + } + + if !test.wantErr && te != nil { + t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error()) + } else if test.wantErr && te == nil { + t.Fatalf("%s: expected err, got nil", test.name) + } + if test.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", 
test.name) + } else if test.output != nil && len(test.output.Metrics) > 0 { + for k, m := range test.output.Metrics[0].Tags { + if acc.Metrics[0].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", test.name, k, m, acc.Metrics[0].Tags[k]) + } + } + for k, m := range test.output.Metrics[0].Fields { + if acc.Metrics[0].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", test.name, k, m, m, acc.Metrics[0].Fields[k], acc.Metrics[0].Fields[k]) + } + } + } + } +} + +func TestInitialize(t *testing.T) { + mh := mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": struct{}{}, + }, + } + ts := httptest.NewServer(mh) + defer ts.Close() + mockClient := &http.Client{Transport: &http.Transport{}} + tests := []struct { + // name of the test + name string + input *Jenkins + output *Jenkins + wantErr bool + }{ + { + name: "bad jenkins config", + input: &Jenkins{ + URL: "http://a bad url", + ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + }, + wantErr: true, + }, + { + name: "has filter", + input: &Jenkins{ + URL: ts.URL, + ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + JobExclude: []string{"job1", "job2"}, + NodeExclude: []string{"node1", "node2"}, + }, + }, + { + name: "default config", + input: &Jenkins{ + URL: ts.URL, + ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + }, + output: &Jenkins{ + MaxConnections: 5, + MaxSubJobPerLayer: 10, + }, + }, + } + for _, test := range tests { + te := test.input.initialize(mockClient) + if !test.wantErr && te != nil { + t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error()) + } else if test.wantErr && te == nil { + t.Fatalf("%s: expected err, got nil", test.name) + } + if test.output != nil { + if test.input.client == nil { + t.Fatalf("%s: failed %s, jenkins instance shouldn't be nil", test.name, te.Error()) + } + if test.input.MaxConnections != test.output.MaxConnections { + t.Fatalf("%s: different MaxConnections Expected %d, got %d\n", test.name, test.output.MaxConnections, test.input.MaxConnections) + } + } + + } +} + +func TestGatherJobs(t *testing.T) { + tests := []struct { + name string + input mockHandler + output *testutil.Accumulator + wantErr bool + }{ + { + name: "empty job", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{}, + }, + }, + }, + { + name: "bad inner jobs", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "job1"}, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "jobs has no build", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "job1"}, + }, + }, + "/job/job1/api/json": &jobResponse{}, + }, + }, + }, + { + name: "bad build info", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "job1"}, + }, + }, + "/job/job1/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 1, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "ignore building job", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "job1"}, + }, + }, + "/job/job1/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 1, + }, + }, + "/job/job1/1/api/json": &buildResponse{ + Building: true, + }, + }, + }, + }, + { + name: "ignore old build", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": 
&jobResponse{ + Jobs: []innerJob{ + {Name: "job1"}, + }, + }, + "/job/job1/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 2, + }, + }, + "/job/job1/2/api/json": &buildResponse{ + Building: false, + Timestamp: 100, + }, + }, + }, + }, + { + name: "gather metrics", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "job1"}, + {Name: "job2"}, + }, + }, + "/job/job1/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 3, + }, + }, + "/job/job2/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 1, + }, + }, + "/job/job1/3/api/json": &buildResponse{ + Building: false, + Result: "SUCCESS", + Duration: 25558, + Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, + }, + "/job/job2/1/api/json": &buildResponse{ + Building: false, + Result: "FAILURE", + Duration: 1558, + Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "name": "job1", + "result": "SUCCESS", + }, + Fields: map[string]interface{}{ + "duration": int64(25558), + "result_code": 0, + }, + }, + { + Tags: map[string]string{ + "name": "job2", + "result": "FAILURE", + }, + Fields: map[string]interface{}{ + "duration": int64(1558), + "result_code": 1, + }, + }, + }, + }, + }, + { + name: "gather sub jobs, jobs filter", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "apps"}, + {Name: "ignore-1"}, + }, + }, + "/job/apps/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "k8s-cloud"}, + {Name: "chronograf"}, + {Name: "ignore-all"}, + }, + }, + "/job/apps/job/ignore-all/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "1"}, + {Name: "2"}, + }, + }, + "/job/apps/job/chronograf/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 1, + }, + }, + "/job/apps/job/k8s-cloud/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "PR-100"}, + {Name: "PR-101"}, + {Name: "PR-ignore2"}, + }, + }, + "/job/apps/job/k8s-cloud/job/PR-100/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 1, + }, + }, + "/job/apps/job/k8s-cloud/job/PR-101/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 4, + }, + }, + "/job/apps/job/chronograf/1/api/json": &buildResponse{ + Building: false, + Result: "FAILURE", + Duration: 1558, + Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, + }, + "/job/apps/job/k8s-cloud/job/PR-101/4/api/json": &buildResponse{ + Building: false, + Result: "SUCCESS", + Duration: 76558, + Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, + }, + "/job/apps/job/k8s-cloud/job/PR-100/1/api/json": &buildResponse{ + Building: false, + Result: "SUCCESS", + Duration: 91558, + Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "name": "PR-100", + "parents": "apps/k8s-cloud", + "result": "SUCCESS", + }, + Fields: map[string]interface{}{ + "duration": int64(91558), + "result_code": 0, + }, + }, + { + Tags: map[string]string{ + "name": "PR-101", + "parents": "apps/k8s-cloud", + "result": "SUCCESS", + }, + Fields: map[string]interface{}{ + "duration": int64(76558), + "result_code": 0, + }, + }, + { + Tags: map[string]string{ + "name": "chronograf", + "parents": "apps", + "result": "FAILURE", + }, + Fields: map[string]interface{}{ + "duration": int64(1558), + 
"result_code": 1, + }, + }, + }, + }, + }, + } + for _, test := range tests { + ts := httptest.NewServer(test.input) + defer ts.Close() + j := &Jenkins{ + URL: ts.URL, + MaxBuildAge: internal.Duration{Duration: time.Hour}, + ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + JobExclude: []string{ + "ignore-1", + "apps/ignore-all/*", + "apps/k8s-cloud/PR-ignore2", + }, + } + te := j.initialize(&http.Client{Transport: &http.Transport{}}) + acc := new(testutil.Accumulator) + acc.SetDebug(true) + j.gatherJobs(acc) + if err := acc.FirstError(); err != nil { + te = err + } + if !test.wantErr && te != nil { + t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error()) + } else if test.wantErr && te == nil { + t.Fatalf("%s: expected err, got nil", test.name) + } + + if test.output != nil && len(test.output.Metrics) > 0 { + // sort metrics + sort.Slice(acc.Metrics, func(i, j int) bool { + return strings.Compare(acc.Metrics[i].Tags["name"], acc.Metrics[j].Tags["name"]) < 0 + }) + for i := range test.output.Metrics { + for k, m := range test.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", test.name, k, m, acc.Metrics[i].Tags[k]) + } + } + for k, m := range test.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", test.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[0].Fields[k]) + } + } + } + + } + } +}