Fix metric creation when node is offline in jenkins input (#6627)
commit bcf1bcf318 (parent ce3ae58ad9)
README.md
@@ -67,6 +67,7 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API
     - swap_available
     - swap_total
     - response_time
+    - num_executors
 
 - jenkins_job
   - tags:
jenkins.go
@@ -2,7 +2,6 @@ package jenkins
 
 import (
 	"context"
-	"errors"
 	"fmt"
 	"net/http"
 	"strconv"
@@ -180,27 +179,36 @@ func (j *Jenkins) gatherNodeData(n node, acc telegraf.Accumulator) error {
 		return nil
 	}
 
-	tags["arch"] = n.MonitorData.HudsonNodeMonitorsArchitectureMonitor
+	monitorData := n.MonitorData
 
+	if monitorData.HudsonNodeMonitorsArchitectureMonitor != "" {
+		tags["arch"] = monitorData.HudsonNodeMonitorsArchitectureMonitor
+	}
+
 	tags["status"] = "online"
 	if n.Offline {
 		tags["status"] = "offline"
 	}
-	monitorData := n.MonitorData
-	if monitorData.HudsonNodeMonitorsArchitectureMonitor == "" {
-		return errors.New("empty monitor data, please check your permission")
-	}
-	tags["disk_path"] = monitorData.HudsonNodeMonitorsDiskSpaceMonitor.Path
-	tags["temp_path"] = monitorData.HudsonNodeMonitorsTemporarySpaceMonitor.Path
 
-	fields := map[string]interface{}{
-		"response_time":    monitorData.HudsonNodeMonitorsResponseTimeMonitor.Average,
-		"disk_available":   monitorData.HudsonNodeMonitorsDiskSpaceMonitor.Size,
-		"temp_available":   monitorData.HudsonNodeMonitorsTemporarySpaceMonitor.Size,
-		"swap_available":   monitorData.HudsonNodeMonitorsSwapSpaceMonitor.SwapAvailable,
-		"memory_available": monitorData.HudsonNodeMonitorsSwapSpaceMonitor.MemoryAvailable,
-		"swap_total":       monitorData.HudsonNodeMonitorsSwapSpaceMonitor.SwapTotal,
-		"memory_total":     monitorData.HudsonNodeMonitorsSwapSpaceMonitor.MemoryTotal,
+	fields := make(map[string]interface{})
+	fields["num_executors"] = n.NumExecutors
+
+	if monitorData.HudsonNodeMonitorsResponseTimeMonitor != nil {
+		fields["response_time"] = monitorData.HudsonNodeMonitorsResponseTimeMonitor.Average
+	}
+	if monitorData.HudsonNodeMonitorsDiskSpaceMonitor != nil {
+		tags["disk_path"] = monitorData.HudsonNodeMonitorsDiskSpaceMonitor.Path
+		fields["disk_available"] = monitorData.HudsonNodeMonitorsDiskSpaceMonitor.Size
+	}
+	if monitorData.HudsonNodeMonitorsTemporarySpaceMonitor != nil {
+		tags["temp_path"] = monitorData.HudsonNodeMonitorsTemporarySpaceMonitor.Path
+		fields["temp_available"] = monitorData.HudsonNodeMonitorsTemporarySpaceMonitor.Size
+	}
+	if monitorData.HudsonNodeMonitorsSwapSpaceMonitor != nil {
+		fields["swap_available"] = monitorData.HudsonNodeMonitorsSwapSpaceMonitor.SwapAvailable
+		fields["memory_available"] = monitorData.HudsonNodeMonitorsSwapSpaceMonitor.MemoryAvailable
+		fields["swap_total"] = monitorData.HudsonNodeMonitorsSwapSpaceMonitor.SwapTotal
+		fields["memory_total"] = monitorData.HudsonNodeMonitorsSwapSpaceMonitor.MemoryTotal
 	}
 	acc.AddFields(measurementNode, fields, tags)
 
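Note on the hunk above: gatherNodeData now always emits a node metric, with status and num_executors set unconditionally and each monitor's tags/fields added only when that monitor block is present. A minimal standalone sketch of the same guard pattern (buildFields and swapMonitor are illustrative names, not the plugin's API):

package main

import "fmt"

// swapMonitor stands in for the pointer-based monitor structs introduced
// further down in this diff; only the shape matters here.
type swapMonitor struct {
	SwapAvailable float64
	SwapTotal     float64
}

// buildFields mirrors the guard pattern used in gatherNodeData: always report
// num_executors, and only add monitor-backed fields when the monitor block
// was present in the API response.
func buildFields(numExecutors int, swap *swapMonitor) map[string]interface{} {
	fields := make(map[string]interface{})
	fields["num_executors"] = numExecutors
	if swap != nil {
		fields["swap_available"] = swap.SwapAvailable
		fields["swap_total"] = swap.SwapTotal
	}
	return fields
}

func main() {
	// Offline node: no monitor data, but a metric is still produced.
	fmt.Println(buildFields(1, nil))
	// Online node: swap fields are added alongside num_executors.
	fmt.Println(buildFields(2, &swapMonitor{SwapAvailable: 212, SwapTotal: 500}))
}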
@@ -329,22 +337,16 @@ type nodeResponse struct {
 type node struct {
 	DisplayName string      `json:"displayName"`
 	Offline     bool        `json:"offline"`
+	NumExecutors int        `json:"numExecutors"`
 	MonitorData monitorData `json:"monitorData"`
 }
 
 type monitorData struct {
 	HudsonNodeMonitorsArchitectureMonitor string `json:"hudson.node_monitors.ArchitectureMonitor"`
-	HudsonNodeMonitorsDiskSpaceMonitor    nodeSpaceMonitor `json:"hudson.node_monitors.DiskSpaceMonitor"`
-	HudsonNodeMonitorsResponseTimeMonitor struct {
-		Average int64 `json:"average"`
-	} `json:"hudson.node_monitors.ResponseTimeMonitor"`
-	HudsonNodeMonitorsSwapSpaceMonitor struct {
-		SwapAvailable   float64 `json:"availableSwapSpace"`
-		SwapTotal       float64 `json:"totalSwapSpace"`
-		MemoryAvailable float64 `json:"availablePhysicalMemory"`
-		MemoryTotal     float64 `json:"totalPhysicalMemory"`
-	} `json:"hudson.node_monitors.SwapSpaceMonitor"`
-	HudsonNodeMonitorsTemporarySpaceMonitor nodeSpaceMonitor `json:"hudson.node_monitors.TemporarySpaceMonitor"`
+	HudsonNodeMonitorsDiskSpaceMonitor      *nodeSpaceMonitor `json:"hudson.node_monitors.DiskSpaceMonitor"`
+	HudsonNodeMonitorsResponseTimeMonitor   *responseTimeMonitor `json:"hudson.node_monitors.ResponseTimeMonitor"`
+	HudsonNodeMonitorsSwapSpaceMonitor      *swapSpaceMonitor `json:"hudson.node_monitors.SwapSpaceMonitor"`
+	HudsonNodeMonitorsTemporarySpaceMonitor *nodeSpaceMonitor `json:"hudson.node_monitors.TemporarySpaceMonitor"`
 }
 
 type nodeSpaceMonitor struct {
@@ -352,6 +354,17 @@ type nodeSpaceMonitor struct {
 	Size float64 `json:"size"`
 }
 
+type responseTimeMonitor struct {
+	Average int64 `json:"average"`
+}
+
+type swapSpaceMonitor struct {
+	SwapAvailable   float64 `json:"availableSwapSpace"`
+	SwapTotal       float64 `json:"totalSwapSpace"`
+	MemoryAvailable float64 `json:"availablePhysicalMemory"`
+	MemoryTotal     float64 `json:"totalPhysicalMemory"`
+}
+
 type jobResponse struct {
 	LastBuild jobBuild   `json:"lastBuild"`
 	Jobs      []innerJob `json:"jobs"`
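The switch from embedded value structs to pointer types is what makes the nil checks in gatherNodeData possible: encoding/json leaves a pointer field nil when the key is missing from the response, whereas a value-typed struct would silently decode to zero values that the gather code could not distinguish from real data. A small self-contained demonstration (diskMonitor and monitors are illustrative stand-ins for the plugin's types):

package main

import (
	"encoding/json"
	"fmt"
)

type diskMonitor struct {
	Path string  `json:"path"`
	Size float64 `json:"size"`
}

type monitors struct {
	Arch string       `json:"hudson.node_monitors.ArchitectureMonitor"`
	Disk *diskMonitor `json:"hudson.node_monitors.DiskSpaceMonitor"`
}

func main() {
	// Offline nodes report little or no monitor data, so the pointer
	// stays nil and the caller can skip the corresponding tags/fields.
	var offline monitors
	if err := json.Unmarshal([]byte(`{}`), &offline); err != nil {
		panic(err)
	}
	fmt.Println(offline.Disk == nil) // true

	// Online nodes fill the monitor block in, so the pointer is set.
	payload := `{"hudson.node_monitors.DiskSpaceMonitor": {"path": "/var/lib/jenkins", "size": 123}}`
	var online monitors
	if err := json.Unmarshal([]byte(payload), &online); err != nil {
		panic(err)
	}
	fmt.Println(online.Disk != nil, online.Disk.Path) // true /var/lib/jenkins
}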
jenkins_test.go
@@ -107,7 +107,7 @@ func TestGatherNodeData(t *testing.T) {
 			wantErr: true,
 		},
 		{
-			name: "bad empty monitor data",
+			name: "empty monitor data",
 			input: mockHandler{
 				responseMap: map[string]interface{}{
 					"/api/json": struct{}{},
@@ -119,7 +119,9 @@ func TestGatherNodeData(t *testing.T) {
 					},
 				},
 			},
-			wantErr: true,
+			output: &testutil.Accumulator{
+				Metrics: []*testutil.Metric{},
+			},
 		},
 		{
 			name: "filtered nodes",
@@ -135,7 +137,6 @@ func TestGatherNodeData(t *testing.T) {
 				},
 			},
 		},
-
 		{
 			name: "normal data collection",
 			input: mockHandler{
@@ -147,25 +148,18 @@ func TestGatherNodeData(t *testing.T) {
 							DisplayName: "master",
 							MonitorData: monitorData{
 								HudsonNodeMonitorsArchitectureMonitor: "linux",
-								HudsonNodeMonitorsResponseTimeMonitor: struct {
-									Average int64 `json:"average"`
-								}{
+								HudsonNodeMonitorsResponseTimeMonitor: &responseTimeMonitor{
 									Average: 10032,
 								},
-								HudsonNodeMonitorsDiskSpaceMonitor: nodeSpaceMonitor{
+								HudsonNodeMonitorsDiskSpaceMonitor: &nodeSpaceMonitor{
 									Path: "/path/1",
 									Size: 123,
 								},
-								HudsonNodeMonitorsTemporarySpaceMonitor: nodeSpaceMonitor{
+								HudsonNodeMonitorsTemporarySpaceMonitor: &nodeSpaceMonitor{
 									Path: "/path/2",
 									Size: 245,
 								},
-								HudsonNodeMonitorsSwapSpaceMonitor: struct {
-									SwapAvailable   float64 `json:"availableSwapSpace"`
-									SwapTotal       float64 `json:"totalSwapSpace"`
-									MemoryAvailable float64 `json:"availablePhysicalMemory"`
-									MemoryTotal     float64 `json:"totalPhysicalMemory"`
-								}{
+								HudsonNodeMonitorsSwapSpaceMonitor: &swapSpaceMonitor{
 									SwapAvailable: 212,
 									SwapTotal:     500,
 									MemoryAvailable: 101,
@@ -201,8 +195,40 @@ func TestGatherNodeData(t *testing.T) {
 				},
 			},
 		},
+		{
+			name: "slave is offline",
+			input: mockHandler{
+				responseMap: map[string]interface{}{
+					"/api/json": struct{}{},
+					"/computer/api/json": nodeResponse{
+						Computers: []node{
+							{
+								DisplayName:  "slave",
+								MonitorData:  monitorData{},
+								NumExecutors: 1,
+								Offline:      true,
+							},
+						},
+					},
+				},
+			},
+			output: &testutil.Accumulator{
+				Metrics: []*testutil.Metric{
+					{
+						Tags: map[string]string{
+							"node_name": "slave",
+							"status":    "offline",
+						},
+						Fields: map[string]interface{}{
+							"num_executors": 1,
+						},
+					},
+				},
+			},
+		},
 	}
 	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
 		ts := httptest.NewServer(test.input)
 		defer ts.Close()
 		j := &Jenkins{
@@ -237,6 +263,7 @@ func TestGatherNodeData(t *testing.T) {
 				}
 			}
 		}
+		})
 	}
 }
 
@@ -290,6 +317,7 @@ func TestInitialize(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
 		te := test.input.initialize(mockClient)
 		if !test.wantErr && te != nil {
 			t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error())
@@ -304,7 +332,7 @@ func TestInitialize(t *testing.T) {
 			t.Fatalf("%s: different MaxConnections Expected %d, got %d\n", test.name, test.output.MaxConnections, test.input.MaxConnections)
 		}
 		}
-
+		})
 	}
 }
 
@@ -572,6 +600,7 @@ func TestGatherJobs(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
 		ts := httptest.NewServer(test.input)
 		defer ts.Close()
 		j := &Jenkins{
@@ -587,7 +616,6 @@ func TestGatherJobs(t *testing.T) {
 		}
 		te := j.initialize(&http.Client{Transport: &http.Transport{}})
 		acc := new(testutil.Accumulator)
-		acc.SetDebug(true)
 		j.gatherJobs(acc)
 		if err := acc.FirstError(); err != nil {
 			te = err
@@ -617,5 +645,6 @@ func TestGatherJobs(t *testing.T) {
 			}
 
 		}
+		})
 	}
 }
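For reference, the tests above wrap a mockHandler in net/http/httptest and point the plugin at the resulting server URL. A stripped-down standalone version of that pattern is sketched below; the JSON body and the "computer" key are assumptions for illustration, not copied from the plugin.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Serve a canned Jenkins-style payload the way the tests' mockHandler does.
	mux := http.NewServeMux()
	mux.HandleFunc("/computer/api/json", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		// An offline agent: empty monitorData, but it still reports an executor.
		io.WriteString(w, `{"computer":[{"displayName":"slave","offline":true,"numExecutors":1,"monitorData":{}}]}`)
	})

	ts := httptest.NewServer(mux)
	defer ts.Close()

	resp, err := http.Get(ts.URL + "/computer/api/json")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}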