completed test. change int to int64.

commit 5ae390066f
parent a8f4e1d443
@@ -136,11 +136,11 @@ func (ceph *CephMetrics) getCommon(acc plugins.Accumulator) {
 	quorumValueMap["members"] = strings.Join(quorum_name, ",")

 	//clientIOs
-	sumOps := 0
-	sumWrs := 0
+	sumOps := int64(0)
+	sumWrs := int64(0)
 	for _, stat := range poolStatsList {
-		sumOps += stat.ClientIoRate.OpsPerSec
-		sumWrs += stat.ClientIoRate.WriteBytesPerSecond / 1024
+		sumOps += int64(stat.ClientIoRate.OpsPerSec)
+		sumWrs += int64(stat.ClientIoRate.WriteBytesPerSecond) / 1024
 	}

 	// OSD Epoch
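The int-to-int64 change in this hunk is the core of the commit: Go's int is platform-sized (32 bits on 32-bit builds), so summing per-pool byte rates can overflow there, and the updated test asserts acc.HasIntValue on the emitted metrics. Below is a minimal, self-contained sketch of the same accumulation pattern, with invented sample values rather than the plugin's parsed pool stats:

    package main

    import "fmt"

    // poolIoRate mirrors the shape of ClientIoRate used in the diff (illustrative copy).
    type poolIoRate struct {
    	WriteBytesPerSecond int64
    	OpsPerSec           int64
    }

    func main() {
    	rates := []poolIoRate{
    		{WriteBytesPerSecond: 3 << 30, OpsPerSec: 1200}, // 3 GiB/s, made-up value
    		{WriteBytesPerSecond: 2 << 30, OpsPerSec: 800},  // 2 GiB/s, made-up value
    	}

    	sumOps := int64(0)
    	sumWrs := int64(0) // KB/s, summed across pools
    	for _, r := range rates {
    		sumOps += r.OpsPerSec
    		sumWrs += r.WriteBytesPerSecond / 1024
    	}
    	// 5 GiB/s of raw write bytes already exceeds math.MaxInt32,
    	// which is why the plugin now sums into int64 explicitly.
    	fmt.Println(sumOps, sumWrs)
    }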
@@ -148,8 +148,8 @@ func (ceph *CephMetrics) getCommon(acc plugins.Accumulator) {
 	acc.Add("osd_epoch", epoch, map[string]string{"cluster": ceph.Cluster})
 	acc.Add("health", health.OverallStatus, tags)
 	acc.Add("total_storage", cephDf.Stats.TotalBytes, tags)
-	acc.Add("used", cephDf.Stats.TotalUsedBytes, tags)
-	acc.Add("available", cephDf.Stats.TotalAvailableBytes, tags)
+	acc.Add("used_storage", cephDf.Stats.TotalUsedBytes, tags)
+	acc.Add("available_storage", cephDf.Stats.TotalAvailableBytes, tags)
 	acc.Add("client_io_kbs", sumWrs, tags)
 	acc.Add("client_io_ops", sumOps, tags)
 	acc.AddValuesWithTime("monitor", monitorValueMap, tags, time.Now())
@@ -241,9 +241,9 @@ func (ceph *CephMetrics) getPg(acc plugins.Accumulator) {
 	tags := map[string]string{"cluster": ceph.Cluster}
 	acc.Add("pg_map_count", pgMap.PgCount, tags)
 	acc.Add("pg_data_bytes", pgMap.DataBytes, tags)
-	acc.Add("pg_data_avail", pgMap.BytesAvail, tags)
-	acc.Add("pg_data_total", pgMap.BytesTotal, tags)
-	acc.Add("pg_data_used", pgMap.BytesUsed, tags)
+	acc.Add("pg_data_available_storage", pgMap.BytesAvail, tags)
+	acc.Add("pg_data_total_storage", pgMap.BytesTotal, tags)
+	acc.Add("pg_data_used_storage", pgMap.BytesUsed, tags)

 	var pgDump PgDump
 	if err := ceph.cephCommand(&pgDump, "pg", "dump"); err != nil {
@@ -251,25 +251,25 @@ func (ceph *CephMetrics) getPg(acc plugins.Accumulator) {
 	}

 	poolOsdPgMap := make(PoolOsdPgMap, len(pgDump.PoolStats))
-	totalOsdPgs := make(map[int]int, len(pgDump.OsdStats))
+	totalOsdPgs := make(map[int64]int64, len(pgDump.OsdStats))

 	for _, pgStat := range pgDump.PgStats {
-		poolId, _ := strconv.Atoi(strings.Split(pgStat.PgId, ".")[0])
+		poolId, _ := strconv.ParseInt(strings.Split(pgStat.PgId, ".")[0], 10, 64)

 		osdPgMap := poolOsdPgMap[poolId]
 		if osdPgMap == nil {
-			osdPgMap = make(map[int]int, len(pgDump.OsdStats))
+			osdPgMap = make(map[int64]int64, len(pgDump.OsdStats))
 			poolOsdPgMap[poolId] = osdPgMap
 		}

 		for _, osd := range pgStat.Up {
-			osdPgMap[osd] = osdPgMap[osd] + 1
-			totalOsdPgs[osd] = totalOsdPgs[osd] + 1
+			osdPgMap[osd] = int64(osdPgMap[osd] + 1)
+			totalOsdPgs[osd] = int64(totalOsdPgs[osd] + 1)
 		}
 	}

 	for poolId, osdPgMap := range poolOsdPgMap {
-		poolPg := 0
+		poolPg := int64(0)
 		for osdId, pgs := range osdPgMap {
 			tags := map[string]string{"cluster": ceph.Cluster, "pool": fmt.Sprintf("%d", poolId), "osd": fmt.Sprintf("%d", osdId)}
 			poolPg += pgs
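The strconv.Atoi to strconv.ParseInt swap in this hunk is what lets poolId key an int64 map without a cast: ParseInt(s, 10, 64) returns an int64 directly, while Atoi returns a plain int. A small illustrative sketch of parsing the pool id out of a pgid such as "2.1a" (the pgid format is "<pool>.<pg>"; the sample value is made up):

    package main

    import (
    	"fmt"
    	"strconv"
    	"strings"
    )

    func main() {
    	pgId := "2.1a" // hypothetical pgid: pool 2, placement group 0x1a

    	// Same call as in the diff: base-10 parse into a 64-bit value.
    	poolId, err := strconv.ParseInt(strings.Split(pgId, ".")[0], 10, 64)
    	if err != nil {
    		fmt.Println("unparsable pgid:", err)
    		return
    	}

    	// poolId is already int64, so it can key a map[int64]int64 directly.
    	pgsPerPool := map[int64]int64{}
    	pgsPerPool[poolId]++
    	fmt.Println(poolId, pgsPerPool)
    }

Note that the plugin discards the parse error (poolId, _ := ...), exactly as it did with Atoi, so a malformed pgid would silently count toward pool 0.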
@@ -381,7 +381,6 @@ func (ceph *CephMetrics) getOSDPerf(acc plugins.Accumulator) {
 	args := []string{"--admin-daemon", location, "perf", "dump"}

 	if err := ceph.cephCommand(&osdPerf, args...); err != nil {
-		fmt.Println("error ", err)
 		return
 	}

@@ -4,7 +4,7 @@ type QuorumStat struct {
 	LeaderName string `json:"quorum_leader_name"`
 	QuorumName []string `json:"quorum_names"`
 	MonitorMap struct {
-		Epoch int `json:"election_epoch"`
+		Epoch int64 `json:"election_epoch"`
 		Mons []struct {
 			Name string `json:"name"`
 			Address string `json:"addr"`
@@ -16,10 +16,10 @@ type CephHealth struct {
 	OverallStatus string `json:"overall_status"`
 }
 type CephStatus struct {
-	Quorum []int `json:"quorum"`
+	Quorum []int64 `json:"quorum"`
 	OSDMap struct {
 		OSDMap struct {
-			Epoch int `json:"epoch"`
+			Epoch int64 `json:"epoch"`
 		} `json:"osdmap"`
 	} `json:"osdmap"`
 	Health struct {
@@ -28,9 +28,9 @@ type CephStatus struct {
 	PgMap struct {
 		PgByState []struct {
 			Name string `json:"state_name"`
-			Count int `json:"count"`
+			Count int64 `json:"count"`
 		} `json:"pgs_by_state"`
-		PgCount int `json:"num_pgs"`
+		PgCount int64 `json:"num_pgs"`
 		DataBytes int64 `json:"data_bytes"`
 		BytesUsed int64 `json:"bytes_used"`
 		BytesAvail int64 `json:"bytes_avail"`
@@ -46,7 +46,7 @@ type CephDF struct {
 	} `json:"stats"`
 	Pools []struct {
 		Name string `json:"name"`
-		Id int `json:"id"`
+		Id int64 `json:"id"`
 		Stats struct {
 			UsedKb int64 `json:"kb_used"`
 			UsedBytes int64 `json:"bytes_used"`
@@ -58,36 +58,36 @@ type CephDF struct {

 type PoolStats struct {
 	PoolName string `json:"pool_name"`
-	PoolId int `json:"pool_id"`
+	PoolId int64 `json:"pool_id"`
 	ClientIoRate struct {
-		WriteBytesPerSecond int `json:"write_bytes_sec"`
-		OpsPerSec int `json:"op_per_sec"`
+		WriteBytesPerSecond int64 `json:"write_bytes_sec"`
+		OpsPerSec int64 `json:"op_per_sec"`
 	} `json:"client_io_rate"`
 }

 type PoolQuota struct {
 	PoolName string `json:"pool_name"`
-	PoolId int `json:"pool_id"`
+	PoolId int64 `json:"pool_id"`
 	MaxObjects int64 `json:"quota_max_objects"`
 	MaxBytes int64 `json:"quota_max_bytes"`
 }

 type OsdDump struct {
 	Osds []struct {
-		OsdNum int `json:"osd"`
+		OsdNum int64 `json:"osd"`
 		Uuid string `json:"uuid"`
-		Up int `json:"up"`
-		In int `json:"in"`
+		Up int64 `json:"up"`
+		In int64 `json:"in"`
 		OsdState []string `json:"state"`
 	} `json:"osds"`
 }

 type OsdPerf struct {
 	PerfInfo []struct {
-		Id int `json:"id"`
+		Id int64 `json:"id"`
 		Stats struct {
-			CommitLatency int `json:"commit_latency_ms"`
-			ApplyLatency int `json:"apply_latency_ms"`
+			CommitLatency int64 `json:"commit_latency_ms"`
+			ApplyLatency int64 `json:"apply_latency_ms"`
 		} `json:"perf_stats"`
 	} `json:"osd_perf_infos"`
 }
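Widening these struct fields to int64 does not change how they decode: encoding/json unmarshals JSON numbers into whatever sized integer field the tag points at. A hedged, self-contained example decoding a trimmed-down pool-stats style document into the PoolStats shape above (the JSON sample is invented, and the struct is re-declared locally so the snippet compiles on its own):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Local copy of the PoolStats shape from the diff above.
    type PoolStats struct {
    	PoolName     string `json:"pool_name"`
    	PoolId       int64  `json:"pool_id"`
    	ClientIoRate struct {
    		WriteBytesPerSecond int64 `json:"write_bytes_sec"`
    		OpsPerSec           int64 `json:"op_per_sec"`
    	} `json:"client_io_rate"`
    }

    func main() {
    	// Invented sample; real input would come from `ceph osd pool stats -f json`.
    	raw := []byte(`[{"pool_name":"rbd","pool_id":3,
    		"client_io_rate":{"write_bytes_sec":5368709120,"op_per_sec":420}}]`)

    	var stats []PoolStats
    	if err := json.Unmarshal(raw, &stats); err != nil {
    		fmt.Println("decode error:", err)
    		return
    	}
    	// 5368709120 does not fit a 32-bit int, but decodes cleanly into int64.
    	fmt.Println(stats[0].PoolName, stats[0].ClientIoRate.WriteBytesPerSecond)
    }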
@@ -97,18 +97,18 @@ type PgDump struct {
 		StatSum map[string]int64 `json:"stat_sum"`
 	} `json:"pg_stats_sum"`
 	PoolStats []struct {
-		PoolId int `json:"poolid"`
+		PoolId int64 `json:"poolid"`
 		StatSum map[string]interface{} `json:"stat_sum"`
 	} `json:"pool_stats"`
 	PgStats []struct {
 		PgId string `json:"pgid"`
-		Up []int `json:"up"`
-		Acting []int `json:"acting"`
-		UpPrimary int `json:"up_primary"`
-		ActingPrimary int `json:"acting_primary"`
+		Up []int64 `json:"up"`
+		Acting []int64 `json:"acting"`
+		UpPrimary int64 `json:"up_primary"`
+		ActingPrimary int64 `json:"acting_primary"`
 	} `json:"pg_stats"`
 	OsdStats []struct {
-		Osd int `json:"osd"`
+		Osd int64 `json:"osd"`
 		TotalKb int64 `json:"kb"`
 		UsedKb int64 `json:"kb_used"`
 		AvailableKb int64 `json:"kb_avail"`
@@ -117,18 +117,18 @@ type PgDump struct {

 type OsdPerfDump struct {
 	Osd struct {
-		RecoveryOps int
-		OpWip int `json:"op_wip"`
-		Op int `json:"op"`
-		OpInBytes int `json:"op_in_bytes"`
-		OpOutBytes int `json:"op_out_bytes"`
-		OpRead int `json:"op_r"`
-		OpReadOutBytes int `json:"op_r_out_bytes"`
-		OpWrite int `json:"op_w"`
-		OpWriteInBytes int `json:"op_w_in_bytes"`
-		OpReadWrite int `json:"op_rw"`
-		OpReadWriteInBytes int `json:"op_rw_in_btyes"`
-		OpReadWriteOutBytes int `json:"op_rw_out_bytes"`
+		RecoveryOps int64
+		OpWip int64 `json:"op_wip"`
+		Op int64 `json:"op"`
+		OpInBytes int64 `json:"op_in_bytes"`
+		OpOutBytes int64 `json:"op_out_bytes"`
+		OpRead int64 `json:"op_r"`
+		OpReadOutBytes int64 `json:"op_r_out_bytes"`
+		OpWrite int64 `json:"op_w"`
+		OpWriteInBytes int64 `json:"op_w_in_bytes"`
+		OpReadWrite int64 `json:"op_rw"`
+		OpReadWriteInBytes int64 `json:"op_rw_in_btyes"`
+		OpReadWriteOutBytes int64 `json:"op_rw_out_bytes"`

 		OpLatency struct {
 			OSDLatencyCalc OSDLatency
@@ -164,8 +164,8 @@ type OsdPerfDump struct {
 }

 type OSDLatency struct {
-	AvgCount int `json:"avgcount"`
+	AvgCount int64 `json:"avgcount"`
 	Sum float64 `json:"sum"`
 }

-type PoolOsdPgMap map[int]map[int]int
+type PoolOsdPgMap map[int64]map[int64]int64
@@ -23,44 +23,42 @@ func TestCephGenerateMetrics(t *testing.T) {
 	assert.NotNil(t, sample)
 	assert.Equal(t, p.Cluster, "ceph", "Same Cluster")

-	intMetrics := []string{"pg_map_count"}
-	// "pg_data_avail",
-	// "osd_count",
-	// "osd_utilization",
-	// "total_storage",
-	// "used",
-	// "available",
-	// "client_io_kbs",
-	// "client_io_ops",
-	// "pool_used",
-	// "pool_usedKb",
-	// "pool_maxbytes",
-	// "pool_utilization",
-	// "osd_used",
-	// "osd_total",
-	// "osd_epoch",
-	// "osd_latency_commit",
-	// "osd_latency_apply",
-	// "op",
-	// "op_in_bytes",
-	// "op_out_bytes",
-	// "op_r",
-	// "op_r_out_byes",
-	// "op_w",
-	// "op_w_in_bytes",
-	// "op_rw",
-	// "op_rw_in_bytes",
-	// "op_rw_out_bytes",
-	// "pool_objects",
-	// "pg_map_count",
-	// "pg_data_bytes",
-	// "pg_data_avail",
-	// "pg_data_total",
-	// "pg_data_used",
-	// "pg_distribution",
-	// "pg_distribution_pool",
-	// "pg_distribution_osd",
-	// }
+	intMetrics := []string{
+		"pg_data_bytes",
+		"pg_data_avail",
+		// "osd_count",
+		// "osd_utilization",
+		// "total_storage",
+		// "used",
+		// "available",
+		// "client_io_kbs",
+		// "client_io_ops",
+		// "pool_used",
+		// "pool_usedKb",
+		// "pool_maxbytes",
+		// "pool_utilization",
+		// "osd_used",
+		// "osd_total",
+		// "osd_epoch",
+		// "osd_latency_commit",
+		// "osd_latency_apply",
+		// "op",
+		// "op_in_bytes",
+		// "op_out_bytes",
+		// "op_r",
+		// "op_r_out_byes",
+		// "op_w",
+		// "op_w_in_bytes",
+		// "op_rw",
+		// "op_rw_in_bytes",
+		// "op_rw_out_bytes",
+		// "pool_objects",
+		// "pg_map_count",
+		"pg_data_bytes",
+		"pg_data_avail",
+		"pg_data_total",
+		"pg_data_used",
+	}

 	for _, metric := range intMetrics {
 		assert.True(t, acc.HasIntValue(metric))
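The test now loops over the uncommented pg_data_* metrics and asserts acc.HasIntValue, which presumably only matches values stored as int64; that is why the corresponding fields and sums were widened in this commit. For illustration only, a sketch of the kind of check such a helper is assumed to perform (this is not the project's accumulator code, and the point type here is hypothetical):

    package main

    import "fmt"

    // point is a stand-in for an accumulated metric; the real accumulator
    // type lives in the plugin framework and is not reproduced here.
    type point struct {
    	Measurement string
    	Value       interface{}
    }

    // hasIntValue mirrors what an accumulator's HasIntValue check is assumed
    // to do: report whether the named metric exists and carries an int64 value.
    func hasIntValue(points []point, name string) bool {
    	for _, p := range points {
    		if p.Measurement == name {
    			_, ok := p.Value.(int64)
    			return ok
    		}
    	}
    	return false
    }

    func main() {
    	pts := []point{
    		{Measurement: "pg_data_bytes", Value: int64(1 << 40)},
    		{Measurement: "health", Value: "HEALTH_OK"},
    	}
    	fmt.Println(hasIntValue(pts, "pg_data_bytes")) // true: stored as int64
    	fmt.Println(hasIntValue(pts, "health"))        // false: not an integer metric
    }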
@@ -77,8 +75,8 @@ func TestCephGenerateMetricsDefault(t *testing.T) {
 	require.NoError(t, err)
 	assert.True(t, len(acc.Points) > 0)

-	// point, ok := acc.Get("ceph_op_wip")
-	// require.True(t, ok)
-	// assert.Equal(t, "ceph", point.Tags["cluster"])
+	point, ok := acc.Get("op_wip")
+	require.True(t, ok)
+	assert.Equal(t, "ceph", point.Tags["cluster"])

 }