Modified as commented in code review

This commit is contained in:
madz 2015-07-03 16:17:49 +08:00
parent 5ae390066f
commit cfb92e0e37
2 changed files with 46 additions and 40 deletions

View File

@ -136,11 +136,11 @@ func (ceph *CephMetrics) getCommon(acc plugins.Accumulator) {
quorumValueMap["members"] = strings.Join(quorum_name, ",") quorumValueMap["members"] = strings.Join(quorum_name, ",")
//clientIOs //clientIOs
sumOps := int64(0) var sumOps int64 = 0
sumWrs := int64(0) var sumWrs int64 = 0
for _, stat := range poolStatsList { for _, stat := range poolStatsList {
sumOps += int64(stat.ClientIoRate.OpsPerSec) sumOps += stat.ClientIoRate.OpsPerSec
sumWrs += int64(stat.ClientIoRate.WriteBytesPerSecond) / 1024 sumWrs += stat.ClientIoRate.WriteBytesPerSecond / 1024
} }
// OSD Epoch // OSD Epoch
@ -269,7 +269,7 @@ func (ceph *CephMetrics) getPg(acc plugins.Accumulator) {
} }
for poolId, osdPgMap := range poolOsdPgMap { for poolId, osdPgMap := range poolOsdPgMap {
poolPg := int64(0) var poolPg int64 = 0
for osdId, pgs := range osdPgMap { for osdId, pgs := range osdPgMap {
tags := map[string]string{"cluster": ceph.Cluster, "pool": fmt.Sprintf("%d", poolId), "osd": fmt.Sprintf("%d", osdId)} tags := map[string]string{"cluster": ceph.Cluster, "pool": fmt.Sprintf("%d", poolId), "osd": fmt.Sprintf("%d", osdId)}
poolPg += pgs poolPg += pgs
@ -346,8 +346,8 @@ func (ceph *CephMetrics) getOSDDaemon(acc plugins.Accumulator) {
tag := map[string]string{"cluster": ceph.Cluster, "osd": fmt.Sprintf("%d", osdNum)} tag := map[string]string{"cluster": ceph.Cluster, "osd": fmt.Sprintf("%d", osdNum)}
acc.Add("osd_utilization", utilized, tag) acc.Add("osd_utilization", utilized, tag)
acc.Add("osd_used", utilized, tag) acc.Add("osd_used_storage", used, tag)
acc.Add("osd_total", total, tag) acc.Add("osd_total_storage", total, tag)
} }
//OSD Commit and Apply Latency //OSD Commit and Apply Latency

View File

@ -24,46 +24,52 @@ func TestCephGenerateMetrics(t *testing.T) {
assert.Equal(t, p.Cluster, "ceph", "Same Cluster") assert.Equal(t, p.Cluster, "ceph", "Same Cluster")
intMetrics := []string{ intMetrics := []string{
"total_storage",
"used_storage",
"available_storage",
"client_io_kbs",
"client_io_ops",
"pool_used",
"pool_usedKb",
"pool_maxbytes",
"pool_objects",
"osd_epoch",
"op_in_bytes",
"op_out_bytes",
"op_r",
"op_w",
"op_w_in_bytes",
"op_rw",
"op_rw_in_bytes",
"op_rw_out_bytes",
"pg_map_count",
"pg_data_bytes", "pg_data_bytes",
"pg_data_avail", "pg_data_total_storage",
// "osd_count", "pg_data_used_storage",
// "osd_utilization", "pg_distribution",
// "total_storage", "pg_distribution_pool",
// "used", "pg_distribution_osd",
// "available", }
// "client_io_kbs",
// "client_io_ops", floatMetrics := []string{
// "pool_used", "osd_utilization",
// "pool_usedKb", "pool_utilization",
// "pool_maxbytes", "osd_used_storage",
// "pool_utilization", "osd_total_storage",
// "osd_used",
// "osd_total",
// "osd_epoch",
// "osd_latency_commit",
// "osd_latency_apply",
// "op",
// "op_in_bytes",
// "op_out_bytes",
// "op_r",
// "op_r_out_byes",
// "op_w",
// "op_w_in_bytes",
// "op_rw",
// "op_rw_in_bytes",
// "op_rw_out_bytes",
// "pool_objects",
// "pg_map_count",
"pg_data_bytes",
"pg_data_avail",
"pg_data_total",
"pg_data_used",
} }
for _, metric := range intMetrics { for _, metric := range intMetrics {
assert.True(t, acc.HasIntValue(metric)) assert.True(t, acc.HasIntValue(metric))
} }
for _, metric := range floatMetrics {
assert.True(t, acc.HasFloatValue(metric))
}
} }
func TestCephGenerateMetricsDefault(t *testing.T) { func TestCephGenerateMetricsDefault(t *testing.T) {