diff --git a/plugins/ceph/ceph.go b/plugins/ceph/ceph.go
index e8df1ad46..197756a8f 100644
--- a/plugins/ceph/ceph.go
+++ b/plugins/ceph/ceph.go
@@ -136,11 +136,11 @@ func (ceph *CephMetrics) getCommon(acc plugins.Accumulator) {
 	quorumValueMap["members"] = strings.Join(quorum_name, ",")

 	//clientIOs
-	sumOps := 0
-	sumWrs := 0
+	sumOps := int64(0)
+	sumWrs := int64(0)
 	for _, stat := range poolStatsList {
-		sumOps += stat.ClientIoRate.OpsPerSec
-		sumWrs += stat.ClientIoRate.WriteBytesPerSecond / 1024
+		sumOps += int64(stat.ClientIoRate.OpsPerSec)
+		sumWrs += int64(stat.ClientIoRate.WriteBytesPerSecond) / 1024
 	}

 	// OSD Epoch
@@ -148,8 +148,8 @@
 	acc.Add("osd_epoch", epoch, map[string]string{"cluster": ceph.Cluster})
 	acc.Add("health", health.OverallStatus, tags)
 	acc.Add("total_storage", cephDf.Stats.TotalBytes, tags)
-	acc.Add("used", cephDf.Stats.TotalUsedBytes, tags)
-	acc.Add("available", cephDf.Stats.TotalAvailableBytes, tags)
+	acc.Add("used_storage", cephDf.Stats.TotalUsedBytes, tags)
+	acc.Add("available_storage", cephDf.Stats.TotalAvailableBytes, tags)
 	acc.Add("client_io_kbs", sumWrs, tags)
 	acc.Add("client_io_ops", sumOps, tags)
 	acc.AddValuesWithTime("monitor", monitorValueMap, tags, time.Now())
@@ -241,9 +241,9 @@ func (ceph *CephMetrics) getPg(acc plugins.Accumulator) {
 	tags := map[string]string{"cluster": ceph.Cluster}
 	acc.Add("pg_map_count", pgMap.PgCount, tags)
 	acc.Add("pg_data_bytes", pgMap.DataBytes, tags)
-	acc.Add("pg_data_avail", pgMap.BytesAvail, tags)
-	acc.Add("pg_data_total", pgMap.BytesTotal, tags)
-	acc.Add("pg_data_used", pgMap.BytesUsed, tags)
+	acc.Add("pg_data_available_storage", pgMap.BytesAvail, tags)
+	acc.Add("pg_data_total_storage", pgMap.BytesTotal, tags)
+	acc.Add("pg_data_used_storage", pgMap.BytesUsed, tags)

 	var pgDump PgDump
 	if err := ceph.cephCommand(&pgDump, "pg", "dump"); err != nil {
@@ -251,25 +251,25 @@
 	}

 	poolOsdPgMap := make(PoolOsdPgMap, len(pgDump.PoolStats))
-	totalOsdPgs := make(map[int]int, len(pgDump.OsdStats))
+	totalOsdPgs := make(map[int64]int64, len(pgDump.OsdStats))

 	for _, pgStat := range pgDump.PgStats {
-		poolId, _ := strconv.Atoi(strings.Split(pgStat.PgId, ".")[0])
+		poolId, _ := strconv.ParseInt(strings.Split(pgStat.PgId, ".")[0], 10, 64)
 		osdPgMap := poolOsdPgMap[poolId]
 		if osdPgMap == nil {
-			osdPgMap = make(map[int]int, len(pgDump.OsdStats))
+			osdPgMap = make(map[int64]int64, len(pgDump.OsdStats))
 			poolOsdPgMap[poolId] = osdPgMap
 		}
 		for _, osd := range pgStat.Up {
-			osdPgMap[osd] = osdPgMap[osd] + 1
-			totalOsdPgs[osd] = totalOsdPgs[osd] + 1
+			osdPgMap[osd] = int64(osdPgMap[osd] + 1)
+			totalOsdPgs[osd] = int64(totalOsdPgs[osd] + 1)
 		}
 	}

 	for poolId, osdPgMap := range poolOsdPgMap {
-		poolPg := 0
+		poolPg := int64(0)
 		for osdId, pgs := range osdPgMap {
 			tags := map[string]string{"cluster": ceph.Cluster, "pool": fmt.Sprintf("%d", poolId), "osd": fmt.Sprintf("%d", osdId)}
 			poolPg += pgs
@@ -381,7 +381,6 @@ func (ceph *CephMetrics) getOSDPerf(acc plugins.Accumulator) {
 	args := []string{"--admin-daemon", location, "perf", "dump"}

 	if err := ceph.cephCommand(&osdPerf, args...); err != nil {
-		fmt.Println("error ", err)
 		return
 	}
diff --git a/plugins/ceph/ceph_data.go b/plugins/ceph/ceph_data.go
index f89b2d924..c89575bb4 100644
--- a/plugins/ceph/ceph_data.go
+++ b/plugins/ceph/ceph_data.go
@@ -4,7 +4,7 @@ type QuorumStat struct {
 	LeaderName string   `json:"quorum_leader_name"`
 	QuorumName []string `json:"quorum_names"`
 	MonitorMap struct {
-		Epoch int `json:"election_epoch"`
+		Epoch int64 `json:"election_epoch"`
 		Mons  []struct {
 			Name    string `json:"name"`
 			Address string `json:"addr"`
@@ -16,10 +16,10 @@ type CephHealth struct {
 	OverallStatus string `json:"overall_status"`
 }
 type CephStatus struct {
-	Quorum []int `json:"quorum"`
+	Quorum []int64 `json:"quorum"`
 	OSDMap struct {
 		OSDMap struct {
-			Epoch int `json:"epoch"`
+			Epoch int64 `json:"epoch"`
 		} `json:"osdmap"`
 	} `json:"osdmap"`
 	Health struct {
@@ -28,9 +28,9 @@ type CephStatus struct {
 	PgMap struct {
 		PgByState []struct {
 			Name  string `json:"state_name"`
-			Count int `json:"count"`
+			Count int64 `json:"count"`
 		} `json:"pgs_by_state"`
-		PgCount    int   `json:"num_pgs"`
+		PgCount    int64 `json:"num_pgs"`
 		DataBytes  int64 `json:"data_bytes"`
 		BytesUsed  int64 `json:"bytes_used"`
 		BytesAvail int64 `json:"bytes_avail"`
@@ -46,7 +46,7 @@ type CephDF struct {
 	} `json:"stats"`
 	Pools []struct {
 		Name  string `json:"name"`
-		Id    int `json:"id"`
+		Id    int64 `json:"id"`
 		Stats struct {
 			UsedKb     int64 `json:"kb_used"`
 			UsedBytes  int64 `json:"bytes_used"`
@@ -58,36 +58,36 @@ type CephDF struct {

 type PoolStats struct {
 	PoolName     string `json:"pool_name"`
-	PoolId       int `json:"pool_id"`
+	PoolId       int64 `json:"pool_id"`
 	ClientIoRate struct {
-		WriteBytesPerSecond int `json:"write_bytes_sec"`
-		OpsPerSec           int `json:"op_per_sec"`
+		WriteBytesPerSecond int64 `json:"write_bytes_sec"`
+		OpsPerSec           int64 `json:"op_per_sec"`
 	} `json:"client_io_rate"`
 }

 type PoolQuota struct {
 	PoolName   string `json:"pool_name"`
-	PoolId     int `json:"pool_id"`
+	PoolId     int64 `json:"pool_id"`
 	MaxObjects int64 `json:"quota_max_objects"`
 	MaxBytes   int64 `json:"quota_max_bytes"`
 }

 type OsdDump struct {
 	Osds []struct {
-		OsdNum   int `json:"osd"`
+		OsdNum   int64 `json:"osd"`
 		Uuid     string `json:"uuid"`
-		Up       int `json:"up"`
-		In       int `json:"in"`
+		Up       int64 `json:"up"`
+		In       int64 `json:"in"`
 		OsdState []string `json:"state"`
 	} `json:"osds"`
 }

 type OsdPerf struct {
 	PerfInfo []struct {
-		Id    int `json:"id"`
+		Id    int64 `json:"id"`
 		Stats struct {
-			CommitLatency int `json:"commit_latency_ms"`
-			ApplyLatency  int `json:"apply_latency_ms"`
+			CommitLatency int64 `json:"commit_latency_ms"`
+			ApplyLatency  int64 `json:"apply_latency_ms"`
 		} `json:"perf_stats"`
 	} `json:"osd_perf_infos"`
 }
@@ -97,18 +97,18 @@ type PgDump struct {
 		StatSum map[string]int64 `json:"stat_sum"`
 	} `json:"pg_stats_sum"`
 	PoolStats []struct {
-		PoolId  int `json:"poolid"`
+		PoolId  int64 `json:"poolid"`
 		StatSum map[string]interface{} `json:"stat_sum"`
 	} `json:"pool_stats"`
 	PgStats []struct {
-		PgId          string `json:"pgid"`
-		Up            []int  `json:"up"`
-		Acting        []int  `json:"acting"`
-		UpPrimary     int    `json:"up_primary"`
-		ActingPrimary int    `json:"acting_primary"`
+		PgId          string  `json:"pgid"`
+		Up            []int64 `json:"up"`
+		Acting        []int64 `json:"acting"`
+		UpPrimary     int64   `json:"up_primary"`
+		ActingPrimary int64   `json:"acting_primary"`
 	} `json:"pg_stats"`
 	OsdStats []struct {
-		Osd         int   `json:"osd"`
+		Osd         int64 `json:"osd"`
 		TotalKb     int64 `json:"kb"`
 		UsedKb      int64 `json:"kb_used"`
 		AvailableKb int64 `json:"kb_avail"`
@@ -117,18 +117,18 @@ type PgDump struct {

 type OsdPerfDump struct {
 	Osd struct {
-		RecoveryOps         int
-		OpWip               int `json:"op_wip"`
-		Op                  int `json:"op"`
-		OpInBytes           int `json:"op_in_bytes"`
-		OpOutBytes          int `json:"op_out_bytes"`
-		OpRead              int `json:"op_r"`
-		OpReadOutBytes      int `json:"op_r_out_bytes"`
-		OpWrite             int `json:"op_w"`
-		OpWriteInBytes      int `json:"op_w_in_bytes"`
-		OpReadWrite         int `json:"op_rw"`
-		OpReadWriteInBytes  int `json:"op_rw_in_btyes"`
-		OpReadWriteOutBytes int `json:"op_rw_out_bytes"`
+		RecoveryOps         int64
+		OpWip               int64 `json:"op_wip"`
+		Op                  int64 `json:"op"`
+		OpInBytes           int64 `json:"op_in_bytes"`
+		OpOutBytes          int64 `json:"op_out_bytes"`
+		OpRead              int64 `json:"op_r"`
+		OpReadOutBytes      int64 `json:"op_r_out_bytes"`
+		OpWrite             int64 `json:"op_w"`
+		OpWriteInBytes      int64 `json:"op_w_in_bytes"`
+		OpReadWrite         int64 `json:"op_rw"`
+		OpReadWriteInBytes  int64 `json:"op_rw_in_btyes"`
+		OpReadWriteOutBytes int64 `json:"op_rw_out_bytes"`
 		OpLatency           struct {
 			OSDLatencyCalc OSDLatency
@@ -164,8 +164,8 @@ type OsdPerfDump struct {
 }

 type OSDLatency struct {
-	AvgCount int `json:"avgcount"`
+	AvgCount int64 `json:"avgcount"`
 	Sum      float64 `json:"sum"`
 }

-type PoolOsdPgMap map[int]map[int]int
+type PoolOsdPgMap map[int64]map[int64]int64
diff --git a/plugins/ceph/ceph_test.go b/plugins/ceph/ceph_test.go
index 0f27f0965..c0762cd4f 100644
--- a/plugins/ceph/ceph_test.go
+++ b/plugins/ceph/ceph_test.go
@@ -23,44 +23,40 @@ func TestCephGenerateMetrics(t *testing.T) {
 	assert.NotNil(t, sample)
 	assert.Equal(t, p.Cluster, "ceph", "Same Cluster")

-	intMetrics := []string{"pg_map_count"}
-	// "pg_data_avail",
-	// "osd_count",
-	// "osd_utilization",
-	// "total_storage",
-	// "used",
-	// "available",
-	// "client_io_kbs",
-	// "client_io_ops",
-	// "pool_used",
-	// "pool_usedKb",
-	// "pool_maxbytes",
-	// "pool_utilization",
-	// "osd_used",
-	// "osd_total",
-	// "osd_epoch",
-	// "osd_latency_commit",
-	// "osd_latency_apply",
-	// "op",
-	// "op_in_bytes",
-	// "op_out_bytes",
-	// "op_r",
-	// "op_r_out_byes",
-	// "op_w",
-	// "op_w_in_bytes",
-	// "op_rw",
-	// "op_rw_in_bytes",
-	// "op_rw_out_bytes",
-	// "pool_objects",
-	// "pg_map_count",
-	// "pg_data_bytes",
-	// "pg_data_avail",
-	// "pg_data_total",
-	// "pg_data_used",
-	// "pg_distribution",
-	// "pg_distribution_pool",
-	// "pg_distribution_osd",
-	// }
+	intMetrics := []string{
+		"pg_data_bytes",
+		"pg_data_avail",
+		"pg_data_total",
+		"pg_data_used",
+		// "osd_count",
+		// "osd_utilization",
+		// "total_storage",
+		// "used",
+		// "available",
+		// "client_io_kbs",
+		// "client_io_ops",
+		// "pool_used",
+		// "pool_usedKb",
+		// "pool_maxbytes",
+		// "pool_utilization",
+		// "osd_used",
+		// "osd_total",
+		// "osd_epoch",
+		// "osd_latency_commit",
+		// "osd_latency_apply",
+		// "op",
+		// "op_in_bytes",
+		// "op_out_bytes",
+		// "op_r",
+		// "op_r_out_byes",
+		// "op_w",
+		// "op_w_in_bytes",
+		// "op_rw",
+		// "op_rw_in_bytes",
+		// "op_rw_out_bytes",
+		// "pool_objects",
+		// "pg_map_count",
+	}

 	for _, metric := range intMetrics {
 		assert.True(t, acc.HasIntValue(metric))
@@ -77,8 +75,8 @@ func TestCephGenerateMetricsDefault(t *testing.T) {
 	require.NoError(t, err)
 	assert.True(t, len(acc.Points) > 0)

-	// point, ok := acc.Get("ceph_op_wip")
-	// require.True(t, ok)
-	// assert.Equal(t, "ceph", point.Tags["cluster"])
+	point, ok := acc.Get("op_wip")
+	require.True(t, ok)
+	assert.Equal(t, "ceph", point.Tags["cluster"])
 }
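Note on the int-to-int64 migration above: Ceph byte and operation counters routinely exceed 2^31-1, and Go's int is only 32 bits wide on 32-bit builds, so decoding those counters into int can silently overflow or truncate. Below is a minimal standalone sketch of the two patterns the patch standardizes on; the clientIoRate struct and the JSON sample are illustrative stand-ins, not the plugin's actual types or data.

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

// Illustrative stand-in for PoolStats.ClientIoRate: counters that can
// exceed 2^31-1 are declared int64 so they decode safely everywhere.
type clientIoRate struct {
	WriteBytesPerSecond int64 `json:"write_bytes_sec"`
	OpsPerSec           int64 `json:"op_per_sec"`
}

func main() {
	// Made-up sample: ~5 TB/s would overflow a 32-bit int.
	sample := `{"write_bytes_sec": 5497558138880, "op_per_sec": 1200}`

	var rate clientIoRate
	if err := json.Unmarshal([]byte(sample), &rate); err != nil {
		panic(err)
	}

	// Sum in int64 from the start, mirroring sumOps/sumWrs in getCommon.
	sumWrs := rate.WriteBytesPerSecond / 1024
	fmt.Println("client_io_kbs:", sumWrs) // 5368709120

	// strconv.ParseInt replaces strconv.Atoi so the pool prefix of a
	// pg ID ("7" in "7.1f") is already an int64 and can key the
	// map[int64]int64 values of PoolOsdPgMap directly.
	poolId, err := strconv.ParseInt(strings.Split("7.1f", ".")[0], 10, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println("pool id:", poolId)
}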