add missing redis metrics

This makes sure that all redis metrics are gathered without having to use a hard-coded list of which metrics to pull in.
Patrick Hemmer 2016-08-31 01:05:11 -04:00
parent cc2b53abf4
commit 41a5ee6571
3 changed files with 128 additions and 89 deletions
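
In short, gatherInfoOutput now parses every key:value pair of the redis INFO output and infers each value's type (uint, then int, then float, falling back to string), instead of only keeping keys listed in the Tracking map; a few noisy keys (the *_human variants, mem_allocator, and most of the Server section) are still skipped. The sketch below is not the plugin code, just a minimal standalone illustration of that type-sniffing approach; parseInfo and the sample string are made up for the example.

    // Minimal sketch of type-sniffed INFO parsing (illustrative only, not the plugin code).
    package main

    import (
    	"bufio"
    	"fmt"
    	"strconv"
    	"strings"
    )

    // parseInfo converts redis INFO text into a map of typed fields.
    // Section headers ("# Server", "# Memory", ...) are simply skipped here for brevity.
    func parseInfo(info string) map[string]interface{} {
    	fields := make(map[string]interface{})
    	scanner := bufio.NewScanner(strings.NewReader(info))
    	for scanner.Scan() {
    		line := scanner.Text()
    		if len(line) == 0 || line[0] == '#' {
    			continue
    		}
    		parts := strings.SplitN(line, ":", 2)
    		if len(parts) < 2 {
    			continue
    		}
    		name, val := parts[0], strings.TrimSpace(parts[1])
    		// Try uint, then int, then float; otherwise keep the raw string.
    		if u, err := strconv.ParseUint(val, 10, 64); err == nil {
    			fields[name] = u
    		} else if i, err := strconv.ParseInt(val, 10, 64); err == nil {
    			fields[name] = i
    		} else if f, err := strconv.ParseFloat(val, 64); err == nil {
    			fields[name] = f
    		} else {
    			fields[name] = val
    		}
    	}
    	return fields
    }

    func main() {
    	// Sample values are illustrative.
    	sample := "# Memory\r\nused_memory:1003936\r\nmem_fragmentation_ratio:0.81\r\n# Persistence\r\nrdb_last_bgsave_status:ok\r\n"
    	fmt.Println(parseInfo(sample))
    }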

CHANGELOG.md

@@ -9,6 +9,7 @@
 - [#1572](https://github.com/influxdata/telegraf/pull/1572): mesos improvements.
 - [#1513](https://github.com/influxdata/telegraf/issues/1513): Add Ceph Cluster Performance Statistics
 - [#1650](https://github.com/influxdata/telegraf/issues/1650): Ability to configure response_timeout in httpjson input.
+- [#1685](https://github.com/influxdata/telegraf/issues/1685): Add additional redis metrics.
 
 ### Bugfixes

plugins/inputs/redis/redis.go

@@ -44,40 +44,9 @@ func (r *Redis) Description() string {
 }
 
 var Tracking = map[string]string{
 	"uptime_in_seconds": "uptime",
 	"connected_clients": "clients",
-	"used_memory": "used_memory",
-	"used_memory_rss": "used_memory_rss",
-	"used_memory_peak": "used_memory_peak",
-	"used_memory_lua": "used_memory_lua",
-	"rdb_changes_since_last_save": "rdb_changes_since_last_save",
-	"total_connections_received": "total_connections_received",
-	"total_commands_processed": "total_commands_processed",
-	"instantaneous_ops_per_sec": "instantaneous_ops_per_sec",
-	"instantaneous_input_kbps": "instantaneous_input_kbps",
-	"instantaneous_output_kbps": "instantaneous_output_kbps",
-	"sync_full": "sync_full",
-	"sync_partial_ok": "sync_partial_ok",
-	"sync_partial_err": "sync_partial_err",
-	"expired_keys": "expired_keys",
-	"evicted_keys": "evicted_keys",
-	"keyspace_hits": "keyspace_hits",
-	"keyspace_misses": "keyspace_misses",
-	"pubsub_channels": "pubsub_channels",
-	"pubsub_patterns": "pubsub_patterns",
-	"latest_fork_usec": "latest_fork_usec",
-	"connected_slaves": "connected_slaves",
-	"master_repl_offset": "master_repl_offset",
-	"master_last_io_seconds_ago": "master_last_io_seconds_ago",
-	"repl_backlog_active": "repl_backlog_active",
-	"repl_backlog_size": "repl_backlog_size",
-	"repl_backlog_histlen": "repl_backlog_histlen",
-	"mem_fragmentation_ratio": "mem_fragmentation_ratio",
-	"used_cpu_sys": "used_cpu_sys",
-	"used_cpu_user": "used_cpu_user",
-	"used_cpu_sys_children": "used_cpu_sys_children",
-	"used_cpu_user_children": "used_cpu_user_children",
-	"role": "replication_role",
+	"role": "replication_role",
 }
 
 var ErrProtocolError = errors.New("redis protocol error")
@@ -188,6 +157,7 @@ func gatherInfoOutput(
 	acc telegraf.Accumulator,
 	tags map[string]string,
 ) error {
+	var section string
 	var keyspace_hits, keyspace_misses uint64 = 0, 0
 
 	scanner := bufio.NewScanner(rdr)
@@ -198,7 +168,13 @@ func gatherInfoOutput(
 			break
 		}
 
-		if len(line) == 0 || line[0] == '#' {
+		if len(line) == 0 {
+			continue
+		}
+		if line[0] == '#' {
+			if len(line) > 2 {
+				section = line[2:]
+			}
 			continue
 		}
 
@@ -206,42 +182,69 @@ func gatherInfoOutput(
 		if len(parts) < 2 {
 			continue
 		}
 		name := string(parts[0])
-		metric, ok := Tracking[name]
-		if !ok {
-			kline := strings.TrimSpace(string(parts[1]))
-			gatherKeyspaceLine(name, kline, acc, tags)
+
+		if section == "Server" {
+			if name != "lru_clock" && name != "uptime_in_seconds" {
+				continue
+			}
+		}
+
+		if name == "mem_allocator" {
 			continue
 		}
+
+		if strings.HasSuffix(name, "_human") {
+			continue
+		}
+
+		metric, ok := Tracking[name]
+		if !ok {
+			if section == "Keyspace" {
+				kline := strings.TrimSpace(string(parts[1]))
+				gatherKeyspaceLine(name, kline, acc, tags)
+				continue
+			}
+			metric = name
+		}
 
 		val := strings.TrimSpace(parts[1])
-		ival, err := strconv.ParseUint(val, 10, 64)
 
-		if name == "keyspace_hits" {
-			keyspace_hits = ival
+		// Try parsing as a uint
+		if ival, err := strconv.ParseUint(val, 10, 64); err == nil {
+			switch name {
+			case "keyspace_hits":
+				keyspace_hits = ival
+			case "keyspace_misses":
+				keyspace_misses = ival
+			case "rdb_last_save_time":
+				// influxdb can't calculate this, so we have to do it
+				fields["rdb_last_save_time_elapsed"] = uint64(time.Now().Unix()) - ival
+			}
+			fields[metric] = ival
+			continue
 		}
 
-		if name == "keyspace_misses" {
-			keyspace_misses = ival
+		// Try parsing as an int
+		if ival, err := strconv.ParseInt(val, 10, 64); err == nil {
+			fields[metric] = ival
+			continue
 		}
 
+		// Try parsing as a float
+		if fval, err := strconv.ParseFloat(val, 64); err == nil {
+			fields[metric] = fval
+			continue
+		}
+
+		// Treat it as a string
 		if name == "role" {
 			tags["replication_role"] = val
 			continue
 		}
 
-		if err == nil {
-			fields[metric] = ival
-			continue
-		}
-
-		fval, err := strconv.ParseFloat(val, 64)
-		if err != nil {
-			return err
-		}
-		fields[metric] = fval
+		fields[metric] = val
 	}
 
 	var keyspace_hitrate float64 = 0.0
 	if keyspace_hits != 0 || keyspace_misses != 0 {
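
For context on the new section variable: redis groups INFO output under "# Section" headers, and the code above keys its special cases off those names (most of the Server section is skipped, and Keyspace lines are handed to gatherKeyspaceLine). An abridged example of what the INFO text looks like, with values taken from the test fixture where visible and otherwise illustrative (the db0 key/expire counts in particular are made up):

    # Server
    uptime_in_seconds:238
    lru_clock:2364819

    # Memory
    used_memory:1003936
    mem_fragmentation_ratio:0.81

    # Persistence
    rdb_last_save_time:1428427941
    rdb_last_bgsave_status:ok

    # Keyspace
    db0:keys=2,expires=0,avg_ttl=0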

plugins/inputs/redis/redis_test.go

@@ -5,8 +5,10 @@ import (
 	"fmt"
 	"strings"
 	"testing"
+	"time"
 
 	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -37,40 +39,73 @@ func TestRedis_ParseMetrics(t *testing.T) {
 	tags = map[string]string{"host": "redis.net", "replication_role": "master"}
 	fields := map[string]interface{}{
 		"uptime": uint64(238),
-		"clients": uint64(1),
-		"used_memory": uint64(1003936),
-		"used_memory_rss": uint64(811008),
-		"used_memory_peak": uint64(1003936),
-		"used_memory_lua": uint64(33792),
-		"rdb_changes_since_last_save": uint64(0),
-		"total_connections_received": uint64(2),
-		"total_commands_processed": uint64(1),
-		"instantaneous_ops_per_sec": uint64(0),
-		"sync_full": uint64(0),
-		"sync_partial_ok": uint64(0),
-		"sync_partial_err": uint64(0),
-		"expired_keys": uint64(0),
-		"evicted_keys": uint64(0),
-		"keyspace_hits": uint64(1),
-		"keyspace_misses": uint64(1),
-		"pubsub_channels": uint64(0),
-		"pubsub_patterns": uint64(0),
-		"latest_fork_usec": uint64(0),
-		"connected_slaves": uint64(0),
-		"master_repl_offset": uint64(0),
-		"repl_backlog_active": uint64(0),
-		"repl_backlog_size": uint64(1048576),
-		"repl_backlog_histlen": uint64(0),
-		"mem_fragmentation_ratio": float64(0.81),
-		"instantaneous_input_kbps": float64(876.16),
-		"instantaneous_output_kbps": float64(3010.23),
-		"used_cpu_sys": float64(0.14),
-		"used_cpu_user": float64(0.05),
-		"used_cpu_sys_children": float64(0.00),
-		"used_cpu_user_children": float64(0.00),
-		"keyspace_hitrate": float64(0.50),
+		"lru_clock": uint64(2364819),
+		"clients": uint64(1),
+		"client_longest_output_list": uint64(0),
+		"client_biggest_input_buf": uint64(0),
+		"blocked_clients": uint64(0),
+		"used_memory": uint64(1003936),
+		"used_memory_rss": uint64(811008),
+		"used_memory_peak": uint64(1003936),
+		"used_memory_lua": uint64(33792),
+		"mem_fragmentation_ratio": float64(0.81),
+		"loading": uint64(0),
+		"rdb_changes_since_last_save": uint64(0),
+		"rdb_bgsave_in_progress": uint64(0),
+		"rdb_last_save_time": uint64(1428427941),
+		"rdb_last_bgsave_status": "ok",
+		"rdb_last_bgsave_time_sec": int64(-1),
+		"rdb_current_bgsave_time_sec": int64(-1),
+		"aof_enabled": uint64(0),
+		"aof_rewrite_in_progress": uint64(0),
+		"aof_rewrite_scheduled": uint64(0),
+		"aof_last_rewrite_time_sec": int64(-1),
+		"aof_current_rewrite_time_sec": int64(-1),
+		"aof_last_bgrewrite_status": "ok",
+		"aof_last_write_status": "ok",
+		"total_connections_received": uint64(2),
+		"total_commands_processed": uint64(1),
+		"instantaneous_ops_per_sec": uint64(0),
+		"instantaneous_input_kbps": float64(876.16),
+		"instantaneous_output_kbps": float64(3010.23),
+		"rejected_connections": uint64(0),
+		"sync_full": uint64(0),
+		"sync_partial_ok": uint64(0),
+		"sync_partial_err": uint64(0),
+		"expired_keys": uint64(0),
+		"evicted_keys": uint64(0),
+		"keyspace_hits": uint64(1),
+		"keyspace_misses": uint64(1),
+		"pubsub_channels": uint64(0),
+		"pubsub_patterns": uint64(0),
+		"latest_fork_usec": uint64(0),
+		"connected_slaves": uint64(0),
+		"master_repl_offset": uint64(0),
+		"repl_backlog_active": uint64(0),
+		"repl_backlog_size": uint64(1048576),
+		"repl_backlog_first_byte_offset": uint64(0),
+		"repl_backlog_histlen": uint64(0),
+		"used_cpu_sys": float64(0.14),
+		"used_cpu_user": float64(0.05),
+		"used_cpu_sys_children": float64(0.00),
+		"used_cpu_user_children": float64(0.00),
+		"keyspace_hitrate": float64(0.50),
 	}
+
+	// We have to test rdb_last_save_time_offset manually because the value is based on the time when gathered
+	for _, m := range acc.Metrics {
+		for k, v := range m.Fields {
+			if k == "rdb_last_save_time_elapsed" {
+				fields[k] = v
+			}
+		}
+	}
+	assert.InDelta(t,
+		uint64(time.Now().Unix())-fields["rdb_last_save_time"].(uint64),
+		fields["rdb_last_save_time_elapsed"].(uint64),
+		2) // allow for 2 seconds worth of offset
+
 	keyspaceTags := map[string]string{"host": "redis.net", "replication_role": "master", "database": "db0"}
 	keyspaceFields := map[string]interface{}{
 		"avg_ttl": uint64(0),