Enforce stat prefixing at the accumulator layer

Evan Phoenix 2015-05-18 12:15:15 -07:00
parent 34e87e7026
commit f1e1204374
10 changed files with 439 additions and 314 deletions
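In short: stat names are no longer hand-prefixed inside each plugin. The accumulator (BatchPoints) gains a Prefix field and prepends it in Add, the agent wraps every plugin in a runningPlugin that remembers the name it was registered under, and it sets acc.Prefix = name + "_" before each Gather call. Plugins therefore emit bare names ("queries", "xact_commit", "uptime") and the stored stats keep their old prefixed form ("mysql_queries" and so on). The system plugin is likewise split into separate cpu, disk, io, net, mem, swap, docker, and system plugins that rely on the same mechanism. A small runnable sketch of the flow follows the Agent changes below.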

View File

@@ -12,9 +12,13 @@ type BatchPoints struct {
client.BatchPoints
Debug bool
Prefix string
}
func (bp *BatchPoints) Add(name string, val interface{}, tags map[string]string) {
name = bp.Prefix + name
if bp.Debug {
var tg []string

View File

@@ -11,6 +11,11 @@ import (
"github.com/influxdb/tivan/plugins"
)
type runningPlugin struct {
name string
plugin plugins.Plugin
}
type Agent struct {
Interval Duration
Debug bool
@@ -19,7 +24,7 @@ type Agent struct {
Config *Config
plugins []plugins.Plugin
plugins []*runningPlugin
conn *client.Client
}
@@ -93,7 +98,7 @@ func (a *Agent) LoadPlugins() ([]string, error) {
return nil, err
}
a.plugins = append(a.plugins, plugin)
a.plugins = append(a.plugins, &runningPlugin{name, plugin})
names = append(names, name)
}
@@ -108,7 +113,8 @@ func (a *Agent) crank() error {
acc.Debug = a.Debug
for _, plugin := range a.plugins {
err := plugin.Gather(&acc)
acc.Prefix = plugin.name + "_"
err := plugin.plugin.Gather(&acc)
if err != nil {
return err
}
@@ -128,7 +134,8 @@ func (a *Agent) Test() error {
acc.Debug = true
for _, plugin := range a.plugins {
err := plugin.Gather(&acc)
acc.Prefix = plugin.name + "_"
err := plugin.plugin.Gather(&acc)
if err != nil {
return err
}
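The hunks above add the runningPlugin wrapper and re-point the accumulator's prefix before every Gather call, so a single accumulator pass namespaces each plugin's stats independently. Here is the minimal, self-contained sketch referenced under the commit header; the toy Accumulator, Plugin interface, and fake plugin are illustrative stand-ins rather than the repository's actual types, and only runningPlugin, the Prefix assignment, and the plugin.plugin.Gather call come from the diff.

package main

import "fmt"

// Illustrative accumulator: prepends Prefix on every Add, as BatchPoints now does.
type Accumulator struct {
    Prefix string
    Points map[string]interface{}
}

func (a *Accumulator) Add(name string, val interface{}, tags map[string]string) {
    a.Points[a.Prefix+name] = val
}

// Plugin matches the Gather shape used throughout this commit.
type Plugin interface {
    Gather(acc *Accumulator) error
}

// runningPlugin pairs a plugin with the name it was registered under.
type runningPlugin struct {
    name   string
    plugin Plugin
}

// fake is a stand-in plugin that emits a single bare stat name.
type fake struct{ stat string }

func (f *fake) Gather(acc *Accumulator) error {
    acc.Add(f.stat, 1, nil)
    return nil
}

func main() {
    acc := &Accumulator{Points: map[string]interface{}{}}
    running := []*runningPlugin{
        {"mysql", &fake{"queries"}},
        {"redis", &fake{"clients"}},
    }
    // Mirrors the loop in crank() and Test(): reset the prefix from the
    // plugin's registered name, then gather.
    for _, p := range running {
        acc.Prefix = p.name + "_"
        if err := p.plugin.Gather(acc); err != nil {
            panic(err)
        }
    }
    fmt.Println(acc.Points) // map[mysql_queries:1 redis_clients:1]
}

With the prefix owned by the accumulator, the plugins themselves (mysql, postgresql, redis, and the system gatherers below) can drop the hard-coded prefixes from every stat name, which is the bulk of this diff.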

View File

@@ -50,23 +50,23 @@ type mapping struct {
var mappings = []*mapping{
{
onServer: "Bytes_",
inExport: "mysql_bytes_",
inExport: "bytes_",
},
{
onServer: "Com_",
inExport: "mysql_commands_",
inExport: "commands_",
},
{
onServer: "Handler_",
inExport: "mysql_handler_",
inExport: "handler_",
},
{
onServer: "Innodb_",
inExport: "mysql_innodb_",
inExport: "innodb_",
},
{
onServer: "Threads_",
inExport: "mysql_threads_",
inExport: "threads_",
},
}
@@ -113,14 +113,14 @@ func (m *Mysql) gatherServer(serv *Server, acc plugins.Accumulator) error {
return err
}
acc.Add("mysql_queries", i, nil)
acc.Add("queries", i, nil)
case "Slow_queries":
i, err := strconv.ParseInt(string(val.([]byte)), 10, 64)
if err != nil {
return err
}
acc.Add("mysql_slow_queries", i, nil)
acc.Add("slow_queries", i, nil)
}
}

View File

@@ -27,16 +27,16 @@ func TestMysqlGeneratesMetrics(t *testing.T) {
prefix string
count int
}{
{"mysql_commands", 141},
{"mysql_handler", 18},
{"mysql_bytes", 2},
{"mysql_innodb", 51},
{"mysql_threads", 4},
{"commands", 141},
{"handler", 18},
{"bytes", 2},
{"innodb", 51},
{"threads", 4},
}
intMetrics := []string{
"mysql_queries",
"mysql_slow_queries",
"queries",
"slow_queries",
}
for _, prefix := range prefixes {

View File

@@ -100,21 +100,21 @@ func (p *Postgresql) accRow(row scanner, acc plugins.Accumulator) error {
tags := map[string]string{"db": name}
acc.Add("postgresql_xact_commit", commit, tags)
acc.Add("postgresql_xact_rollback", rollback, tags)
acc.Add("postgresql_blks_read", read, tags)
acc.Add("postgresql_blks_hit", hit, tags)
acc.Add("postgresql_tup_returned", returned, tags)
acc.Add("postgresql_tup_fetched", fetched, tags)
acc.Add("postgresql_tup_inserted", inserted, tags)
acc.Add("postgresql_tup_updated", updated, tags)
acc.Add("postgresql_tup_deleted", deleted, tags)
acc.Add("postgresql_conflicts", conflicts, tags)
acc.Add("postgresql_temp_files", temp_files, tags)
acc.Add("postgresql_temp_bytes", temp_bytes, tags)
acc.Add("postgresql_deadlocks", deadlocks, tags)
acc.Add("postgresql_blk_read_time", read_time, tags)
acc.Add("postgresql_blk_write_time", read_time, tags)
acc.Add("xact_commit", commit, tags)
acc.Add("xact_rollback", rollback, tags)
acc.Add("blks_read", read, tags)
acc.Add("blks_hit", hit, tags)
acc.Add("tup_returned", returned, tags)
acc.Add("tup_fetched", fetched, tags)
acc.Add("tup_inserted", inserted, tags)
acc.Add("tup_updated", updated, tags)
acc.Add("tup_deleted", deleted, tags)
acc.Add("conflicts", conflicts, tags)
acc.Add("temp_files", temp_files, tags)
acc.Add("temp_bytes", temp_bytes, tags)
acc.Add("deadlocks", deadlocks, tags)
acc.Add("blk_read_time", read_time, tags)
acc.Add("blk_write_time", read_time, tags)
return nil
}

View File

@@ -24,24 +24,24 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) {
require.NoError(t, err)
intMetrics := []string{
"postgresql_xact_commit",
"postgresql_xact_rollback",
"postgresql_blks_read",
"postgresql_blks_hit",
"postgresql_tup_returned",
"postgresql_tup_fetched",
"postgresql_tup_inserted",
"postgresql_tup_updated",
"postgresql_tup_deleted",
"postgresql_conflicts",
"postgresql_temp_files",
"postgresql_temp_bytes",
"postgresql_deadlocks",
"xact_commit",
"xact_rollback",
"blks_read",
"blks_hit",
"tup_returned",
"tup_fetched",
"tup_inserted",
"tup_updated",
"tup_deleted",
"conflicts",
"temp_files",
"temp_bytes",
"deadlocks",
}
floatMetrics := []string{
"postgresql_blk_read_time",
"postgresql_blk_write_time",
"blk_read_time",
"blk_write_time",
}
for _, metric := range intMetrics {
@@ -68,7 +68,7 @@ func TestPostgresqlTagsMetricsWithDatabaseName(t *testing.T) {
err := p.Gather(&acc)
require.NoError(t, err)
point, ok := acc.Get("postgresql_xact_commit")
point, ok := acc.Get("xact_commit")
require.True(t, ok)
assert.Equal(t, "postgres", point.Tags["db"])
@@ -91,7 +91,7 @@ func TestPostgresqlDefaultsToAllDatabases(t *testing.T) {
var found bool
for _, pnt := range acc.Points {
if pnt.Name == "postgresql_xact_commit" {
if pnt.Name == "xact_commit" {
if pnt.Tags["db"] == "postgres" {
found = true
break

View File

@@ -22,36 +22,36 @@ type Redis struct {
}
var Tracking = map[string]string{
"uptime_in_seconds": "redis_uptime",
"connected_clients": "redis_clients",
"used_memory": "redis_used_memory",
"used_memory_rss": "redis_used_memory_rss",
"used_memory_peak": "redis_used_memory_peak",
"used_memory_lua": "redis_used_memory_lua",
"rdb_changes_since_last_save": "redis_rdb_changes_since_last_save",
"total_connections_received": "redis_total_connections_received",
"total_commands_processed": "redis_total_commands_processed",
"instantaneous_ops_per_sec": "redis_instantaneous_ops_per_sec",
"sync_full": "redis_sync_full",
"sync_partial_ok": "redis_sync_partial_ok",
"sync_partial_err": "redis_sync_partial_err",
"expired_keys": "redis_expired_keys",
"evicted_keys": "redis_evicted_keys",
"keyspace_hits": "redis_keyspace_hits",
"keyspace_misses": "redis_keyspace_misses",
"pubsub_channels": "redis_pubsub_channels",
"pubsub_patterns": "redis_pubsub_patterns",
"latest_fork_usec": "redis_latest_fork_usec",
"connected_slaves": "redis_connected_slaves",
"master_repl_offset": "redis_master_repl_offset",
"repl_backlog_active": "redis_repl_backlog_active",
"repl_backlog_size": "redis_repl_backlog_size",
"repl_backlog_histlen": "redis_repl_backlog_histlen",
"mem_fragmentation_ratio": "redis_mem_fragmentation_ratio",
"used_cpu_sys": "redis_used_cpu_sys",
"used_cpu_user": "redis_used_cpu_user",
"used_cpu_sys_children": "redis_used_cpu_sys_children",
"used_cpu_user_children": "redis_used_cpu_user_children",
"uptime_in_seconds": "uptime",
"connected_clients": "clients",
"used_memory": "used_memory",
"used_memory_rss": "used_memory_rss",
"used_memory_peak": "used_memory_peak",
"used_memory_lua": "used_memory_lua",
"rdb_changes_since_last_save": "rdb_changes_since_last_save",
"total_connections_received": "total_connections_received",
"total_commands_processed": "total_commands_processed",
"instantaneous_ops_per_sec": "instantaneous_ops_per_sec",
"sync_full": "sync_full",
"sync_partial_ok": "sync_partial_ok",
"sync_partial_err": "sync_partial_err",
"expired_keys": "expired_keys",
"evicted_keys": "evicted_keys",
"keyspace_hits": "keyspace_hits",
"keyspace_misses": "keyspace_misses",
"pubsub_channels": "pubsub_channels",
"pubsub_patterns": "pubsub_patterns",
"latest_fork_usec": "latest_fork_usec",
"connected_slaves": "connected_slaves",
"master_repl_offset": "master_repl_offset",
"repl_backlog_active": "repl_backlog_active",
"repl_backlog_size": "repl_backlog_size",
"repl_backlog_histlen": "repl_backlog_histlen",
"mem_fragmentation_ratio": "mem_fragmentation_ratio",
"used_cpu_sys": "used_cpu_sys",
"used_cpu_user": "used_cpu_user",
"used_cpu_sys_children": "used_cpu_sys_children",
"used_cpu_user_children": "used_cpu_user_children",
}
var ErrProtocolError = errors.New("redis protocol error")

View File

@@ -55,31 +55,31 @@ func TestRedisGeneratesMetrics(t *testing.T) {
name string
value uint64
}{
{"redis_uptime", 238},
{"redis_clients", 1},
{"redis_used_memory", 1003936},
{"redis_used_memory_rss", 811008},
{"redis_used_memory_peak", 1003936},
{"redis_used_memory_lua", 33792},
{"redis_rdb_changes_since_last_save", 0},
{"redis_total_connections_received", 2},
{"redis_total_commands_processed", 1},
{"redis_instantaneous_ops_per_sec", 0},
{"redis_sync_full", 0},
{"redis_sync_partial_ok", 0},
{"redis_sync_partial_err", 0},
{"redis_expired_keys", 0},
{"redis_evicted_keys", 0},
{"redis_keyspace_hits", 0},
{"redis_keyspace_misses", 0},
{"redis_pubsub_channels", 0},
{"redis_pubsub_patterns", 0},
{"redis_latest_fork_usec", 0},
{"redis_connected_slaves", 0},
{"redis_master_repl_offset", 0},
{"redis_repl_backlog_active", 0},
{"redis_repl_backlog_size", 1048576},
{"redis_repl_backlog_histlen", 0},
{"uptime", 238},
{"clients", 1},
{"used_memory", 1003936},
{"used_memory_rss", 811008},
{"used_memory_peak", 1003936},
{"used_memory_lua", 33792},
{"rdb_changes_since_last_save", 0},
{"total_connections_received", 2},
{"total_commands_processed", 1},
{"instantaneous_ops_per_sec", 0},
{"sync_full", 0},
{"sync_partial_ok", 0},
{"sync_partial_err", 0},
{"expired_keys", 0},
{"evicted_keys", 0},
{"keyspace_hits", 0},
{"keyspace_misses", 0},
{"pubsub_channels", 0},
{"pubsub_patterns", 0},
{"latest_fork_usec", 0},
{"connected_slaves", 0},
{"master_repl_offset", 0},
{"repl_backlog_active", 0},
{"repl_backlog_size", 1048576},
{"repl_backlog_histlen", 0},
}
for _, c := range checkInt {
@@ -90,11 +90,11 @@ func TestRedisGeneratesMetrics(t *testing.T) {
name string
value float64
}{
{"redis_mem_fragmentation_ratio", 0.81},
{"redis_used_cpu_sys", 0.14},
{"redis_used_cpu_user", 0.05},
{"redis_used_cpu_sys_children", 0.00},
{"redis_used_cpu_user_children", 0.00},
{"mem_fragmentation_ratio", 0.81},
{"used_cpu_sys", 0.14},
{"used_cpu_user", 0.05},
{"used_cpu_sys_children", 0.00},
{"used_cpu_user_children", 0.00},
}
for _, c := range checkFloat {
@@ -146,31 +146,31 @@ func TestRedisCanPullStatsFromMultipleServers(t *testing.T) {
name string
value uint64
}{
{"redis_uptime", 238},
{"redis_clients", 1},
{"redis_used_memory", 1003936},
{"redis_used_memory_rss", 811008},
{"redis_used_memory_peak", 1003936},
{"redis_used_memory_lua", 33792},
{"redis_rdb_changes_since_last_save", 0},
{"redis_total_connections_received", 2},
{"redis_total_commands_processed", 1},
{"redis_instantaneous_ops_per_sec", 0},
{"redis_sync_full", 0},
{"redis_sync_partial_ok", 0},
{"redis_sync_partial_err", 0},
{"redis_expired_keys", 0},
{"redis_evicted_keys", 0},
{"redis_keyspace_hits", 0},
{"redis_keyspace_misses", 0},
{"redis_pubsub_channels", 0},
{"redis_pubsub_patterns", 0},
{"redis_latest_fork_usec", 0},
{"redis_connected_slaves", 0},
{"redis_master_repl_offset", 0},
{"redis_repl_backlog_active", 0},
{"redis_repl_backlog_size", 1048576},
{"redis_repl_backlog_histlen", 0},
{"uptime", 238},
{"clients", 1},
{"used_memory", 1003936},
{"used_memory_rss", 811008},
{"used_memory_peak", 1003936},
{"used_memory_lua", 33792},
{"rdb_changes_since_last_save", 0},
{"total_connections_received", 2},
{"total_commands_processed", 1},
{"instantaneous_ops_per_sec", 0},
{"sync_full", 0},
{"sync_partial_ok", 0},
{"sync_partial_err", 0},
{"expired_keys", 0},
{"evicted_keys", 0},
{"keyspace_hits", 0},
{"keyspace_misses", 0},
{"pubsub_channels", 0},
{"pubsub_patterns", 0},
{"latest_fork_usec", 0},
{"connected_slaves", 0},
{"master_repl_offset", 0},
{"repl_backlog_active", 0},
{"repl_backlog_size", 1048576},
{"repl_backlog_histlen", 0},
}
for _, c := range checkInt {
@@ -181,11 +181,11 @@ func TestRedisCanPullStatsFromMultipleServers(t *testing.T) {
name string
value float64
}{
{"redis_mem_fragmentation_ratio", 0.81},
{"redis_used_cpu_sys", 0.14},
{"redis_used_cpu_user", 0.05},
{"redis_used_cpu_sys_children", 0.00},
{"redis_used_cpu_user_children", 0.00},
{"mem_fragmentation_ratio", 0.81},
{"used_cpu_sys", 0.14},
{"used_cpu_user", 0.05},
{"used_cpu_sys_children", 0.00},
{"used_cpu_user_children", 0.00},
}
for _, c := range checkFloat {

View File

@@ -35,6 +35,238 @@ type PS interface {
DockerStat() ([]*DockerContainerStat, error)
}
func add(acc plugins.Accumulator,
name string, val float64, tags map[string]string) {
if val >= 0 {
acc.Add(name, val, tags)
}
}
type CPUStats struct {
ps PS
}
func (s *CPUStats) Gather(acc plugins.Accumulator) error {
times, err := s.ps.CPUTimes()
if err != nil {
return fmt.Errorf("error getting CPU info: %s", err)
}
for _, cts := range times {
tags := map[string]string{
"cpu": cts.CPU,
}
add(acc, "user", cts.User, tags)
add(acc, "system", cts.System, tags)
add(acc, "idle", cts.Idle, tags)
add(acc, "nice", cts.Nice, tags)
add(acc, "iowait", cts.Iowait, tags)
add(acc, "irq", cts.Irq, tags)
add(acc, "softirq", cts.Softirq, tags)
add(acc, "steal", cts.Steal, tags)
add(acc, "guest", cts.Guest, tags)
add(acc, "guestNice", cts.GuestNice, tags)
add(acc, "stolen", cts.Stolen, tags)
}
return nil
}
type DiskStats struct {
ps PS
}
func (s *DiskStats) Gather(acc plugins.Accumulator) error {
disks, err := s.ps.DiskUsage()
if err != nil {
return fmt.Errorf("error getting disk usage info: %s", err)
}
for _, du := range disks {
tags := map[string]string{
"path": du.Path,
}
acc.Add("total", du.Total, tags)
acc.Add("free", du.Free, tags)
acc.Add("used", du.Total-du.Free, tags)
acc.Add("inodes_total", du.InodesTotal, tags)
acc.Add("inodes_free", du.InodesFree, tags)
acc.Add("inodes_used", du.InodesTotal-du.InodesFree, tags)
}
return nil
}
type DiskIOStats struct {
ps PS
}
func (s *DiskIOStats) Gather(acc plugins.Accumulator) error {
diskio, err := s.ps.DiskIO()
if err != nil {
return fmt.Errorf("error getting disk io info: %s", err)
}
for _, io := range diskio {
tags := map[string]string{
"name": io.Name,
"serial": io.SerialNumber,
}
acc.Add("reads", io.ReadCount, tags)
acc.Add("writes", io.WriteCount, tags)
acc.Add("read_bytes", io.ReadBytes, tags)
acc.Add("write_bytes", io.WriteBytes, tags)
acc.Add("read_time", io.ReadTime, tags)
acc.Add("write_time", io.WriteTime, tags)
acc.Add("io_time", io.IoTime, tags)
}
return nil
}
type NetIOStats struct {
ps PS
}
func (s *NetIOStats) Gather(acc plugins.Accumulator) error {
netio, err := s.ps.NetIO()
if err != nil {
return fmt.Errorf("error getting net io info: %s", err)
}
for _, io := range netio {
tags := map[string]string{
"interface": io.Name,
}
acc.Add("bytes_sent", io.BytesSent, tags)
acc.Add("bytes_recv", io.BytesRecv, tags)
acc.Add("packets_sent", io.PacketsSent, tags)
acc.Add("packets_recv", io.PacketsRecv, tags)
acc.Add("err_in", io.Errin, tags)
acc.Add("err_out", io.Errout, tags)
acc.Add("drop_in", io.Dropin, tags)
acc.Add("drop_out", io.Dropout, tags)
}
return nil
}
type MemStats struct {
ps PS
}
func (s *MemStats) Gather(acc plugins.Accumulator) error {
vm, err := s.ps.VMStat()
if err != nil {
return fmt.Errorf("error getting virtual memory info: %s", err)
}
vmtags := map[string]string(nil)
acc.Add("total", vm.Total, vmtags)
acc.Add("available", vm.Available, vmtags)
acc.Add("used", vm.Used, vmtags)
acc.Add("used_prec", vm.UsedPercent, vmtags)
acc.Add("free", vm.Free, vmtags)
acc.Add("active", vm.Active, vmtags)
acc.Add("inactive", vm.Inactive, vmtags)
acc.Add("buffers", vm.Buffers, vmtags)
acc.Add("cached", vm.Cached, vmtags)
acc.Add("wired", vm.Wired, vmtags)
acc.Add("shared", vm.Shared, vmtags)
return nil
}
type SwapStats struct {
ps PS
}
func (s *SwapStats) Gather(acc plugins.Accumulator) error {
swap, err := s.ps.SwapStat()
if err != nil {
return fmt.Errorf("error getting swap memory info: %s", err)
}
swaptags := map[string]string(nil)
acc.Add("total", swap.Total, swaptags)
acc.Add("used", swap.Used, swaptags)
acc.Add("free", swap.Free, swaptags)
acc.Add("used_perc", swap.UsedPercent, swaptags)
acc.Add("in", swap.Sin, swaptags)
acc.Add("out", swap.Sout, swaptags)
return nil
}
type DockerStats struct {
ps PS
}
func (s *DockerStats) Gather(acc plugins.Accumulator) error {
containers, err := s.ps.DockerStat()
if err != nil {
return fmt.Errorf("error getting docker info: %s", err)
}
for _, cont := range containers {
tags := map[string]string{
"id": cont.Id,
"name": cont.Name,
"command": cont.Command,
}
cts := cont.CPU
acc.Add("user", cts.User, tags)
acc.Add("system", cts.System, tags)
acc.Add("idle", cts.Idle, tags)
acc.Add("nice", cts.Nice, tags)
acc.Add("iowait", cts.Iowait, tags)
acc.Add("irq", cts.Irq, tags)
acc.Add("softirq", cts.Softirq, tags)
acc.Add("steal", cts.Steal, tags)
acc.Add("guest", cts.Guest, tags)
acc.Add("guestNice", cts.GuestNice, tags)
acc.Add("stolen", cts.Stolen, tags)
acc.Add("cache", cont.Mem.Cache, tags)
acc.Add("rss", cont.Mem.RSS, tags)
acc.Add("rss_huge", cont.Mem.RSSHuge, tags)
acc.Add("mapped_file", cont.Mem.MappedFile, tags)
acc.Add("swap_in", cont.Mem.Pgpgin, tags)
acc.Add("swap_out", cont.Mem.Pgpgout, tags)
acc.Add("page_fault", cont.Mem.Pgfault, tags)
acc.Add("page_major_fault", cont.Mem.Pgmajfault, tags)
acc.Add("inactive_anon", cont.Mem.InactiveAnon, tags)
acc.Add("active_anon", cont.Mem.ActiveAnon, tags)
acc.Add("inactive_file", cont.Mem.InactiveFile, tags)
acc.Add("active_file", cont.Mem.ActiveFile, tags)
acc.Add("unevictable", cont.Mem.Unevictable, tags)
acc.Add("memory_limit", cont.Mem.HierarchicalMemoryLimit, tags)
acc.Add("total_cache", cont.Mem.TotalCache, tags)
acc.Add("total_rss", cont.Mem.TotalRSS, tags)
acc.Add("total_rss_huge", cont.Mem.TotalRSSHuge, tags)
acc.Add("total_mapped_file", cont.Mem.TotalMappedFile, tags)
acc.Add("total_swap_in", cont.Mem.TotalPgpgIn, tags)
acc.Add("total_swap_out", cont.Mem.TotalPgpgOut, tags)
acc.Add("total_page_fault", cont.Mem.TotalPgFault, tags)
acc.Add("total_page_major_fault", cont.Mem.TotalPgMajFault, tags)
acc.Add("total_inactive_anon", cont.Mem.TotalInactiveAnon, tags)
acc.Add("total_active_anon", cont.Mem.TotalActiveAnon, tags)
acc.Add("total_inactive_file", cont.Mem.TotalInactiveFile, tags)
acc.Add("total_active_file", cont.Mem.TotalActiveFile, tags)
acc.Add("total_unevictable", cont.Mem.TotalUnevictable, tags)
}
return nil
}
type SystemStats struct {
ps PS
}
@@ -52,178 +284,9 @@ func (s *SystemStats) Gather(acc plugins.Accumulator) error {
return err
}
acc.Add("system_load1", lv.Load1, nil)
acc.Add("system_load5", lv.Load5, nil)
acc.Add("system_load15", lv.Load15, nil)
times, err := s.ps.CPUTimes()
if err != nil {
return fmt.Errorf("error getting CPU info: %s", err)
}
for _, cts := range times {
tags := map[string]string{
"cpu": cts.CPU,
}
s.add(acc, "cpu_user", cts.User, tags)
s.add(acc, "cpu_system", cts.System, tags)
s.add(acc, "cpu_idle", cts.Idle, tags)
s.add(acc, "cpu_nice", cts.Nice, tags)
s.add(acc, "cpu_iowait", cts.Iowait, tags)
s.add(acc, "cpu_irq", cts.Irq, tags)
s.add(acc, "cpu_softirq", cts.Softirq, tags)
s.add(acc, "cpu_steal", cts.Steal, tags)
s.add(acc, "cpu_guest", cts.Guest, tags)
s.add(acc, "cpu_guestNice", cts.GuestNice, tags)
s.add(acc, "cpu_stolen", cts.Stolen, tags)
}
disks, err := s.ps.DiskUsage()
if err != nil {
return fmt.Errorf("error getting disk usage info: %s", err)
}
for _, du := range disks {
tags := map[string]string{
"path": du.Path,
}
acc.Add("disk_total", du.Total, tags)
acc.Add("disk_free", du.Free, tags)
acc.Add("disk_used", du.Total-du.Free, tags)
acc.Add("disk_inodes_total", du.InodesTotal, tags)
acc.Add("disk_inodes_free", du.InodesFree, tags)
acc.Add("disk_inodes_used", du.InodesTotal-du.InodesFree, tags)
}
diskio, err := s.ps.DiskIO()
if err != nil {
return fmt.Errorf("error getting disk io info: %s", err)
}
for _, io := range diskio {
tags := map[string]string{
"name": io.Name,
"serial": io.SerialNumber,
}
acc.Add("io_reads", io.ReadCount, tags)
acc.Add("io_writes", io.WriteCount, tags)
acc.Add("io_read_bytes", io.ReadBytes, tags)
acc.Add("io_write_bytes", io.WriteBytes, tags)
acc.Add("io_read_time", io.ReadTime, tags)
acc.Add("io_write_time", io.WriteTime, tags)
acc.Add("io_io_time", io.IoTime, tags)
}
netio, err := s.ps.NetIO()
if err != nil {
return fmt.Errorf("error getting net io info: %s", err)
}
for _, io := range netio {
tags := map[string]string{
"interface": io.Name,
}
acc.Add("net_bytes_sent", io.BytesSent, tags)
acc.Add("net_bytes_recv", io.BytesRecv, tags)
acc.Add("net_packets_sent", io.PacketsSent, tags)
acc.Add("net_packets_recv", io.PacketsRecv, tags)
acc.Add("net_err_in", io.Errin, tags)
acc.Add("net_err_out", io.Errout, tags)
acc.Add("net_drop_in", io.Dropin, tags)
acc.Add("net_drop_out", io.Dropout, tags)
}
vm, err := s.ps.VMStat()
if err != nil {
return fmt.Errorf("error getting virtual memory info: %s", err)
}
vmtags := map[string]string(nil)
acc.Add("mem_total", vm.Total, vmtags)
acc.Add("mem_available", vm.Available, vmtags)
acc.Add("mem_used", vm.Used, vmtags)
acc.Add("mem_used_prec", vm.UsedPercent, vmtags)
acc.Add("mem_free", vm.Free, vmtags)
acc.Add("mem_active", vm.Active, vmtags)
acc.Add("mem_inactive", vm.Inactive, vmtags)
acc.Add("mem_buffers", vm.Buffers, vmtags)
acc.Add("mem_cached", vm.Cached, vmtags)
acc.Add("mem_wired", vm.Wired, vmtags)
acc.Add("mem_shared", vm.Shared, vmtags)
swap, err := s.ps.SwapStat()
if err != nil {
return fmt.Errorf("error getting swap memory info: %s", err)
}
swaptags := map[string]string(nil)
acc.Add("swap_total", swap.Total, swaptags)
acc.Add("swap_used", swap.Used, swaptags)
acc.Add("swap_free", swap.Free, swaptags)
acc.Add("swap_used_perc", swap.UsedPercent, swaptags)
acc.Add("swap_in", swap.Sin, swaptags)
acc.Add("swap_out", swap.Sout, swaptags)
containers, err := s.ps.DockerStat()
if err != nil {
return fmt.Errorf("error getting docker info: %s", err)
}
for _, cont := range containers {
tags := map[string]string{
"id": cont.Id,
"name": cont.Name,
"command": cont.Command,
}
cts := cont.CPU
acc.Add("docker_user", cts.User, tags)
acc.Add("docker_system", cts.System, tags)
acc.Add("docker_idle", cts.Idle, tags)
acc.Add("docker_nice", cts.Nice, tags)
acc.Add("docker_iowait", cts.Iowait, tags)
acc.Add("docker_irq", cts.Irq, tags)
acc.Add("docker_softirq", cts.Softirq, tags)
acc.Add("docker_steal", cts.Steal, tags)
acc.Add("docker_guest", cts.Guest, tags)
acc.Add("docker_guestNice", cts.GuestNice, tags)
acc.Add("docker_stolen", cts.Stolen, tags)
acc.Add("docker_cache", cont.Mem.Cache, tags)
acc.Add("docker_rss", cont.Mem.RSS, tags)
acc.Add("docker_rss_huge", cont.Mem.RSSHuge, tags)
acc.Add("docker_mapped_file", cont.Mem.MappedFile, tags)
acc.Add("docker_swap_in", cont.Mem.Pgpgin, tags)
acc.Add("docker_swap_out", cont.Mem.Pgpgout, tags)
acc.Add("docker_page_fault", cont.Mem.Pgfault, tags)
acc.Add("docker_page_major_fault", cont.Mem.Pgmajfault, tags)
acc.Add("docker_inactive_anon", cont.Mem.InactiveAnon, tags)
acc.Add("docker_active_anon", cont.Mem.ActiveAnon, tags)
acc.Add("docker_inactive_file", cont.Mem.InactiveFile, tags)
acc.Add("docker_active_file", cont.Mem.ActiveFile, tags)
acc.Add("docker_unevictable", cont.Mem.Unevictable, tags)
acc.Add("docker_memory_limit", cont.Mem.HierarchicalMemoryLimit, tags)
acc.Add("docker_total_cache", cont.Mem.TotalCache, tags)
acc.Add("docker_total_rss", cont.Mem.TotalRSS, tags)
acc.Add("docker_total_rss_huge", cont.Mem.TotalRSSHuge, tags)
acc.Add("docker_total_mapped_file", cont.Mem.TotalMappedFile, tags)
acc.Add("docker_total_swap_in", cont.Mem.TotalPgpgIn, tags)
acc.Add("docker_total_swap_out", cont.Mem.TotalPgpgOut, tags)
acc.Add("docker_total_page_fault", cont.Mem.TotalPgFault, tags)
acc.Add("docker_total_page_major_fault", cont.Mem.TotalPgMajFault, tags)
acc.Add("docker_total_inactive_anon", cont.Mem.TotalInactiveAnon, tags)
acc.Add("docker_total_active_anon", cont.Mem.TotalActiveAnon, tags)
acc.Add("docker_total_inactive_file", cont.Mem.TotalInactiveFile, tags)
acc.Add("docker_total_active_file", cont.Mem.TotalActiveFile, tags)
acc.Add("docker_total_unevictable", cont.Mem.TotalUnevictable, tags)
}
acc.Add("load1", lv.Load1, nil)
acc.Add("load5", lv.Load5, nil)
acc.Add("load15", lv.Load15, nil)
return nil
}
@@ -330,6 +393,34 @@ func (s *systemPS) DockerStat() ([]*DockerContainerStat, error) {
}
func init() {
plugins.Add("cpu", func() plugins.Plugin {
return &CPUStats{ps: &systemPS{}}
})
plugins.Add("disk", func() plugins.Plugin {
return &DiskStats{ps: &systemPS{}}
})
plugins.Add("io", func() plugins.Plugin {
return &DiskIOStats{ps: &systemPS{}}
})
plugins.Add("net", func() plugins.Plugin {
return &NetIOStats{ps: &systemPS{}}
})
plugins.Add("mem", func() plugins.Plugin {
return &MemStats{ps: &systemPS{}}
})
plugins.Add("swap", func() plugins.Plugin {
return &SwapStats{ps: &systemPS{}}
})
plugins.Add("docker", func() plugins.Plugin {
return &DockerStats{ps: &systemPS{}}
})
plugins.Add("system", func() plugins.Plugin {
return &SystemStats{ps: &systemPS{}}
})
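The monolithic SystemStats gatherer is broken up into per-subsystem plugins (cpu, disk, io, net, mem, swap, docker, system), registered in the init() block above. Because each registration name becomes the accumulator prefix, the stored stat names stay exactly what the old SystemStats.Gather emitted; a throwaway sanity check over names taken from the old and new hunks (not part of the repository):

package main

import "fmt"

func main() {
    // Registered plugin name and bare stat emitted by the new plugin, paired
    // with the name the old monolithic SystemStats.Gather used to emit.
    cases := []struct{ plugin, stat, old string }{
        {"cpu", "user", "cpu_user"},
        {"disk", "total", "disk_total"},
        {"io", "reads", "io_reads"},
        {"net", "bytes_sent", "net_bytes_sent"},
        {"mem", "total", "mem_total"},
        {"swap", "total", "swap_total"},
        {"docker", "user", "docker_user"},
        {"system", "load1", "system_load1"},
    }
    for _, c := range cases {
        got := c.plugin + "_" + c.stat // prefix applied by the accumulator
        fmt.Printf("%-6s + %-10s => %-16s matches old name %q: %v\n",
            c.plugin, c.stat, got, c.old, got == c.old)
    }
}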

View File

@@ -21,8 +21,6 @@ func TestSystemStats_GenerateStats(t *testing.T) {
var acc testutil.Accumulator
ss := &SystemStats{ps: &mps}
lv := &load.LoadAvgStat{
Load1: 0.3,
Load5: 1.5,
@@ -163,6 +161,8 @@ func TestSystemStats_GenerateStats(t *testing.T) {
mps.On("DockerStat").Return([]*DockerContainerStat{ds}, nil)
ss := &SystemStats{ps: &mps}
err := ss.Gather(&acc)
require.NoError(t, err)
@@ -170,10 +170,15 @@ func TestSystemStats_GenerateStats(t *testing.T) {
assert.True(t, acc.CheckValue("system_load5", 1.5))
assert.True(t, acc.CheckValue("system_load15", 0.8))
cs := &CPUStats{ps: &mps}
cputags := map[string]string{
"cpu": "cpu0",
}
err = cs.Gather(&acc)
require.NoError(t, err)
assert.True(t, acc.CheckTaggedValue("cpu_user", 3.1, cputags))
assert.True(t, acc.CheckTaggedValue("cpu_system", 8.2, cputags))
assert.True(t, acc.CheckTaggedValue("cpu_idle", 80.1, cputags))
@@ -186,6 +191,9 @@ func TestSystemStats_GenerateStats(t *testing.T) {
assert.True(t, acc.CheckTaggedValue("cpu_guestNice", 0.324, cputags))
assert.True(t, acc.CheckTaggedValue("cpu_stolen", 0.051, cputags))
err = (&DiskStats{&mps}).Gather(&acc)
require.NoError(t, err)
tags := map[string]string{
"path": "/",
}
@@ -197,6 +205,9 @@ func TestSystemStats_GenerateStats(t *testing.T) {
assert.True(t, acc.CheckTaggedValue("disk_inodes_free", uint64(234), tags))
assert.True(t, acc.CheckTaggedValue("disk_inodes_used", uint64(1000), tags))
err = (&NetIOStats{&mps}).Gather(&acc)
require.NoError(t, err)
ntags := map[string]string{
"interface": "eth0",
}
@@ -210,6 +221,9 @@ func TestSystemStats_GenerateStats(t *testing.T) {
assert.True(t, acc.CheckTaggedValue("net_drop_in", uint64(7), ntags))
assert.True(t, acc.CheckTaggedValue("net_drop_out", uint64(1), ntags))
err = (&DiskIOStats{&mps}).Gather(&acc)
require.NoError(t, err)
dtags := map[string]string{
"name": "sda1",
"serial": "ab-123-ad",
@@ -223,6 +237,9 @@ func TestSystemStats_GenerateStats(t *testing.T) {
assert.True(t, acc.CheckTaggedValue("io_write_time", uint64(9087), dtags))
assert.True(t, acc.CheckTaggedValue("io_io_time", uint64(123552), dtags))
err = (&MemStats{&mps}).Gather(&acc)
require.NoError(t, err)
vmtags := map[string]string(nil)
assert.True(t, acc.CheckTaggedValue("mem_total", uint64(12400), vmtags))
@@ -237,6 +254,9 @@ func TestSystemStats_GenerateStats(t *testing.T) {
assert.True(t, acc.CheckTaggedValue("mem_wired", uint64(134), vmtags))
assert.True(t, acc.CheckTaggedValue("mem_shared", uint64(2142), vmtags))
err = (&SwapStats{&mps}).Gather(&acc)
require.NoError(t, err)
swaptags := map[string]string(nil)
assert.True(t, acc.CheckTaggedValue("swap_total", uint64(8123), swaptags))
@@ -246,6 +266,9 @@ func TestSystemStats_GenerateStats(t *testing.T) {
assert.True(t, acc.CheckTaggedValue("swap_in", uint64(7), swaptags))
assert.True(t, acc.CheckTaggedValue("swap_out", uint64(830), swaptags))
err = (&DockerStats{&mps}).Gather(&acc)
require.NoError(t, err)
dockertags := map[string]string{
"id": "blah",
}