Filter mount points before stats are collected

fixes #440
Cameron Sparr 2016-01-20 10:42:55 -07:00
parent d3925fe578
commit fc1aa7d3b4
5 changed files with 61 additions and 38 deletions

View File

@@ -90,7 +90,7 @@
[[inputs.disk]]
# By default, telegraf gathers stats for all mountpoints.
# Setting mountpoints will restrict the stats to the specified mountpoints.
# Mountpoints=["/"]
# mount_points=["/"]
# Read metrics about disk IO by device
[[inputs.diskio]]

View File

@@ -9,7 +9,10 @@ import (
type DiskStats struct {
ps PS
// Legacy support
Mountpoints []string
MountPoints []string
}
func (_ *DiskStats) Description() string {
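The hunk above interleaves the old and new field; after this commit the struct plausibly reads as the following fragment. The blank lines and comment wording are reconstructed from the hunk, not copied from the repository:

type DiskStats struct {
	ps PS

	// Legacy support: the old Mountpoints setting is still accepted
	// and is copied into MountPoints when Gather runs.
	Mountpoints []string

	MountPoints []string
}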
@@ -19,7 +22,7 @@ func (_ *DiskStats) Description() string {
var diskSampleConfig = `
# By default, telegraf gathers stats for all mountpoints.
# Setting mountpoints will restrict the stats to the specified mountpoints.
# Mountpoints=["/"]
# mount_points = ["/"]
`
func (_ *DiskStats) SampleConfig() string {
@@ -27,25 +30,17 @@ func (_ *DiskStats) SampleConfig() string {
}
func (s *DiskStats) Gather(acc inputs.Accumulator) error {
disks, err := s.ps.DiskUsage()
// Legacy support:
if len(s.Mountpoints) != 0 {
s.MountPoints = s.Mountpoints
}
disks, err := s.ps.DiskUsage(s.MountPoints)
if err != nil {
return fmt.Errorf("error getting disk usage info: %s", err)
}
var restrictMpoints bool
mPoints := make(map[string]bool)
if len(s.Mountpoints) != 0 {
restrictMpoints = true
for _, mp := range s.Mountpoints {
mPoints[mp] = true
}
}
for _, du := range disks {
_, member := mPoints[du.Path]
if restrictMpoints && !member {
continue
}
tags := map[string]string{
"path": du.Path,
"fstype": du.Fstype,

View File

@@ -15,7 +15,7 @@ func TestDiskStats(t *testing.T) {
var acc testutil.Accumulator
var err error
du := []*disk.DiskUsageStat{
duAll := []*disk.DiskUsageStat{
{
Path: "/",
Fstype: "ext4",
@@ -33,8 +33,20 @@ func TestDiskStats(t *testing.T) {
InodesFree: 468,
},
}
duFiltered := []*disk.DiskUsageStat{
{
Path: "/",
Fstype: "ext4",
Total: 128,
Free: 23,
InodesTotal: 1234,
InodesFree: 234,
},
}
mps.On("DiskUsage").Return(du, nil)
mps.On("DiskUsage", []string(nil)).Return(duAll, nil)
mps.On("DiskUsage", []string{"/", "/dev"}).Return(duFiltered, nil)
mps.On("DiskUsage", []string{"/", "/home"}).Return(duAll, nil)
err = (&DiskStats{ps: &mps}).Gather(&acc)
require.NoError(t, err)
@@ -53,32 +65,32 @@ func TestDiskStats(t *testing.T) {
}
fields1 := map[string]interface{}{
"total": uint64(128), //tags1)
"used": uint64(105), //tags1)
"free": uint64(23), //tags1)
"inodes_total": uint64(1234), //tags1)
"inodes_free": uint64(234), //tags1)
"inodes_used": uint64(1000), //tags1)
"total": uint64(128),
"used": uint64(105),
"free": uint64(23),
"inodes_total": uint64(1234),
"inodes_free": uint64(234),
"inodes_used": uint64(1000),
}
fields2 := map[string]interface{}{
"total": uint64(256), //tags2)
"used": uint64(210), //tags2)
"free": uint64(46), //tags2)
"inodes_total": uint64(2468), //tags2)
"inodes_free": uint64(468), //tags2)
"inodes_used": uint64(2000), //tags2)
"total": uint64(256),
"used": uint64(210),
"free": uint64(46),
"inodes_total": uint64(2468),
"inodes_free": uint64(468),
"inodes_used": uint64(2000),
}
acc.AssertContainsTaggedFields(t, "disk", fields1, tags1)
acc.AssertContainsTaggedFields(t, "disk", fields2, tags2)
// We expect 6 more DiskPoints to show up with an explicit match on "/"
// and /home not matching the /dev in Mountpoints
err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", "/dev"}}).Gather(&acc)
// and /home not matching the /dev in MountPoints
err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/dev"}}).Gather(&acc)
assert.Equal(t, expectedAllDiskPoints+6, acc.NFields())
// We should see all the diskpoints as Mountpoints includes both
// We should see all the diskpoints as MountPoints includes both
// / and /home
err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", "/home"}}).Gather(&acc)
err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/home"}}).Gather(&acc)
assert.Equal(t, 2*expectedAllDiskPoints+6, acc.NFields())
}

View File

@@ -33,8 +33,8 @@ func (m *MockPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) {
return r0, r1
}
func (m *MockPS) DiskUsage() ([]*disk.DiskUsageStat, error) {
ret := m.Called()
func (m *MockPS) DiskUsage(mountPointFilter []string) ([]*disk.DiskUsageStat, error) {
ret := m.Called(mountPointFilter)
r0 := ret.Get(0).([]*disk.DiskUsageStat)
r1 := ret.Error(1)
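The mock now forwards its argument to testify, so a test can register different return values per argument, as the test file above does with []string(nil), {"/", "/dev"} and {"/", "/home"}. A minimal, self-contained sketch of that pattern, assuming github.com/stretchr/testify/mock; the fakePS type and its []string return value are illustrative stand-ins, not the project's types:

package main

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

// fakePS is a hypothetical stand-in for MockPS.
type fakePS struct {
	mock.Mock
}

// DiskUsage passes its argument to the mock, so the On(...) expectation
// whose argument matches decides what gets returned.
func (m *fakePS) DiskUsage(mountPointFilter []string) ([]string, error) {
	ret := m.Called(mountPointFilter)
	return ret.Get(0).([]string), ret.Error(1)
}

func main() {
	m := &fakePS{}
	m.On("DiskUsage", []string(nil)).Return([]string{"/", "/home", "/dev"}, nil)
	m.On("DiskUsage", []string{"/"}).Return([]string{"/"}, nil)

	all, _ := m.DiskUsage(nil)                // matches the []string(nil) expectation
	onlyRoot, _ := m.DiskUsage([]string{"/"}) // matches the {"/"} expectation
	fmt.Println(all, onlyRoot)
}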

View File

@@ -27,7 +27,7 @@ type DockerContainerStat struct {
type PS interface {
CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error)
DiskUsage() ([]*disk.DiskUsageStat, error)
DiskUsage(mountPointFilter []string) ([]*disk.DiskUsageStat, error)
NetIO() ([]net.NetIOCountersStat, error)
NetProto() ([]net.NetProtoCountersStat, error)
DiskIO() (map[string]disk.DiskIOCountersStat, error)
@@ -67,15 +67,31 @@ func (s *systemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) {
return cpuTimes, nil
}
func (s *systemPS) DiskUsage() ([]*disk.DiskUsageStat, error) {
func (s *systemPS) DiskUsage(
mountPointFilter []string,
) ([]*disk.DiskUsageStat, error) {
parts, err := disk.DiskPartitions(true)
if err != nil {
return nil, err
}
// Make a "set" out of the filter slice
filterSet := make(map[string]bool)
for _, filter := range mountPointFilter {
filterSet[filter] = true
}
var usage []*disk.DiskUsageStat
for _, p := range parts {
if len(mountPointFilter) > 0 {
// If the mount point is not a member of the filter set,
// don't gather info on it.
_, ok := filterSet[p.Mountpoint]
if !ok {
continue
}
}
if _, err := os.Stat(p.Mountpoint); err == nil {
du, err := disk.DiskUsage(p.Mountpoint)
if err != nil {
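The hunk is cut off above. Stitched together, the filtered DiskUsage plausibly reads as below; everything after the inner disk.DiskUsage error check is an assumption about the pre-existing tail that records each stat, not part of the visible diff:

func (s *systemPS) DiskUsage(
	mountPointFilter []string,
) ([]*disk.DiskUsageStat, error) {
	parts, err := disk.DiskPartitions(true)
	if err != nil {
		return nil, err
	}

	// Make a "set" out of the filter slice
	filterSet := make(map[string]bool)
	for _, filter := range mountPointFilter {
		filterSet[filter] = true
	}

	var usage []*disk.DiskUsageStat
	for _, p := range parts {
		if len(mountPointFilter) > 0 {
			// If the mount point is not a member of the filter set,
			// don't gather info on it.
			_, ok := filterSet[p.Mountpoint]
			if !ok {
				continue
			}
		}
		if _, err := os.Stat(p.Mountpoint); err == nil {
			du, err := disk.DiskUsage(p.Mountpoint)
			if err != nil {
				// Assumed: the error is propagated, as before this commit.
				return nil, err
			}
			// Assumed: the usage stat is kept for this mount point.
			usage = append(usage, du)
		}
	}
	return usage, nil
}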