parent f176c28a56
commit 56aee1ceee
@@ -5,6 +5,7 @@
 - [#1138](https://github.com/influxdata/telegraf/pull/1138): nstat input plugin. Thanks @Maksadbek!
 - [#1139](https://github.com/influxdata/telegraf/pull/1139): instrumental output plugin. Thanks @jasonroelofs!
 - [#1172](https://github.com/influxdata/telegraf/pull/1172): Ceph storage stats. Thanks @robinpercy!
+- [#1233](https://github.com/influxdata/telegraf/pull/1233): Updated golint gopsutil dependency.
 
 ### Bugfixes
 
Godeps
@@ -42,7 +42,7 @@ github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
 github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
 github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
 github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
-github.com/shirou/gopsutil 37d89088411de59a4ef9fc340afa0e89dfcb4ea9
+github.com/shirou/gopsutil bae75faa5ad1212d3e80f11f5e9cd147c6be9198
 github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
 github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
 github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
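The gopsutil pin moves from 37d8908 to bae75fa, which crosses upstream's rename of exported identifiers to drop the redundant package-name prefix (`cpu.CPUTimesStat` → `cpu.TimesStat`, `disk.DiskUsage` → `disk.Usage`, `net.NetIOCounters` → `net.IOCounters`, and so on). Every Go change below is the mechanical follow-up to that rename. A rough sketch of the new surface (not code from this commit), assuming the pre-v2 `github.com/shirou/gopsutil/...` import layout pinned above:

```go
// Minimal sketch of the renamed gopsutil calls this commit migrates to.
package main

import (
	"fmt"

	"github.com/shirou/gopsutil/cpu"
	"github.com/shirou/gopsutil/disk"
	"github.com/shirou/gopsutil/net"
)

func main() {
	// cpu.CPUTimes -> cpu.Times, returning []cpu.TimesStat (was []cpu.CPUTimesStat).
	times, err := cpu.Times(false)
	if err == nil && len(times) > 0 {
		fmt.Println("user:", times[0].User, "system:", times[0].System)
	}

	// disk.DiskUsage -> disk.Usage, returning *disk.UsageStat.
	if du, err := disk.Usage("/"); err == nil {
		fmt.Println("used:", du.Used, "free:", du.Free)
	}

	// net.NetIOCounters -> net.IOCounters, returning []net.IOCountersStat.
	if nio, err := net.IOCounters(true); err == nil {
		for _, io := range nio {
			fmt.Println(io.Name, io.BytesSent, io.BytesRecv)
		}
	}
}
```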
@@ -71,7 +71,7 @@ func (p *SpecProcessor) pushMetrics() {
 		fields[prefix+"write_bytes"] = io.WriteCount
 	}
 
-	cpu_time, err := p.proc.CPUTimes()
+	cpu_time, err := p.proc.Times()
 	if err == nil {
 		fields[prefix+"cpu_time_user"] = cpu_time.User
 		fields[prefix+"cpu_time_system"] = cpu_time.System
@@ -86,7 +86,7 @@ func (p *SpecProcessor) pushMetrics() {
 		fields[prefix+"cpu_time_guest_nice"] = cpu_time.GuestNice
 	}
 
-	cpu_perc, err := p.proc.CPUPercent(time.Duration(0))
+	cpu_perc, err := p.proc.Percent(time.Duration(0))
 	if err == nil && cpu_perc != 0 {
 		fields[prefix+"cpu_usage"] = cpu_perc
 	}
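The per-process API follows the same pattern: `Process.CPUTimes` and `Process.CPUPercent` become `Process.Times` and `Process.Percent`. A minimal standalone sketch (not from the commit), assuming gopsutil's `process` package; a zero interval for `Percent` computes usage since the previous call:

```go
// Hedged sketch of the renamed process-level calls used by SpecProcessor.
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/shirou/gopsutil/process"
)

func main() {
	proc, err := process.NewProcess(int32(os.Getpid()))
	if err != nil {
		panic(err)
	}

	// Was proc.CPUTimes(); now returns *cpu.TimesStat.
	if t, err := proc.Times(); err == nil {
		fmt.Println("user:", t.User, "system:", t.System)
	}

	// Was proc.CPUPercent(...); zero interval measures against the last call.
	if pct, err := proc.Percent(time.Duration(0)); err == nil {
		fmt.Println("cpu%:", pct)
	}
}
```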
@@ -11,7 +11,7 @@ import (
 
 type CPUStats struct {
 	ps        PS
-	lastStats []cpu.CPUTimesStat
+	lastStats []cpu.TimesStat
 
 	PerCPU   bool `toml:"percpu"`
 	TotalCPU bool `toml:"totalcpu"`
@@ -105,7 +105,7 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
 	return nil
 }
 
-func totalCpuTime(t cpu.CPUTimesStat) float64 {
+func totalCpuTime(t cpu.TimesStat) float64 {
 	total := t.User + t.System + t.Nice + t.Iowait + t.Irq + t.Softirq + t.Steal +
 		t.Guest + t.GuestNice + t.Idle
 	return total
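For context (not code from this commit): the plugin holds the previous sample in `lastStats` so a later Gather can turn two cumulative `cpu.TimesStat` readings into busy-time percentages, with `totalCpuTime` supplying the denominator. A self-contained sketch of that delta arithmetic, with invented helper names and illustrative idle values:

```go
// Illustrative delta computation between two cumulative CPU-time samples.
package main

import "fmt"

// timesStat mirrors a few of the cpu.TimesStat fields summed above.
type timesStat struct {
	User, System, Idle float64
}

func total(t timesStat) float64 { return t.User + t.System + t.Idle }

// busyDelta returns the percentage of non-idle time between two samples.
func busyDelta(last, now timesStat) float64 {
	dt := total(now) - total(last)
	if dt <= 0 {
		return 0
	}
	return 100 * (dt - (now.Idle - last.Idle)) / dt
}

func main() {
	last := timesStat{User: 3.1, System: 8.2, Idle: 80.1}   // first Gather
	now := timesStat{User: 11.4, System: 10.9, Idle: 158.9} // second Gather
	fmt.Printf("usage_busy: %.2f%%\n", busyDelta(last, now))
}
```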
@@ -15,7 +15,7 @@ func TestCPUStats(t *testing.T) {
 	defer mps.AssertExpectations(t)
 	var acc testutil.Accumulator
 
-	cts := cpu.CPUTimesStat{
+	cts := cpu.TimesStat{
 		CPU:    "cpu0",
 		User:   3.1,
 		System: 8.2,
@@ -29,7 +29,7 @@ func TestCPUStats(t *testing.T) {
 		GuestNice: 0.324,
 	}
 
-	cts2 := cpu.CPUTimesStat{
+	cts2 := cpu.TimesStat{
 		CPU:    "cpu0",
 		User:   11.4, // increased by 8.3
 		System: 10.9, // increased by 2.7
@@ -43,7 +43,7 @@ func TestCPUStats(t *testing.T) {
 		GuestNice: 2.524, // increased by 2.2
 	}
 
-	mps.On("CPUTimes").Return([]cpu.CPUTimesStat{cts}, nil)
+	mps.On("CPUTimes").Return([]cpu.TimesStat{cts}, nil)
 
 	cs := NewCPUStats(&mps)
 
@@ -68,7 +68,7 @@ func TestCPUStats(t *testing.T) {
 	assertContainsTaggedFloat(t, &acc, "cpu", "time_guest_nice", 0.324, 0, cputags)
 
 	mps2 := MockPS{}
-	mps2.On("CPUTimes").Return([]cpu.CPUTimesStat{cts2}, nil)
+	mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil)
 	cs.ps = &mps2
 
 	// Should have added cpu percentages too
@@ -15,7 +15,7 @@ func TestDiskStats(t *testing.T) {
 	var acc testutil.Accumulator
 	var err error
 
-	duAll := []*disk.DiskUsageStat{
+	duAll := []*disk.UsageStat{
 		{
 			Path:   "/",
 			Fstype: "ext4",
@@ -37,7 +37,7 @@ func TestDiskStats(t *testing.T) {
 			InodesUsed: 2000,
 		},
 	}
-	duFiltered := []*disk.DiskUsageStat{
+	duFiltered := []*disk.UsageStat{
 		{
 			Path:   "/",
 			Fstype: "ext4",
@@ -108,7 +108,7 @@ func TestDiskStats(t *testing.T) {
 	// var acc testutil.Accumulator
 	// var err error
 
-	// diskio1 := disk.DiskIOCountersStat{
+	// diskio1 := disk.IOCountersStat{
 	// ReadCount: 888,
 	// WriteCount: 5341,
 	// ReadBytes: 100000,
@@ -119,7 +119,7 @@ func TestDiskStats(t *testing.T) {
 	// IoTime: 123552,
 	// SerialNumber: "ab-123-ad",
 	// }
-	// diskio2 := disk.DiskIOCountersStat{
+	// diskio2 := disk.IOCountersStat{
 	// ReadCount: 444,
 	// WriteCount: 2341,
 	// ReadBytes: 200000,
@@ -132,7 +132,7 @@ func TestDiskStats(t *testing.T) {
 	// }
 
 	// mps.On("DiskIO").Return(
-	// map[string]disk.DiskIOCountersStat{"sda1": diskio1, "sdb1": diskio2},
+	// map[string]disk.IOCountersStat{"sda1": diskio1, "sdb1": diskio2},
 	// nil)
 
 	// err = (&DiskIOStats{ps: &mps}).Gather(&acc)
@@ -15,55 +15,55 @@ type MockPS struct {
 	mock.Mock
 }
 
-func (m *MockPS) LoadAvg() (*load.LoadAvgStat, error) {
+func (m *MockPS) LoadAvg() (*load.AvgStat, error) {
 	ret := m.Called()
 
-	r0 := ret.Get(0).(*load.LoadAvgStat)
+	r0 := ret.Get(0).(*load.AvgStat)
 	r1 := ret.Error(1)
 
 	return r0, r1
 }
 
-func (m *MockPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) {
+func (m *MockPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.TimesStat, error) {
 	ret := m.Called()
 
-	r0 := ret.Get(0).([]cpu.CPUTimesStat)
+	r0 := ret.Get(0).([]cpu.TimesStat)
 	r1 := ret.Error(1)
 
 	return r0, r1
 }
 
-func (m *MockPS) DiskUsage(mountPointFilter []string, fstypeExclude []string) ([]*disk.DiskUsageStat, error) {
+func (m *MockPS) DiskUsage(mountPointFilter []string, fstypeExclude []string) ([]*disk.UsageStat, error) {
 	ret := m.Called(mountPointFilter, fstypeExclude)
 
-	r0 := ret.Get(0).([]*disk.DiskUsageStat)
+	r0 := ret.Get(0).([]*disk.UsageStat)
 	r1 := ret.Error(1)
 
 	return r0, r1
 }
 
-func (m *MockPS) NetIO() ([]net.NetIOCountersStat, error) {
+func (m *MockPS) NetIO() ([]net.IOCountersStat, error) {
 	ret := m.Called()
 
-	r0 := ret.Get(0).([]net.NetIOCountersStat)
+	r0 := ret.Get(0).([]net.IOCountersStat)
 	r1 := ret.Error(1)
 
 	return r0, r1
 }
 
-func (m *MockPS) NetProto() ([]net.NetProtoCountersStat, error) {
+func (m *MockPS) NetProto() ([]net.ProtoCountersStat, error) {
 	ret := m.Called()
 
-	r0 := ret.Get(0).([]net.NetProtoCountersStat)
+	r0 := ret.Get(0).([]net.ProtoCountersStat)
 	r1 := ret.Error(1)
 
 	return r0, r1
 }
 
-func (m *MockPS) DiskIO() (map[string]disk.DiskIOCountersStat, error) {
+func (m *MockPS) DiskIO() (map[string]disk.IOCountersStat, error) {
 	ret := m.Called()
 
-	r0 := ret.Get(0).(map[string]disk.DiskIOCountersStat)
+	r0 := ret.Get(0).(map[string]disk.IOCountersStat)
 	r1 := ret.Error(1)
 
 	return r0, r1
@@ -87,10 +87,10 @@ func (m *MockPS) SwapStat() (*mem.SwapMemoryStat, error) {
 	return r0, r1
 }
 
-func (m *MockPS) NetConnections() ([]net.NetConnectionStat, error) {
+func (m *MockPS) NetConnections() ([]net.ConnectionStat, error) {
 	ret := m.Called()
 
-	r0 := ret.Get(0).([]net.NetConnectionStat)
+	r0 := ret.Get(0).([]net.ConnectionStat)
 	r1 := ret.Error(1)
 
 	return r0, r1
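MockPS is a hand-written `github.com/stretchr/testify/mock` stub: each method records the call with `m.Called()` and type-asserts the canned return values back out, which is why only the asserted types needed to change in this file. A minimal sketch of that pattern, with an invented `Fetcher` interface standing in for PS:

```go
// Sketch of the testify/mock pattern MockPS follows; Fetcher is illustrative.
package main

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

type Fetcher interface {
	Fetch(key string) (int, error)
}

type MockFetcher struct {
	mock.Mock
}

func (m *MockFetcher) Fetch(key string) (int, error) {
	ret := m.Called(key) // record the call, look up the canned return values
	return ret.Get(0).(int), ret.Error(1)
}

func main() {
	mf := &MockFetcher{}
	mf.On("Fetch", "cpu0").Return(42, nil) // mirrors mps.On("CPUTimes").Return(...)
	v, err := mf.Fetch("cpu0")
	fmt.Println(v, err) // 42 <nil>
	// In a real test: defer mf.AssertExpectations(t)
}
```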
@@ -15,7 +15,7 @@ func TestNetStats(t *testing.T) {
 	defer mps.AssertExpectations(t)
 	var acc testutil.Accumulator
 
-	netio := net.NetIOCountersStat{
+	netio := net.IOCountersStat{
 		Name:      "eth0",
 		BytesSent: 1123,
 		BytesRecv: 8734422,
@@ -27,10 +27,10 @@ func TestNetStats(t *testing.T) {
 		Dropout: 1,
 	}
 
-	mps.On("NetIO").Return([]net.NetIOCountersStat{netio}, nil)
+	mps.On("NetIO").Return([]net.IOCountersStat{netio}, nil)
 
-	netprotos := []net.NetProtoCountersStat{
-		net.NetProtoCountersStat{
+	netprotos := []net.ProtoCountersStat{
+		net.ProtoCountersStat{
 			Protocol: "Udp",
 			Stats: map[string]int64{
 				"InDatagrams": 4655,
@@ -40,17 +40,17 @@ func TestNetStats(t *testing.T) {
 	}
 	mps.On("NetProto").Return(netprotos, nil)
 
-	netstats := []net.NetConnectionStat{
-		net.NetConnectionStat{
+	netstats := []net.ConnectionStat{
+		net.ConnectionStat{
 			Type: syscall.SOCK_DGRAM,
 		},
-		net.NetConnectionStat{
+		net.ConnectionStat{
 			Status: "ESTABLISHED",
 		},
-		net.NetConnectionStat{
+		net.ConnectionStat{
 			Status: "ESTABLISHED",
 		},
-		net.NetConnectionStat{
+		net.ConnectionStat{
 			Status: "CLOSE",
 		},
 	}
@@ -13,14 +13,14 @@ import (
 )
 
 type PS interface {
-	CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error)
-	DiskUsage(mountPointFilter []string, fstypeExclude []string) ([]*disk.DiskUsageStat, error)
-	NetIO() ([]net.NetIOCountersStat, error)
-	NetProto() ([]net.NetProtoCountersStat, error)
-	DiskIO() (map[string]disk.DiskIOCountersStat, error)
+	CPUTimes(perCPU, totalCPU bool) ([]cpu.TimesStat, error)
+	DiskUsage(mountPointFilter []string, fstypeExclude []string) ([]*disk.UsageStat, error)
+	NetIO() ([]net.IOCountersStat, error)
+	NetProto() ([]net.ProtoCountersStat, error)
+	DiskIO() (map[string]disk.IOCountersStat, error)
 	VMStat() (*mem.VirtualMemoryStat, error)
 	SwapStat() (*mem.SwapMemoryStat, error)
-	NetConnections() ([]net.NetConnectionStat, error)
+	NetConnections() ([]net.ConnectionStat, error)
 }
 
 func add(acc telegraf.Accumulator,
@@ -32,17 +32,17 @@ func add(acc telegraf.Accumulator,
 
 type systemPS struct{}
 
-func (s *systemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) {
-	var cpuTimes []cpu.CPUTimesStat
+func (s *systemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.TimesStat, error) {
+	var cpuTimes []cpu.TimesStat
 	if perCPU {
-		if perCPUTimes, err := cpu.CPUTimes(true); err == nil {
+		if perCPUTimes, err := cpu.Times(true); err == nil {
 			cpuTimes = append(cpuTimes, perCPUTimes...)
 		} else {
 			return nil, err
 		}
 	}
 	if totalCPU {
-		if totalCPUTimes, err := cpu.CPUTimes(false); err == nil {
+		if totalCPUTimes, err := cpu.Times(false); err == nil {
 			cpuTimes = append(cpuTimes, totalCPUTimes...)
 		} else {
 			return nil, err
@@ -54,8 +54,8 @@ func (s *systemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) {
 func (s *systemPS) DiskUsage(
 	mountPointFilter []string,
 	fstypeExclude []string,
-) ([]*disk.DiskUsageStat, error) {
-	parts, err := disk.DiskPartitions(true)
+) ([]*disk.UsageStat, error) {
+	parts, err := disk.Partitions(true)
 	if err != nil {
 		return nil, err
 	}
@@ -70,7 +70,7 @@ func (s *systemPS) DiskUsage(
 		fstypeExcludeSet[filter] = true
 	}
 
-	var usage []*disk.DiskUsageStat
+	var usage []*disk.UsageStat
 
 	for _, p := range parts {
 		if len(mountPointFilter) > 0 {
@@ -83,7 +83,7 @@ func (s *systemPS) DiskUsage(
 		}
 		mountpoint := os.Getenv("HOST_MOUNT_PREFIX") + p.Mountpoint
 		if _, err := os.Stat(mountpoint); err == nil {
-			du, err := disk.DiskUsage(mountpoint)
+			du, err := disk.Usage(mountpoint)
 			du.Path = p.Mountpoint
 			if err != nil {
 				return nil, err
@@ -102,20 +102,20 @@ func (s *systemPS) DiskUsage(
 	return usage, nil
 }
 
-func (s *systemPS) NetProto() ([]net.NetProtoCountersStat, error) {
-	return net.NetProtoCounters(nil)
+func (s *systemPS) NetProto() ([]net.ProtoCountersStat, error) {
+	return net.ProtoCounters(nil)
 }
 
-func (s *systemPS) NetIO() ([]net.NetIOCountersStat, error) {
-	return net.NetIOCounters(true)
+func (s *systemPS) NetIO() ([]net.IOCountersStat, error) {
+	return net.IOCounters(true)
 }
 
-func (s *systemPS) NetConnections() ([]net.NetConnectionStat, error) {
-	return net.NetConnections("all")
+func (s *systemPS) NetConnections() ([]net.ConnectionStat, error) {
+	return net.Connections("all")
 }
 
-func (s *systemPS) DiskIO() (map[string]disk.DiskIOCountersStat, error) {
-	m, err := disk.DiskIOCounters()
+func (s *systemPS) DiskIO() (map[string]disk.IOCountersStat, error) {
+	m, err := disk.IOCounters()
 	if err == internal.NotImplementedError {
 		return nil, nil
 	}
@@ -22,12 +22,12 @@ func (_ *SystemStats) Description() string {
 func (_ *SystemStats) SampleConfig() string { return "" }
 
 func (_ *SystemStats) Gather(acc telegraf.Accumulator) error {
-	loadavg, err := load.LoadAvg()
+	loadavg, err := load.Avg()
 	if err != nil {
 		return err
 	}
 
-	hostinfo, err := host.HostInfo()
+	hostinfo, err := host.Info()
 	if err != nil {
 		return err
 	}
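For completeness, a short sketch of the two renamed entry points in this last hunk, `load.Avg` (was `load.LoadAvg`) and `host.Info` (was `host.HostInfo`); the printed fields are illustrative:

```go
// Sketch of the renamed load/host calls; not code from this commit.
package main

import (
	"fmt"

	"github.com/shirou/gopsutil/host"
	"github.com/shirou/gopsutil/load"
)

func main() {
	// Was load.LoadAvg(); returns *load.AvgStat (was *load.LoadAvgStat).
	if la, err := load.Avg(); err == nil {
		fmt.Println("load1:", la.Load1, "load5:", la.Load5, "load15:", la.Load15)
	}

	// Was host.HostInfo(); returns *host.InfoStat.
	if hi, err := host.Info(); err == nil {
		fmt.Println("hostname:", hi.Hostname, "uptime:", hi.Uptime)
	}
}
```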