Merge pull request #673 from miketonks/f-docker-percentages
Add calculated cpu and memory percentages to docker input (via config option)
This commit is contained in:
commit d003ca46c7
@@ -67,6 +67,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
 	var wg sync.WaitGroup
 	wg.Add(len(containers))
 	for _, container := range containers {
+
 		go func(c docker.APIContainers) {
 			defer wg.Done()
 			err := d.gatherContainer(c, acc)
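For context, the gather loop above fans out one goroutine per container and joins them with a sync.WaitGroup. A minimal standalone sketch of the same fan-out pattern (the container names and the doGather helper are hypothetical stand-ins, not part of the plugin):

```go
package main

import (
	"fmt"
	"sync"
)

// doGather is a hypothetical stand-in for d.gatherContainer.
func doGather(name string) error {
	fmt.Println("gathered stats for", name)
	return nil
}

func main() {
	containers := []string{"web", "db", "cache"} // hypothetical container list

	var wg sync.WaitGroup
	wg.Add(len(containers))
	for _, container := range containers {
		// Pass the loop variable as an argument so each goroutine
		// works on its own copy rather than the shared iteration variable.
		go func(c string) {
			defer wg.Done()
			if err := doGather(c); err != nil {
				fmt.Println("error:", err)
			}
		}(container)
	}
	wg.Wait()
}
```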
@@ -177,6 +178,7 @@ func gatherContainerStats(
 		"pgfault": stat.MemoryStats.Stats.Pgfault,
 		"inactive_file": stat.MemoryStats.Stats.InactiveFile,
 		"total_pgpgin": stat.MemoryStats.Stats.TotalPgpgin,
+		"usage_percent": calculateMemPercent(stat),
 	}
 	acc.AddFields("docker_mem", memfields, tags, now)

@@ -188,6 +190,7 @@ func gatherContainerStats(
 		"throttling_periods": stat.CPUStats.ThrottlingData.Periods,
 		"throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods,
 		"throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime,
+		"usage_percent": calculateCPUPercent(stat),
 	}
 	cputags := copyTags(tags)
 	cputags["cpu"] = "cpu-total"
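The computed percentage lands in the same docker_cpu measurement as the raw counters, tagged cpu=cpu-total. The formula (CPU delta over system delta, scaled by the number of per-CPU entries) appears to mirror the CPU % shown by the `docker stats` CLI, though that equivalence is an observation here, not something this diff states.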
@@ -219,6 +222,26 @@ func gatherContainerStats(
 	gatherBlockIOMetrics(stat, acc, tags, now)
 }

+func calculateMemPercent(stat *docker.Stats) float64 {
+	var memPercent = 0.0
+	if stat.MemoryStats.Limit > 0 {
+		memPercent = float64(stat.MemoryStats.Usage) / float64(stat.MemoryStats.Limit) * 100.0
+	}
+	return memPercent
+}
+
+func calculateCPUPercent(stat *docker.Stats) float64 {
+	var cpuPercent = 0.0
+	// calculate the change for the cpu and system usage of the container in between readings
+	cpuDelta := float64(stat.CPUStats.CPUUsage.TotalUsage) - float64(stat.PreCPUStats.CPUUsage.TotalUsage)
+	systemDelta := float64(stat.CPUStats.SystemCPUUsage) - float64(stat.PreCPUStats.SystemCPUUsage)
+
+	if systemDelta > 0.0 && cpuDelta > 0.0 {
+		cpuPercent = (cpuDelta / systemDelta) * float64(len(stat.CPUStats.CPUUsage.PercpuUsage)) * 100.0
+	}
+	return cpuPercent
+}
+
 func gatherBlockIOMetrics(
 	stat *docker.Stats,
 	acc telegraf.Accumulator,
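To make the arithmetic concrete, here is a self-contained sketch of the two formulas above, with plain structs standing in for docker.Stats. The memory values (1111 / 2000) and the pre-reading CPU counters (400 and 50) come from the test fixture further down; the current-reading TotalUsage of 500 and the two per-CPU entries are assumptions chosen to reproduce the test's expected results:

```go
package main

import "fmt"

// Minimal stand-ins for the fields of docker.Stats used by the formulas.
type cpuUsage struct {
	TotalUsage  uint64
	PercpuUsage []uint64
}

type stats struct {
	MemUsage, MemLimit                uint64
	CPU, PreCPU                       cpuUsage
	SystemCPUUsage, PreSystemCPUUsage uint64
}

func memPercent(s stats) float64 {
	if s.MemLimit == 0 {
		return 0.0
	}
	return float64(s.MemUsage) / float64(s.MemLimit) * 100.0
}

func cpuPercent(s stats) float64 {
	cpuDelta := float64(s.CPU.TotalUsage) - float64(s.PreCPU.TotalUsage)
	systemDelta := float64(s.SystemCPUUsage) - float64(s.PreSystemCPUUsage)
	if systemDelta > 0.0 && cpuDelta > 0.0 {
		return (cpuDelta / systemDelta) * float64(len(s.CPU.PercpuUsage)) * 100.0
	}
	return 0.0
}

func main() {
	s := stats{
		MemUsage: 1111, MemLimit: 2000, // from the fixture: 1111/2000 = 55.55%
		// TotalUsage 500 and two per-CPU entries are assumed; only the
		// pre-reading values (400 and 50) are visible in this diff.
		CPU:               cpuUsage{TotalUsage: 500, PercpuUsage: []uint64{250, 250}},
		PreCPU:            cpuUsage{TotalUsage: 400},
		SystemCPUUsage:    100,
		PreSystemCPUUsage: 50,
	}
	fmt.Printf("mem usage_percent = %.2f\n", memPercent(s)) // 55.55
	fmt.Printf("cpu usage_percent = %.2f\n", cpuPercent(s)) // (100/50)*2*100 = 400.00
}
```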
@@ -49,7 +49,7 @@ func TestDockerGatherContainerStats(t *testing.T) {
 		"max_usage": uint64(1001),
 		"usage": uint64(1111),
 		"fail_count": uint64(1),
-		"limit": uint64(20),
+		"limit": uint64(2000),
 		"total_pgmafault": uint64(0),
 		"cache": uint64(0),
 		"mapped_file": uint64(0),
@@ -79,7 +79,9 @@ func TestDockerGatherContainerStats(t *testing.T) {
 		"pgfault": uint64(2),
 		"inactive_file": uint64(3),
 		"total_pgpgin": uint64(4),
+		"usage_percent": float64(55.55),
 	}
+
 	acc.AssertContainsTaggedFields(t, "docker_mem", memfields, tags)

 	// test docker_cpu measurement
@@ -93,6 +95,7 @@ func TestDockerGatherContainerStats(t *testing.T) {
 		"throttling_periods": uint64(1),
 		"throttling_throttled_periods": uint64(0),
 		"throttling_throttled_time": uint64(0),
+		"usage_percent": float64(400.0),
 	}
 	acc.AssertContainsTaggedFields(t, "docker_cpu", cpufields, cputags)

@@ -122,6 +125,9 @@ func testStats() *docker.Stats {
 	stats.CPUStats.SystemCPUUsage = 100
 	stats.CPUStats.ThrottlingData.Periods = 1

+	stats.PreCPUStats.CPUUsage.TotalUsage = 400
+	stats.PreCPUStats.SystemCPUUsage = 50
+
 	stats.MemoryStats.Stats.TotalPgmafault = 0
 	stats.MemoryStats.Stats.Cache = 0
 	stats.MemoryStats.Stats.MappedFile = 0
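Plugging these fixture values into calculateCPUPercent: systemDelta = 100 − 50 = 50. The expected docker_cpu usage_percent of 400.0 asserted above then requires (cpuDelta / 50) × len(PercpuUsage) × 100 = 400. The current-reading counters are not visible in this excerpt, but, for example, a TotalUsage of 500 (cpuDelta = 100) together with two PercpuUsage entries satisfies it.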
@@ -155,7 +161,7 @@ func testStats() *docker.Stats {
 	stats.MemoryStats.MaxUsage = 1001
 	stats.MemoryStats.Usage = 1111
 	stats.MemoryStats.Failcnt = 1
-	stats.MemoryStats.Limit = 20
+	stats.MemoryStats.Limit = 2000

 	stats.Networks["eth0"] = docker.NetworkStats{
 		RxDropped: 1,