Add calculated cpu and memory percentages to docker input (via config option)
parent 9e87128588, commit 1ba3192de8
@@ -22,8 +22,16 @@ for the stat structure can be found
endpoint = "unix:///var/run/docker.sock"
# Only collect metrics for these containers, collect all if empty
container_names = []
calculate_percentages = false
```

### Calculate Percentages

Optionally, percentages can be calculated for CPU and memory usage. This uses
the same calculation as the `docker stats` command-line tool. Set this option
to `true` to enable this feature.

### Measurements & Fields:

Every effort was made to preserve the names based on the JSON response from the

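For reference, the percentages follow the `docker stats` approach: memory percent is usage over the container's memory limit, and CPU percent is the container's CPU time delta over the system CPU time delta between two readings, scaled by the number of CPUs. A minimal standalone sketch of the two formulas; the helper names and example values here are illustrative, not part of the plugin:

```go
package main

import "fmt"

// memPercent: container memory usage relative to its memory limit,
// as `docker stats` reports it. Illustrative helper, not plugin API.
func memPercent(usage, limit float64) float64 {
	if limit == 0 {
		return 0
	}
	return usage / limit * 100.0
}

// cpuPercent: container CPU time delta over system CPU time delta between
// two readings, scaled by the number of CPUs. Illustrative helper, not plugin API.
func cpuPercent(cpuDelta, systemDelta float64, numCPUs int) float64 {
	if systemDelta <= 0 || cpuDelta <= 0 {
		return 0
	}
	return (cpuDelta / systemDelta) * float64(numCPUs) * 100.0
}

func main() {
	// e.g. the container used 0.25s of CPU while the whole system used 1s,
	// on a host with 4 CPUs (values in nanoseconds, as Docker reports them)
	fmt.Printf("cpu: %.1f%%\n", cpuPercent(0.25e9, 1e9, 4)) // cpu: 100.0%
	fmt.Printf("mem: %.1f%%\n", memPercent(512e6, 2048e6))  // mem: 25.0%
}
```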
@@ -14,8 +14,9 @@ import (
)

type Docker struct {
    Endpoint       string
    ContainerNames []string
    Endpoint             string
    ContainerNames       []string
    CalculatePercentages bool

    client *docker.Client
}

@@ -27,6 +28,8 @@ var sampleConfig = `
endpoint = "unix:///var/run/docker.sock"
### Only collect metrics for these containers, collect all if empty
container_names = []
# Add calculated percentages for mem and cpu, as per 'docker stats' command
calculate_percentages = false
`

func (d *Docker) Description() string {

@@ -67,6 +70,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
    var wg sync.WaitGroup
    wg.Add(len(containers))
    for _, container := range containers {

        go func(c docker.APIContainers) {
            defer wg.Done()
            err := d.gatherContainer(c, acc)

@@ -131,7 +135,7 @@ func (d *Docker) gatherContainer(
        tags[k] = v
    }

    gatherContainerStats(stat, acc, tags)
    gatherContainerStats(stat, acc, tags, d.CalculatePercentages)

    return nil
}

@@ -140,6 +144,7 @@ func gatherContainerStats(
    stat *docker.Stats,
    acc telegraf.Accumulator,
    tags map[string]string,
    calculate_percentages bool,
) {
    now := stat.Read

@@ -178,6 +183,9 @@ func gatherContainerStats(
        "inactive_file": stat.MemoryStats.Stats.InactiveFile,
        "total_pgpgin": stat.MemoryStats.Stats.TotalPgpgin,
    }
    if calculate_percentages {
        memfields["usage_percent"] = float64(stat.MemoryStats.Usage) / float64(stat.MemoryStats.Limit) * 100.0
    }
    acc.AddFields("docker_mem", memfields, tags, now)

    cpufields := map[string]interface{}{

@@ -189,6 +197,9 @@ func gatherContainerStats(
        "throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods,
        "throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime,
    }
    if calculate_percentages {
        cpufields["usage_percent"] = calculateCPUPercent(stat)
    }
    cputags := copyTags(tags)
    cputags["cpu"] = "cpu-total"
    acc.AddFields("docker_cpu", cpufields, cputags, now)

@@ -219,6 +230,21 @@ func gatherContainerStats(
    gatherBlockIOMetrics(stat, acc, tags, now)
}

func calculateCPUPercent(stat *docker.Stats) float64 {
    var (
        cpuPercent = 0.0
        // calculate the change for the cpu usage of the container in between readings
        cpuDelta = float64(stat.CPUStats.CPUUsage.TotalUsage) - float64(stat.PreCPUStats.CPUUsage.TotalUsage)
        // calculate the change for the entire system between readings
        systemDelta = float64(stat.CPUStats.SystemCPUUsage) - float64(stat.PreCPUStats.SystemCPUUsage)
    )

    if systemDelta > 0.0 && cpuDelta > 0.0 {
        cpuPercent = (cpuDelta / systemDelta) * float64(len(stat.CPUStats.CPUUsage.PercpuUsage)) * 100.0
    }
    return cpuPercent
}

func gatherBlockIOMetrics(
    stat *docker.Stats,
    acc telegraf.Accumulator,

@@ -18,7 +18,7 @@ func TestDockerGatherContainerStats(t *testing.T) {
        "cont_name": "redis",
        "cont_image": "redis/image",
    }
    gatherContainerStats(stats, &acc, tags)
    gatherContainerStats(stats, &acc, tags, false)

    // test docker_net measurement
    netfields := map[string]interface{}{

@@ -45,55 +45,13 @@ func TestDockerGatherContainerStats(t *testing.T) {
    acc.AssertContainsTaggedFields(t, "docker_blkio", blkiofields, blkiotags)

    // test docker_mem measurement
    memfields := map[string]interface{}{
        "max_usage": uint64(1001),
        "usage": uint64(1111),
        "fail_count": uint64(1),
        "limit": uint64(20),
        "total_pgmafault": uint64(0),
        "cache": uint64(0),
        "mapped_file": uint64(0),
        "total_inactive_file": uint64(0),
        "pgpgout": uint64(0),
        "rss": uint64(0),
        "total_mapped_file": uint64(0),
        "writeback": uint64(0),
        "unevictable": uint64(0),
        "pgpgin": uint64(0),
        "total_unevictable": uint64(0),
        "pgmajfault": uint64(0),
        "total_rss": uint64(44),
        "total_rss_huge": uint64(444),
        "total_writeback": uint64(55),
        "total_inactive_anon": uint64(0),
        "rss_huge": uint64(0),
        "hierarchical_memory_limit": uint64(0),
        "total_pgfault": uint64(0),
        "total_active_file": uint64(0),
        "active_anon": uint64(0),
        "total_active_anon": uint64(0),
        "total_pgpgout": uint64(0),
        "total_cache": uint64(0),
        "inactive_anon": uint64(0),
        "active_file": uint64(1),
        "pgfault": uint64(2),
        "inactive_file": uint64(3),
        "total_pgpgin": uint64(4),
    }
    memfields := sample_mem_fields()
    acc.AssertContainsTaggedFields(t, "docker_mem", memfields, tags)

    // test docker_cpu measurement
    cputags := copyTags(tags)
    cputags["cpu"] = "cpu-total"
    cpufields := map[string]interface{}{
        "usage_total": uint64(500),
        "usage_in_usermode": uint64(100),
        "usage_in_kernelmode": uint64(200),
        "usage_system": uint64(100),
        "throttling_periods": uint64(1),
        "throttling_throttled_periods": uint64(0),
        "throttling_throttled_time": uint64(0),
    }
    cpufields := sample_cpu_fields()
    acc.AssertContainsTaggedFields(t, "docker_cpu", cpufields, cputags)

    cputags["cpu"] = "cpu0"

@@ -109,6 +67,30 @@ func TestDockerGatherContainerStats(t *testing.T) {
    acc.AssertContainsTaggedFields(t, "docker_cpu", cpu1fields, cputags)
}

func TestDockerGatherContainerPercentages(t *testing.T) {
    var acc testutil.Accumulator
    stats := testStats()

    tags := map[string]string{
        "cont_id": "foobarbaz",
        "cont_name": "redis",
        "cont_image": "redis/image",
    }
    gatherContainerStats(stats, &acc, tags, true)

    // test docker_mem measurement
    memfields := sample_mem_fields()
    memfields["usage_percent"] = 55.55
    acc.AssertContainsTaggedFields(t, "docker_mem", memfields, tags)

    // test docker_cpu measurement
    cputags := copyTags(tags)
    cputags["cpu"] = "cpu-total"
    cpufields := sample_cpu_fields()
    cpufields["usage_percent"] = 400.0
    acc.AssertContainsTaggedFields(t, "docker_cpu", cpufields, cputags)
}

func testStats() *docker.Stats {
    stats := &docker.Stats{
        Read: time.Now(),

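The expected percentages in this test follow directly from the fixture values set in testStats() below: memory usage 1111 against a limit of 2000, container CPU usage 500 with a previous reading of 400, and system CPU usage 100 with a previous reading of 50. A quick standalone check of that arithmetic; note that numCPUs = 2 is an assumption inferred from the 400.0 expectation, since the fixture's PercpuUsage slice is not visible in this excerpt:

```go
package main

import "fmt"

func main() {
	// memory: usage / limit * 100, with the fixture's usage=1111 and limit=2000
	fmt.Println(float64(1111) / float64(2000) * 100.0) // 55.55

	// cpu: (cpuDelta / systemDelta) * numCPUs * 100, with total usage 500
	// (previously 400) and system usage 100 (previously 50); numCPUs = 2 is
	// assumed from len(PercpuUsage), which is not shown in this excerpt
	cpuDelta := float64(500 - 400)
	systemDelta := float64(100 - 50)
	fmt.Println((cpuDelta / systemDelta) * 2 * 100.0) // 400
}
```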
@@ -122,6 +104,9 @@ func testStats() *docker.Stats {
    stats.CPUStats.SystemCPUUsage = 100
    stats.CPUStats.ThrottlingData.Periods = 1

    stats.PreCPUStats.CPUUsage.TotalUsage = 400
    stats.PreCPUStats.SystemCPUUsage = 50

    stats.MemoryStats.Stats.TotalPgmafault = 0
    stats.MemoryStats.Stats.Cache = 0
    stats.MemoryStats.Stats.MappedFile = 0

@@ -155,7 +140,7 @@ func testStats() *docker.Stats {
    stats.MemoryStats.MaxUsage = 1001
    stats.MemoryStats.Usage = 1111
    stats.MemoryStats.Failcnt = 1
    stats.MemoryStats.Limit = 20
    stats.MemoryStats.Limit = 2000

    stats.Networks["eth0"] = docker.NetworkStats{
        RxDropped: 1,

@@ -188,3 +173,58 @@ func testStats() *docker.Stats {

    return stats
}

func sample_mem_fields() map[string]interface{} {

    memfields := map[string]interface{}{
        "max_usage": uint64(1001),
        "usage": uint64(1111),
        "fail_count": uint64(1),
        "limit": uint64(2000),
        "total_pgmafault": uint64(0),
        "cache": uint64(0),
        "mapped_file": uint64(0),
        "total_inactive_file": uint64(0),
        "pgpgout": uint64(0),
        "rss": uint64(0),
        "total_mapped_file": uint64(0),
        "writeback": uint64(0),
        "unevictable": uint64(0),
        "pgpgin": uint64(0),
        "total_unevictable": uint64(0),
        "pgmajfault": uint64(0),
        "total_rss": uint64(44),
        "total_rss_huge": uint64(444),
        "total_writeback": uint64(55),
        "total_inactive_anon": uint64(0),
        "rss_huge": uint64(0),
        "hierarchical_memory_limit": uint64(0),
        "total_pgfault": uint64(0),
        "total_active_file": uint64(0),
        "active_anon": uint64(0),
        "total_active_anon": uint64(0),
        "total_pgpgout": uint64(0),
        "total_cache": uint64(0),
        "inactive_anon": uint64(0),
        "active_file": uint64(1),
        "pgfault": uint64(2),
        "inactive_file": uint64(3),
        "total_pgpgin": uint64(4),
    }

    return memfields
}

func sample_cpu_fields() map[string]interface{} {

    cpufields := map[string]interface{}{
        "usage_total": uint64(500),
        "usage_in_usermode": uint64(100),
        "usage_in_kernelmode": uint64(200),
        "usage_system": uint64(100),
        "throttling_periods": uint64(1),
        "throttling_throttled_periods": uint64(0),
        "throttling_throttled_time": uint64(0),
    }
    return cpufields
}