Fix docker memory and cpu reporting in Windows (#3043)

parent 5f88be022c
commit d6cf9f4f30
@@ -19,6 +19,7 @@ works:
 - github.com/dancannon/gorethink [APACHE](https://github.com/dancannon/gorethink/blob/master/LICENSE)
 - github.com/davecgh/go-spew [ISC](https://github.com/davecgh/go-spew/blob/master/LICENSE)
 - github.com/docker/docker [APACHE](https://github.com/docker/docker/blob/master/LICENSE)
+- github.com/docker/cli [APACHE](https://github.com/docker/cli/blob/master/LICENSE)
 - github.com/eapache/go-resiliency [MIT](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
 - github.com/eapache/go-xerial-snappy [MIT](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE)
 - github.com/eapache/queue [MIT](https://github.com/eapache/queue/blob/master/LICENSE)

@@ -21,7 +21,8 @@ for the stat structure can be found
   ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
   endpoint = "unix:///var/run/docker.sock"

-  ## Only collect metrics for these containers. Values will be appended to container_name_include.
+  ## Only collect metrics for these containers. Values will be appended to
+  ## container_name_include.
   ## Deprecated (1.4.0), use container_name_include
   container_names = []
@@ -39,6 +40,9 @@ for the stat structure can be found
   ## Whether to report for each container total blkio and network stats or not
   total = false

+  ## Which environment variables should we use as a tag
+  tag_env = ["JAVA_HOME", "HEAP_SIZE"]
+
   ## docker labels to include and exclude as tags. Globs accepted.
   ## Note that an empty array for both will include all labels as tags
   docker_label_include = []
@@ -46,6 +50,13 @@ for the stat structure can be found

   ## Which environment variables should we use as a tag
   tag_env = ["JAVA_HOME", "HEAP_SIZE"]

+  ## Optional SSL Config
+  # ssl_ca = "/etc/telegraf/ca.pem"
+  # ssl_cert = "/etc/telegraf/cert.pem"
+  # ssl_key = "/etc/telegraf/key.pem"
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
+
 ```

 ### Measurements & Fields:

@@ -0,0 +1,53 @@
+package docker
+
+import (
+	"context"
+
+	"github.com/docker/docker/api/types"
+	docker "github.com/docker/docker/client"
+)
+
+var (
+	version        string
+	defaultHeaders = map[string]string{"User-Agent": "engine-api-cli-1.0"}
+)
+
+type Client interface {
+	Info(ctx context.Context) (types.Info, error)
+	ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
+	ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error)
+	ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error)
+}
+
+func NewEnvClient() (Client, error) {
+	client, err := docker.NewEnvClient()
+	if err != nil {
+		return nil, err
+	}
+	return &SocketClient{client}, nil
+}
+
+func NewClient(host string) (Client, error) {
+	client, err := docker.NewClient(host, version, nil, defaultHeaders)
+	if err != nil {
+		return nil, err
+	}
+	return &SocketClient{client}, nil
+}
+
+type SocketClient struct {
+	client *docker.Client
+}
+
+func (c *SocketClient) Info(ctx context.Context) (types.Info, error) {
+	return c.client.Info(ctx)
+}
+func (c *SocketClient) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
+	return c.client.ContainerList(ctx, options)
+}
+func (c *SocketClient) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
+	return c.client.ContainerStats(ctx, containerID, stream)
+}
+func (c *SocketClient) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
+	return c.client.ContainerInspect(ctx, containerID)
+}

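Note, for context on the new file above: the point of the small `Client` interface is that anything satisfying it can be swapped in for the real Docker engine client. The sketch below is illustrative only — `nopClient` is a hypothetical stub, not part of this change — and it assumes only the interface and the unexported `newClient` hook that this commit introduces.

```go
package docker

import (
	"context"

	"github.com/docker/docker/api/types"
)

// nopClient is a hypothetical stand-in that satisfies the Client interface
// shown above without talking to a real daemon.
type nopClient struct{}

func (nopClient) Info(ctx context.Context) (types.Info, error) { return types.Info{}, nil }
func (nopClient) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
	return nil, nil
}
func (nopClient) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
	return types.ContainerStats{}, nil
}
func (nopClient) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
	return types.ContainerJSON{}, nil
}

// Inside the package, a test can then inject the stub instead of dialing Docker:
//
//	d := Docker{newClient: func(string) (Client, error) { return nopClient{}, nil }}
```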
@@ -12,7 +12,6 @@ import (
 	"time"

 	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/client"
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/filter"
 	"github.com/influxdata/telegraf/internal"
@@ -46,61 +45,14 @@ type Docker struct {
 	ContainerExclude []string `toml:"container_name_exclude"`
 	ContainerFilter DockerContainerFilter

-	client      *client.Client
-	engine_host string
-
-	testing        bool
+	newEnvClient func() (Client, error)
+	newClient    func(host string) (Client, error)
+
+	client      Client
+	engine_host string
 	filtersCreated bool
 }

-// infoWrapper wraps client.Client.List for testing.
-func infoWrapper(c *client.Client, ctx context.Context) (types.Info, error) {
-	if c != nil {
-		return c.Info(ctx)
-	}
-	fc := FakeDockerClient{}
-	return fc.Info(ctx)
-}
-
-// listWrapper wraps client.Client.ContainerList for testing.
-func listWrapper(
-	c *client.Client,
-	ctx context.Context,
-	options types.ContainerListOptions,
-) ([]types.Container, error) {
-	if c != nil {
-		return c.ContainerList(ctx, options)
-	}
-	fc := FakeDockerClient{}
-	return fc.ContainerList(ctx, options)
-}
-
-// statsWrapper wraps client.Client.ContainerStats for testing.
-func statsWrapper(
-	c *client.Client,
-	ctx context.Context,
-	containerID string,
-	stream bool,
-) (types.ContainerStats, error) {
-	if c != nil {
-		return c.ContainerStats(ctx, containerID, stream)
-	}
-	fc := FakeDockerClient{}
-	return fc.ContainerStats(ctx, containerID, stream)
-}
-
-func inspectWrapper(
-	c *client.Client,
-	ctx context.Context,
-	containerID string,
-) (types.ContainerJSON, error) {
-	if c != nil {
-		return c.ContainerInspect(ctx, containerID)
-	}
-	fc := FakeDockerClient{}
-	return fc.ContainerInspect(ctx, containerID)
-}
-
 // KB, MB, GB, TB, PB...human friendly
 const (
 	KB = 1000
@@ -145,32 +97,28 @@ var sampleConfig = `
   docker_label_exclude = []
 `

-// Description returns input description
 func (d *Docker) Description() string {
 	return "Read metrics about docker containers"
 }

-// SampleConfig prints sampleConfig
 func (d *Docker) SampleConfig() string { return sampleConfig }

-// Gather starts stats collection
 func (d *Docker) Gather(acc telegraf.Accumulator) error {
-	if d.client == nil && !d.testing {
-		var c *client.Client
+	if d.client == nil {
+		var c Client
 		var err error
-		defaultHeaders := map[string]string{"User-Agent": "engine-api-cli-1.0"}
 		if d.Endpoint == "ENV" {
-			c, err = client.NewEnvClient()
+			c, err = d.newEnvClient()
 			if err != nil {
 				return err
 			}
 		} else if d.Endpoint == "" {
-			c, err = client.NewClient("unix:///var/run/docker.sock", "", nil, defaultHeaders)
+			c, err = d.newClient("unix:///var/run/docker.sock")
 			if err != nil {
 				return err
 			}
 		} else {
-			c, err = client.NewClient(d.Endpoint, "", nil, defaultHeaders)
+			c, err = d.newClient(d.Endpoint)
 			if err != nil {
 				return err
 			}
@@ -201,7 +149,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
 	opts := types.ContainerListOptions{}
 	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
 	defer cancel()
-	containers, err := listWrapper(d.client, ctx, opts)
+	containers, err := d.client.ContainerList(ctx, opts)
 	if err != nil {
 		return err
 	}
@@ -232,7 +180,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
 	// Get info from docker daemon
 	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
 	defer cancel()
-	info, err := infoWrapper(d.client, ctx)
+	info, err := d.client.Info(ctx)
 	if err != nil {
 		return err
 	}
@@ -338,7 +286,7 @@ func (d *Docker) gatherContainer(

 	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
 	defer cancel()
-	r, err := statsWrapper(d.client, ctx, container.ID, false)
+	r, err := d.client.ContainerStats(ctx, container.ID, false)
 	if err != nil {
 		return fmt.Errorf("Error getting docker stats: %s", err.Error())
 	}
@@ -350,6 +298,7 @@ func (d *Docker) gatherContainer(
 		}
 		return fmt.Errorf("Error decoding: %s", err.Error())
 	}
+	daemonOSType := r.OSType

 	// Add labels to tags
 	for k, label := range container.Labels {
@@ -362,7 +311,7 @@ func (d *Docker) gatherContainer(

 	// Add whitelisted environment variables to tags
 	if len(d.TagEnvironment) > 0 {
-		info, err := inspectWrapper(d.client, ctx, container.ID)
+		info, err := d.client.ContainerInspect(ctx, container.ID)
 		if err != nil {
 			return fmt.Errorf("Error inspecting docker container: %s", err.Error())
 		}
@@ -377,7 +326,7 @@ func (d *Docker) gatherContainer(
 		}
 	}

-	gatherContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total)
+	gatherContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total, daemonOSType)

 	return nil
 }
@@ -389,46 +338,68 @@ func gatherContainerStats(
 	id string,
 	perDevice bool,
 	total bool,
+	daemonOSType string,
 ) {
 	now := stat.Read

 	memfields := map[string]interface{}{
-		"max_usage": stat.MemoryStats.MaxUsage,
-		"usage": stat.MemoryStats.Usage,
-		"fail_count": stat.MemoryStats.Failcnt,
-		"limit": stat.MemoryStats.Limit,
-		"total_pgmafault": stat.MemoryStats.Stats["total_pgmajfault"],
-		"cache": stat.MemoryStats.Stats["cache"],
-		"mapped_file": stat.MemoryStats.Stats["mapped_file"],
-		"total_inactive_file": stat.MemoryStats.Stats["total_inactive_file"],
-		"pgpgout": stat.MemoryStats.Stats["pagpgout"],
-		"rss": stat.MemoryStats.Stats["rss"],
-		"total_mapped_file": stat.MemoryStats.Stats["total_mapped_file"],
-		"writeback": stat.MemoryStats.Stats["writeback"],
-		"unevictable": stat.MemoryStats.Stats["unevictable"],
-		"pgpgin": stat.MemoryStats.Stats["pgpgin"],
-		"total_unevictable": stat.MemoryStats.Stats["total_unevictable"],
-		"pgmajfault": stat.MemoryStats.Stats["pgmajfault"],
-		"total_rss": stat.MemoryStats.Stats["total_rss"],
-		"total_rss_huge": stat.MemoryStats.Stats["total_rss_huge"],
-		"total_writeback": stat.MemoryStats.Stats["total_write_back"],
-		"total_inactive_anon": stat.MemoryStats.Stats["total_inactive_anon"],
-		"rss_huge": stat.MemoryStats.Stats["rss_huge"],
-		"hierarchical_memory_limit": stat.MemoryStats.Stats["hierarchical_memory_limit"],
-		"total_pgfault": stat.MemoryStats.Stats["total_pgfault"],
-		"total_active_file": stat.MemoryStats.Stats["total_active_file"],
-		"active_anon": stat.MemoryStats.Stats["active_anon"],
-		"total_active_anon": stat.MemoryStats.Stats["total_active_anon"],
-		"total_pgpgout": stat.MemoryStats.Stats["total_pgpgout"],
-		"total_cache": stat.MemoryStats.Stats["total_cache"],
-		"inactive_anon": stat.MemoryStats.Stats["inactive_anon"],
-		"active_file": stat.MemoryStats.Stats["active_file"],
-		"pgfault": stat.MemoryStats.Stats["pgfault"],
-		"inactive_file": stat.MemoryStats.Stats["inactive_file"],
-		"total_pgpgin": stat.MemoryStats.Stats["total_pgpgin"],
-		"usage_percent": calculateMemPercent(stat),
-		"container_id": id,
+		"container_id": id,
 	}
+
+	memstats := []string{
+		"active_anon",
+		"active_file",
+		"cache",
+		"hierarchical_memory_limit",
+		"inactive_anon",
+		"inactive_file",
+		"mapped_file",
+		"pgfault",
+		"pgmajfault",
+		"pgpgin",
+		"pgpgout",
+		"rss",
+		"rss_huge",
+		"total_active_anon",
+		"total_active_file",
+		"total_cache",
+		"total_inactive_anon",
+		"total_inactive_file",
+		"total_mapped_file",
+		"total_pgfault",
+		"total_pgmajfault",
+		"total_pgpgin",
+		"total_pgpgout",
+		"total_rss",
+		"total_rss_huge",
+		"total_unevictable",
+		"total_writeback",
+		"unevictable",
+		"writeback",
+	}
+	for _, field := range memstats {
+		if value, ok := stat.MemoryStats.Stats[field]; ok {
+			memfields[field] = value
+		}
+	}
+	if stat.MemoryStats.Failcnt != 0 {
+		memfields["fail_count"] = stat.MemoryStats.Failcnt
+	}
+
+	if daemonOSType != "windows" {
+		memfields["limit"] = stat.MemoryStats.Limit
+		memfields["usage"] = stat.MemoryStats.Usage
+		memfields["max_usage"] = stat.MemoryStats.MaxUsage
+
+		mem := calculateMemUsageUnixNoCache(stat.MemoryStats)
+		memLimit := float64(stat.MemoryStats.Limit)
+		memfields["usage_percent"] = calculateMemPercentUnixNoCache(memLimit, mem)
+	} else {
+		memfields["commit_bytes"] = stat.MemoryStats.Commit
+		memfields["commit_peak_bytes"] = stat.MemoryStats.CommitPeak
+		memfields["private_working_set"] = stat.MemoryStats.PrivateWorkingSet
+	}

 	acc.AddFields("docker_container_mem", memfields, tags, now)

 	cpufields := map[string]interface{}{
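For context on the Linux branch above: `calculateMemUsageUnixNoCache` and `calculateMemPercentUnixNoCache` are not part of this diff. Assuming they mirror the docker/cli stats helpers (which this change adds to the dependency license list), they exclude the page cache before computing the percentage — a sketch under that assumption, not the exact vendored code:

```go
// Sketch only — assumes parity with docker/cli's stats helpers.
func calculateMemUsageUnixNoCache(mem types.MemoryStats) float64 {
	// Exclude the page cache so usage reflects memory the container actually holds.
	return float64(mem.Usage - mem.Stats["cache"])
}

func calculateMemPercentUnixNoCache(limit float64, usedNoCache float64) float64 {
	if limit != 0 {
		return usedNoCache / limit * 100.0
	}
	return 0
}
```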
@@ -439,9 +410,19 @@ func gatherContainerStats(
 		"throttling_periods": stat.CPUStats.ThrottlingData.Periods,
 		"throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods,
 		"throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime,
-		"usage_percent": calculateCPUPercent(stat),
 		"container_id": id,
 	}

+	if daemonOSType != "windows" {
+		previousCPU := stat.PreCPUStats.CPUUsage.TotalUsage
+		previousSystem := stat.PreCPUStats.SystemUsage
+		cpuPercent := calculateCPUPercentUnix(previousCPU, previousSystem, stat)
+		cpufields["usage_percent"] = cpuPercent
+	} else {
+		cpuPercent := calculateCPUPercentWindows(stat)
+		cpufields["usage_percent"] = cpuPercent
+	}
+
 	cputags := copyTags(tags)
 	cputags["cpu"] = "cpu-total"
 	acc.AddFields("docker_container_cpu", cpufields, cputags, now)
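Likewise, `calculateCPUPercentUnix` and `calculateCPUPercentWindows` are defined outside this hunk. Assuming they follow the docker/cli helpers, the Windows variant works from 100ns processor intervals between the two reads rather than cgroup system usage — again a sketch under that assumption:

```go
// Sketch only — assumes parity with docker/cli's stats helpers.
func calculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 {
	cpuDelta := float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU)
	systemDelta := float64(v.CPUStats.SystemUsage) - float64(previousSystem)
	if systemDelta > 0.0 && cpuDelta > 0.0 {
		return (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0
	}
	return 0.0
}

func calculateCPUPercentWindows(v *types.StatsJSON) float64 {
	// Possible 100ns intervals across all processors between the two reads.
	possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) / 100 * uint64(v.NumProcs)
	intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage
	if possIntervals > 0 {
		return float64(intervalsUsed) / float64(possIntervals) * 100.0
	}
	return 0.0
}
```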
@@ -521,30 +502,6 @@ func gatherContainerStats(
 		gatherBlockIOMetrics(stat, acc, tags, now, id, perDevice, total)
 	}
 }

-func calculateMemPercent(stat *types.StatsJSON) float64 {
-	var memPercent = 0.0
-	if stat.MemoryStats.Limit > 0 {
-		memPercent = float64(stat.MemoryStats.Usage) / float64(stat.MemoryStats.Limit) * 100.0
-	}
-	return memPercent
-}
-
-func calculateCPUPercent(stat *types.StatsJSON) float64 {
-	var cpuPercent = 0.0
-	// calculate the change for the cpu and system usage of the container in between readings
-	cpuDelta := float64(stat.CPUStats.CPUUsage.TotalUsage) - float64(stat.PreCPUStats.CPUUsage.TotalUsage)
-	systemDelta := float64(stat.CPUStats.SystemUsage) - float64(stat.PreCPUStats.SystemUsage)
-
-	if systemDelta > 0.0 && cpuDelta > 0.0 {
-		if stat.CPUStats.OnlineCPUs > 0 {
-			cpuPercent = (cpuDelta / systemDelta) * float64(stat.CPUStats.OnlineCPUs) * 100.0
-		} else {
-			cpuPercent = (cpuDelta / systemDelta) * float64(len(stat.CPUStats.CPUUsage.PercpuUsage)) * 100.0
-		}
-	}
-	return cpuPercent
-}
-
 func gatherBlockIOMetrics(
 	stat *types.StatsJSON,
 	acc telegraf.Accumulator,
@@ -742,6 +699,8 @@ func init() {
 		return &Docker{
 			PerDevice: true,
 			Timeout: internal.Duration{Duration: time.Second * 5},
+			newEnvClient: NewEnvClient,
+			newClient: NewClient,
 			filtersCreated: false,
 		}
 	})

@@ -1,8 +1,8 @@
 package docker

 import (
+	"context"
 	"testing"
-	"time"

 	"github.com/influxdata/telegraf/testutil"
@@ -10,6 +10,56 @@ import (
 	"github.com/stretchr/testify/require"
 )

+type MockClient struct {
+	InfoF func(ctx context.Context) (types.Info, error)
+	ContainerListF func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
+	ContainerStatsF func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error)
+	ContainerInspectF func(ctx context.Context, containerID string) (types.ContainerJSON, error)
+}
+
+func (c *MockClient) Info(ctx context.Context) (types.Info, error) {
+	return c.InfoF(ctx)
+}
+
+func (c *MockClient) ContainerList(
+	ctx context.Context,
+	options types.ContainerListOptions,
+) ([]types.Container, error) {
+	return c.ContainerListF(ctx, options)
+}
+
+func (c *MockClient) ContainerStats(
+	ctx context.Context,
+	containerID string,
+	stream bool,
+) (types.ContainerStats, error) {
+	return c.ContainerStatsF(ctx, containerID, stream)
+}
+
+func (c *MockClient) ContainerInspect(
+	ctx context.Context,
+	containerID string,
+) (types.ContainerJSON, error) {
+	return c.ContainerInspectF(ctx, containerID)
+}
+
+func newClient(host string) (Client, error) {
+	return &MockClient{
+		InfoF: func(context.Context) (types.Info, error) {
+			return info, nil
+		},
+		ContainerListF: func(context.Context, types.ContainerListOptions) ([]types.Container, error) {
+			return containerList, nil
+		},
+		ContainerStatsF: func(context.Context, string, bool) (types.ContainerStats, error) {
+			return containerStats(), nil
+		},
+		ContainerInspectF: func(context.Context, string) (types.ContainerJSON, error) {
+			return containerInspect, nil
+		},
+	}, nil
+}
+
 func TestDockerGatherContainerStats(t *testing.T) {
 	var acc testutil.Accumulator
 	stats := testStats()
@@ -19,7 +69,7 @@ func TestDockerGatherContainerStats(t *testing.T) {
 		"container_image": "redis/image",
 	}

-	gatherContainerStats(stats, &acc, tags, "123456789", true, true)
+	gatherContainerStats(stats, &acc, tags, "123456789", true, true, "linux")

 	// test docker_container_net measurement
 	netfields := map[string]interface{}{
|
|
||||||
// test docker_container_mem measurement
|
// test docker_container_mem measurement
|
||||||
memfields := map[string]interface{}{
|
memfields := map[string]interface{}{
|
||||||
"max_usage": uint64(1001),
|
|
||||||
"usage": uint64(1111),
|
|
||||||
"fail_count": uint64(1),
|
|
||||||
"limit": uint64(2000),
|
|
||||||
"total_pgmafault": uint64(0),
|
|
||||||
"cache": uint64(0),
|
|
||||||
"mapped_file": uint64(0),
|
|
||||||
"total_inactive_file": uint64(0),
|
|
||||||
"pgpgout": uint64(0),
|
|
||||||
"rss": uint64(0),
|
|
||||||
"total_mapped_file": uint64(0),
|
|
||||||
"writeback": uint64(0),
|
|
||||||
"unevictable": uint64(0),
|
|
||||||
"pgpgin": uint64(0),
|
|
||||||
"total_unevictable": uint64(0),
|
|
||||||
"pgmajfault": uint64(0),
|
|
||||||
"total_rss": uint64(44),
|
|
||||||
"total_rss_huge": uint64(444),
|
|
||||||
"total_writeback": uint64(55),
|
|
||||||
"total_inactive_anon": uint64(0),
|
|
||||||
"rss_huge": uint64(0),
|
|
||||||
"hierarchical_memory_limit": uint64(0),
|
|
||||||
"total_pgfault": uint64(0),
|
|
||||||
"total_active_file": uint64(0),
|
|
||||||
"active_anon": uint64(0),
|
"active_anon": uint64(0),
|
||||||
"total_active_anon": uint64(0),
|
|
||||||
"total_pgpgout": uint64(0),
|
|
||||||
"total_cache": uint64(0),
|
|
||||||
"inactive_anon": uint64(0),
|
|
||||||
"active_file": uint64(1),
|
"active_file": uint64(1),
|
||||||
"pgfault": uint64(2),
|
"cache": uint64(0),
|
||||||
"inactive_file": uint64(3),
|
|
||||||
"total_pgpgin": uint64(4),
|
|
||||||
"usage_percent": float64(55.55),
|
|
||||||
"container_id": "123456789",
|
"container_id": "123456789",
|
||||||
|
"fail_count": uint64(1),
|
||||||
|
"hierarchical_memory_limit": uint64(0),
|
||||||
|
"inactive_anon": uint64(0),
|
||||||
|
"inactive_file": uint64(3),
|
||||||
|
"limit": uint64(2000),
|
||||||
|
"mapped_file": uint64(0),
|
||||||
|
"max_usage": uint64(1001),
|
||||||
|
"pgfault": uint64(2),
|
||||||
|
"pgmajfault": uint64(0),
|
||||||
|
"pgpgin": uint64(0),
|
||||||
|
"pgpgout": uint64(0),
|
||||||
|
"rss_huge": uint64(0),
|
||||||
|
"rss": uint64(0),
|
||||||
|
"total_active_anon": uint64(0),
|
||||||
|
"total_active_file": uint64(0),
|
||||||
|
"total_cache": uint64(0),
|
||||||
|
"total_inactive_anon": uint64(0),
|
||||||
|
"total_inactive_file": uint64(0),
|
||||||
|
"total_mapped_file": uint64(0),
|
||||||
|
"total_pgfault": uint64(0),
|
||||||
|
"total_pgmajfault": uint64(0),
|
||||||
|
"total_pgpgin": uint64(4),
|
||||||
|
"total_pgpgout": uint64(0),
|
||||||
|
"total_rss_huge": uint64(444),
|
||||||
|
"total_rss": uint64(44),
|
||||||
|
"total_unevictable": uint64(0),
|
||||||
|
"total_writeback": uint64(55),
|
||||||
|
"unevictable": uint64(0),
|
||||||
|
"usage_percent": float64(55.55),
|
||||||
|
"usage": uint64(1111),
|
||||||
|
"writeback": uint64(0),
|
||||||
}
|
}
|
||||||
|
|
||||||
acc.AssertContainsTaggedFields(t, "docker_container_mem", memfields, tags)
|
acc.AssertContainsTaggedFields(t, "docker_container_mem", memfields, tags)
|
||||||
|
@@ -158,231 +208,155 @@ func TestDockerGatherContainerStats(t *testing.T) {
 	acc.AssertDoesNotContainsTaggedFields(t, "docker_container_cpu", cpu3fields, cputags)
 }

-func testStats() *types.StatsJSON {
-	stats := &types.StatsJSON{}
-	stats.Read = time.Now()
-	stats.Networks = make(map[string]types.NetworkStats)
-	stats.CPUStats.OnlineCPUs = 2
-	stats.CPUStats.CPUUsage.PercpuUsage = []uint64{1, 1002, 0, 0}
-	stats.CPUStats.CPUUsage.UsageInUsermode = 100
-	stats.CPUStats.CPUUsage.TotalUsage = 500
-	stats.CPUStats.CPUUsage.UsageInKernelmode = 200
-	stats.CPUStats.SystemUsage = 100
-	stats.CPUStats.ThrottlingData.Periods = 1
-
-	stats.PreCPUStats.CPUUsage.TotalUsage = 400
-	stats.PreCPUStats.SystemUsage = 50
-
-	stats.MemoryStats.Stats = make(map[string]uint64)
-	stats.MemoryStats.Stats["total_pgmajfault"] = 0
-	stats.MemoryStats.Stats["cache"] = 0
-	stats.MemoryStats.Stats["mapped_file"] = 0
-	stats.MemoryStats.Stats["total_inactive_file"] = 0
-	stats.MemoryStats.Stats["pagpgout"] = 0
-	stats.MemoryStats.Stats["rss"] = 0
-	stats.MemoryStats.Stats["total_mapped_file"] = 0
-	stats.MemoryStats.Stats["writeback"] = 0
-	stats.MemoryStats.Stats["unevictable"] = 0
-	stats.MemoryStats.Stats["pgpgin"] = 0
-	stats.MemoryStats.Stats["total_unevictable"] = 0
-	stats.MemoryStats.Stats["pgmajfault"] = 0
-	stats.MemoryStats.Stats["total_rss"] = 44
-	stats.MemoryStats.Stats["total_rss_huge"] = 444
-	stats.MemoryStats.Stats["total_write_back"] = 55
-	stats.MemoryStats.Stats["total_inactive_anon"] = 0
-	stats.MemoryStats.Stats["rss_huge"] = 0
-	stats.MemoryStats.Stats["hierarchical_memory_limit"] = 0
-	stats.MemoryStats.Stats["total_pgfault"] = 0
-	stats.MemoryStats.Stats["total_active_file"] = 0
-	stats.MemoryStats.Stats["active_anon"] = 0
-	stats.MemoryStats.Stats["total_active_anon"] = 0
-	stats.MemoryStats.Stats["total_pgpgout"] = 0
-	stats.MemoryStats.Stats["total_cache"] = 0
-	stats.MemoryStats.Stats["inactive_anon"] = 0
-	stats.MemoryStats.Stats["active_file"] = 1
-	stats.MemoryStats.Stats["pgfault"] = 2
-	stats.MemoryStats.Stats["inactive_file"] = 3
-	stats.MemoryStats.Stats["total_pgpgin"] = 4
-
-	stats.MemoryStats.MaxUsage = 1001
-	stats.MemoryStats.Usage = 1111
-	stats.MemoryStats.Failcnt = 1
-	stats.MemoryStats.Limit = 2000
-
-	stats.Networks["eth0"] = types.NetworkStats{
-		RxDropped: 1,
-		RxBytes: 2,
-		RxErrors: 3,
-		TxPackets: 4,
-		TxDropped: 1,
-		RxPackets: 2,
-		TxErrors: 3,
-		TxBytes: 4,
-	}
-
-	stats.Networks["eth1"] = types.NetworkStats{
-		RxDropped: 5,
-		RxBytes: 6,
-		RxErrors: 7,
-		TxPackets: 8,
-		TxDropped: 5,
-		RxPackets: 6,
-		TxErrors: 7,
-		TxBytes: 8,
-	}
-
-	sbr := types.BlkioStatEntry{
-		Major: 6,
-		Minor: 0,
-		Op: "read",
-		Value: 100,
-	}
-	sr := types.BlkioStatEntry{
-		Major: 6,
-		Minor: 0,
-		Op: "write",
-		Value: 101,
-	}
-	sr2 := types.BlkioStatEntry{
-		Major: 6,
-		Minor: 1,
-		Op: "write",
-		Value: 201,
-	}
-
-	stats.BlkioStats.IoServiceBytesRecursive = append(
-		stats.BlkioStats.IoServiceBytesRecursive, sbr)
-	stats.BlkioStats.IoServicedRecursive = append(
-		stats.BlkioStats.IoServicedRecursive, sr)
-	stats.BlkioStats.IoServicedRecursive = append(
-		stats.BlkioStats.IoServicedRecursive, sr2)
-
-	return stats
-}
-
-var gatherLabelsTests = []struct {
-	include []string
-	exclude []string
-	expected []string
-	notexpected []string
-}{
-	{[]string{}, []string{}, []string{"label1", "label2"}, []string{}},
-	{[]string{"*"}, []string{}, []string{"label1", "label2"}, []string{}},
-	{[]string{"lab*"}, []string{}, []string{"label1", "label2"}, []string{}},
-	{[]string{"label1"}, []string{}, []string{"label1"}, []string{"label2"}},
-	{[]string{"label1*"}, []string{}, []string{"label1"}, []string{"label2"}},
-	{[]string{}, []string{"*"}, []string{}, []string{"label1", "label2"}},
-	{[]string{}, []string{"lab*"}, []string{}, []string{"label1", "label2"}},
-	{[]string{}, []string{"label1"}, []string{"label2"}, []string{"label1"}},
-	{[]string{"*"}, []string{"*"}, []string{}, []string{"label1", "label2"}},
-}
+func TestDocker_WindowsMemoryContainerStats(t *testing.T) {
+	var acc testutil.Accumulator
+
+	d := Docker{
+		newClient: func(string) (Client, error) {
+			return &MockClient{
+				InfoF: func(ctx context.Context) (types.Info, error) {
+					return info, nil
+				},
+				ContainerListF: func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
+					return containerList, nil
+				},
+				ContainerStatsF: func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
+					return containerStatsWindows(), nil
+				},
+				ContainerInspectF: func(ctx context.Context, containerID string) (types.ContainerJSON, error) {
+					return containerInspect, nil
+				},
+			}, nil
+		},
+	}
+	err := d.Gather(&acc)
+	require.NoError(t, err)
+}

 func TestDockerGatherLabels(t *testing.T) {
+	var gatherLabelsTests = []struct {
+		include []string
+		exclude []string
+		expected []string
+		notexpected []string
+	}{
+		{[]string{}, []string{}, []string{"label1", "label2"}, []string{}},
+		{[]string{"*"}, []string{}, []string{"label1", "label2"}, []string{}},
+		{[]string{"lab*"}, []string{}, []string{"label1", "label2"}, []string{}},
+		{[]string{"label1"}, []string{}, []string{"label1"}, []string{"label2"}},
+		{[]string{"label1*"}, []string{}, []string{"label1"}, []string{"label2"}},
+		{[]string{}, []string{"*"}, []string{}, []string{"label1", "label2"}},
+		{[]string{}, []string{"lab*"}, []string{}, []string{"label1", "label2"}},
+		{[]string{}, []string{"label1"}, []string{"label2"}, []string{"label1"}},
+		{[]string{"*"}, []string{"*"}, []string{}, []string{"label1", "label2"}},
+	}
+
 	for _, tt := range gatherLabelsTests {
-		var acc testutil.Accumulator
-		d := Docker{
-			client: nil,
-			testing: true,
-		}
-
-		for _, label := range tt.include {
-			d.LabelInclude = append(d.LabelInclude, label)
-		}
-		for _, label := range tt.exclude {
-			d.LabelExclude = append(d.LabelExclude, label)
-		}
-
-		err := d.Gather(&acc)
-		require.NoError(t, err)
-
-		for _, label := range tt.expected {
-			if !acc.HasTag("docker_container_cpu", label) {
-				t.Errorf("Didn't get expected label of %s. Test was: Include: %s Exclude %s",
-					label, tt.include, tt.exclude)
-			}
-		}
-
-		for _, label := range tt.notexpected {
-			if acc.HasTag("docker_container_cpu", label) {
-				t.Errorf("Got unexpected label of %s. Test was: Include: %s Exclude %s",
-					label, tt.include, tt.exclude)
-			}
-		}
+		t.Run("", func(t *testing.T) {
+			var acc testutil.Accumulator
+			d := Docker{
+				newClient: newClient,
+			}
+
+			for _, label := range tt.include {
+				d.LabelInclude = append(d.LabelInclude, label)
+			}
+			for _, label := range tt.exclude {
+				d.LabelExclude = append(d.LabelExclude, label)
+			}
+
+			err := d.Gather(&acc)
+			require.NoError(t, err)
+
+			for _, label := range tt.expected {
+				if !acc.HasTag("docker_container_cpu", label) {
+					t.Errorf("Didn't get expected label of %s. Test was: Include: %s Exclude %s",
+						label, tt.include, tt.exclude)
+				}
+			}
+
+			for _, label := range tt.notexpected {
+				if acc.HasTag("docker_container_cpu", label) {
+					t.Errorf("Got unexpected label of %s. Test was: Include: %s Exclude %s",
+						label, tt.include, tt.exclude)
+				}
+			}
+		})
 	}
 }

-var gatherContainerNames = []struct {
-	include []string
-	exclude []string
-	expected []string
-	notexpected []string
-}{
-	{[]string{}, []string{}, []string{"etcd", "etcd2"}, []string{}},
-	{[]string{"*"}, []string{}, []string{"etcd", "etcd2"}, []string{}},
-	{[]string{"etc*"}, []string{}, []string{"etcd", "etcd2"}, []string{}},
-	{[]string{"etcd"}, []string{}, []string{"etcd"}, []string{"etcd2"}},
-	{[]string{"etcd2*"}, []string{}, []string{"etcd2"}, []string{"etcd"}},
-	{[]string{}, []string{"etc*"}, []string{}, []string{"etcd", "etcd2"}},
-	{[]string{}, []string{"etcd"}, []string{"etcd2"}, []string{"etcd"}},
-	{[]string{"*"}, []string{"*"}, []string{"etcd", "etcd2"}, []string{}},
-	{[]string{}, []string{"*"}, []string{""}, []string{"etcd", "etcd2"}},
-}
-
 func TestContainerNames(t *testing.T) {
+	var gatherContainerNames = []struct {
+		include []string
+		exclude []string
+		expected []string
+		notexpected []string
+	}{
+		{[]string{}, []string{}, []string{"etcd", "etcd2"}, []string{}},
+		{[]string{"*"}, []string{}, []string{"etcd", "etcd2"}, []string{}},
+		{[]string{"etc*"}, []string{}, []string{"etcd", "etcd2"}, []string{}},
+		{[]string{"etcd"}, []string{}, []string{"etcd"}, []string{"etcd2"}},
+		{[]string{"etcd2*"}, []string{}, []string{"etcd2"}, []string{"etcd"}},
+		{[]string{}, []string{"etc*"}, []string{}, []string{"etcd", "etcd2"}},
+		{[]string{}, []string{"etcd"}, []string{"etcd2"}, []string{"etcd"}},
+		{[]string{"*"}, []string{"*"}, []string{"etcd", "etcd2"}, []string{}},
+		{[]string{}, []string{"*"}, []string{""}, []string{"etcd", "etcd2"}},
+	}
+
 	for _, tt := range gatherContainerNames {
-		var acc testutil.Accumulator
-
-		d := Docker{
-			client: nil,
-			testing: true,
-			ContainerInclude: tt.include,
-			ContainerExclude: tt.exclude,
-		}
-
-		err := d.Gather(&acc)
-		require.NoError(t, err)
-
-		for _, metric := range acc.Metrics {
-			if metric.Measurement == "docker_container_cpu" {
-				if val, ok := metric.Tags["container_name"]; ok {
-					var found bool = false
-					for _, cname := range tt.expected {
-						if val == cname {
-							found = true
-							break
-						}
-					}
-					if !found {
-						t.Errorf("Got unexpected container of %s. Test was -> Include: %s, Exclude: %s", val, tt.include, tt.exclude)
-					}
-				}
-			}
-		}
-
-		for _, metric := range acc.Metrics {
-			if metric.Measurement == "docker_container_cpu" {
-				if val, ok := metric.Tags["container_name"]; ok {
-					var found bool = false
-					for _, cname := range tt.notexpected {
-						if val == cname {
-							found = true
-							break
-						}
-					}
-					if found {
-						t.Errorf("Got unexpected container of %s. Test was -> Include: %s, Exclude: %s", val, tt.include, tt.exclude)
-					}
-				}
-			}
-		}
+		t.Run("", func(t *testing.T) {
+			var acc testutil.Accumulator
+
+			d := Docker{
+				newClient: newClient,
+				ContainerInclude: tt.include,
+				ContainerExclude: tt.exclude,
+			}
+
+			err := d.Gather(&acc)
+			require.NoError(t, err)
+
+			for _, metric := range acc.Metrics {
+				if metric.Measurement == "docker_container_cpu" {
+					if val, ok := metric.Tags["container_name"]; ok {
+						var found bool = false
+						for _, cname := range tt.expected {
+							if val == cname {
+								found = true
+								break
+							}
+						}
+						if !found {
+							t.Errorf("Got unexpected container of %s. Test was -> Include: %s, Exclude: %s", val, tt.include, tt.exclude)
+						}
+					}
+				}
+			}
+
+			for _, metric := range acc.Metrics {
+				if metric.Measurement == "docker_container_cpu" {
+					if val, ok := metric.Tags["container_name"]; ok {
+						var found bool = false
+						for _, cname := range tt.notexpected {
+							if val == cname {
+								found = true
+								break
+							}
+						}
+						if found {
+							t.Errorf("Got unexpected container of %s. Test was -> Include: %s, Exclude: %s", val, tt.include, tt.exclude)
+						}
+					}
+				}
+			}
+		})
 	}
 }

 func TestDockerGatherInfo(t *testing.T) {
 	var acc testutil.Accumulator
 	d := Docker{
-		client: nil,
-		testing: true,
+		newClient: newClient,
 		TagEnvironment: []string{"ENVVAR1", "ENVVAR2", "ENVVAR3", "ENVVAR5",
 			"ENVVAR6", "ENVVAR7", "ENVVAR8", "ENVVAR9"},
 	}
@@ -441,41 +415,11 @@ func TestDockerGatherInfo(t *testing.T) {
 	acc.AssertContainsTaggedFields(t,
 		"docker_container_mem",
 		map[string]interface{}{
-			"total_pgpgout": uint64(0),
-			"usage_percent": float64(0),
-			"rss": uint64(0),
-			"total_writeback": uint64(0),
-			"active_anon": uint64(0),
-			"total_pgmafault": uint64(0),
-			"total_rss": uint64(0),
-			"total_unevictable": uint64(0),
-			"active_file": uint64(0),
-			"total_mapped_file": uint64(0),
-			"pgpgin": uint64(0),
-			"total_active_file": uint64(0),
-			"total_active_anon": uint64(0),
-			"total_cache": uint64(0),
-			"inactive_anon": uint64(0),
-			"pgmajfault": uint64(0),
-			"total_inactive_anon": uint64(0),
-			"total_rss_huge": uint64(0),
-			"rss_huge": uint64(0),
-			"hierarchical_memory_limit": uint64(0),
-			"pgpgout": uint64(0),
-			"unevictable": uint64(0),
-			"total_inactive_file": uint64(0),
-			"writeback": uint64(0),
-			"total_pgfault": uint64(0),
-			"total_pgpgin": uint64(0),
-			"cache": uint64(0),
-			"mapped_file": uint64(0),
-			"inactive_file": uint64(0),
-			"max_usage": uint64(0),
-			"fail_count": uint64(0),
-			"pgfault": uint64(0),
-			"usage": uint64(0),
-			"limit": uint64(18935443456),
-			"container_id": "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
+			"container_id": "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
+			"limit": uint64(18935443456),
+			"max_usage": uint64(0),
+			"usage": uint64(0),
+			"usage_percent": float64(0),
 		},
 		map[string]string{
 			"engine_host": "absol",
@@ -490,6 +434,4 @@ func TestDockerGatherInfo(t *testing.T) {
 			"label2": "test_value_2",
 		},
 	)
-
-	//fmt.Print(info)
 }

@ -0,0 +1,406 @@
|
||||||
|
package docker
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/docker/docker/api/types"
|
||||||
|
"github.com/docker/docker/api/types/container"
|
||||||
|
"github.com/docker/docker/api/types/registry"
|
||||||
|
)
|
||||||
|
|
||||||
|
var info = types.Info{
|
||||||
|
Containers: 108,
|
||||||
|
ContainersRunning: 98,
|
||||||
|
ContainersStopped: 6,
|
||||||
|
ContainersPaused: 3,
|
||||||
|
OomKillDisable: false,
|
||||||
|
SystemTime: "2016-02-24T00:55:09.15073105-05:00",
|
||||||
|
NEventsListener: 0,
|
||||||
|
ID: "5WQQ:TFWR:FDNG:OKQ3:37Y4:FJWG:QIKK:623T:R3ME:QTKB:A7F7:OLHD",
|
||||||
|
Debug: false,
|
||||||
|
LoggingDriver: "json-file",
|
||||||
|
KernelVersion: "4.3.0-1-amd64",
|
||||||
|
IndexServerAddress: "https://index.docker.io/v1/",
|
||||||
|
MemTotal: 3840757760,
|
||||||
|
Images: 199,
|
||||||
|
CPUCfsQuota: true,
|
||||||
|
Name: "absol",
|
||||||
|
SwapLimit: false,
|
||||||
|
IPv4Forwarding: true,
|
||||||
|
ExperimentalBuild: false,
|
||||||
|
CPUCfsPeriod: true,
|
||||||
|
RegistryConfig: ®istry.ServiceConfig{
|
||||||
|
IndexConfigs: map[string]*registry.IndexInfo{
|
||||||
|
"docker.io": {
|
||||||
|
Name: "docker.io",
|
||||||
|
Mirrors: []string{},
|
||||||
|
Official: true,
|
||||||
|
Secure: true,
|
||||||
|
},
|
||||||
|
}, InsecureRegistryCIDRs: []*registry.NetIPNet{{IP: []byte{127, 0, 0, 0}, Mask: []byte{255, 0, 0, 0}}}, Mirrors: []string{}},
|
||||||
|
OperatingSystem: "Linux Mint LMDE (containerized)",
|
||||||
|
BridgeNfIptables: true,
|
||||||
|
HTTPSProxy: "",
|
||||||
|
Labels: []string{},
|
||||||
|
MemoryLimit: false,
|
||||||
|
DriverStatus: [][2]string{{"Pool Name", "docker-8:1-1182287-pool"}, {"Pool Blocksize", "65.54 kB"}, {"Backing Filesystem", "extfs"}, {"Data file", "/dev/loop0"}, {"Metadata file", "/dev/loop1"}, {"Data Space Used", "17.3 GB"}, {"Data Space Total", "107.4 GB"}, {"Data Space Available", "36.53 GB"}, {"Metadata Space Used", "20.97 MB"}, {"Metadata Space Total", "2.147 GB"}, {"Metadata Space Available", "2.127 GB"}, {"Udev Sync Supported", "true"}, {"Deferred Removal Enabled", "false"}, {"Data loop file", "/var/lib/docker/devicemapper/devicemapper/data"}, {"Metadata loop file", "/var/lib/docker/devicemapper/devicemapper/metadata"}, {"Library Version", "1.02.115 (2016-01-25)"}},
|
||||||
|
NFd: 19,
|
||||||
|
HTTPProxy: "",
|
||||||
|
Driver: "devicemapper",
|
||||||
|
NGoroutines: 39,
|
||||||
|
NCPU: 4,
|
||||||
|
DockerRootDir: "/var/lib/docker",
|
||||||
|
NoProxy: "",
|
||||||
|
BridgeNfIP6tables: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
var containerList = []types.Container{
|
||||||
|
types.Container{
|
||||||
|
ID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb",
|
||||||
|
Names: []string{"/etcd"},
|
||||||
|
Image: "quay.io/coreos/etcd:v2.2.2",
|
||||||
|
Command: "/etcd -name etcd0 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
|
||||||
|
Created: 1455941930,
|
||||||
|
Status: "Up 4 hours",
|
||||||
|
Ports: []types.Port{
|
||||||
|
types.Port{
|
||||||
|
PrivatePort: 7001,
|
||||||
|
PublicPort: 0,
|
||||||
|
Type: "tcp",
|
||||||
|
},
|
||||||
|
types.Port{
|
||||||
|
PrivatePort: 4001,
|
||||||
|
PublicPort: 0,
|
||||||
|
Type: "tcp",
|
||||||
|
},
|
||||||
|
types.Port{
|
||||||
|
PrivatePort: 2380,
|
||||||
|
PublicPort: 0,
|
||||||
|
Type: "tcp",
|
||||||
|
},
|
||||||
|
types.Port{
|
||||||
|
PrivatePort: 2379,
|
||||||
|
PublicPort: 2379,
|
||||||
|
Type: "tcp",
|
||||||
|
IP: "0.0.0.0",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Labels: map[string]string{
|
||||||
|
"label1": "test_value_1",
|
||||||
|
"label2": "test_value_2",
|
||||||
|
},
|
||||||
|
SizeRw: 0,
|
||||||
|
SizeRootFs: 0,
|
||||||
|
},
|
||||||
|
types.Container{
|
||||||
|
ID: "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
|
||||||
|
Names: []string{"/etcd2"},
|
||||||
|
Image: "quay.io:4443/coreos/etcd:v2.2.2",
|
||||||
|
Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
|
||||||
|
Created: 1455941933,
|
||||||
|
Status: "Up 4 hours",
|
||||||
|
Ports: []types.Port{
|
||||||
|
types.Port{
|
||||||
|
PrivatePort: 7002,
|
||||||
|
PublicPort: 0,
|
||||||
|
Type: "tcp",
|
||||||
|
},
|
||||||
|
types.Port{
|
||||||
|
PrivatePort: 4002,
|
||||||
|
PublicPort: 0,
|
||||||
|
Type: "tcp",
|
||||||
|
},
|
||||||
|
types.Port{
|
||||||
|
PrivatePort: 2381,
|
||||||
|
PublicPort: 0,
|
||||||
|
Type: "tcp",
|
||||||
|
},
|
||||||
|
types.Port{
|
||||||
|
PrivatePort: 2382,
|
||||||
|
PublicPort: 2382,
|
||||||
|
Type: "tcp",
|
||||||
|
IP: "0.0.0.0",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Labels: map[string]string{
|
||||||
|
"label1": "test_value_1",
|
||||||
|
"label2": "test_value_2",
|
||||||
|
},
|
||||||
|
SizeRw: 0,
|
||||||
|
SizeRootFs: 0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func containerStats() types.ContainerStats {
|
||||||
|
var stat types.ContainerStats
|
||||||
|
jsonStat := `
|
||||||
|
{
|
||||||
|
"blkio_stats": {
|
||||||
|
"io_service_bytes_recursive": [
|
||||||
|
{
|
||||||
|
"major": 252,
|
||||||
|
"minor": 1,
|
||||||
|
"op": "Read",
|
||||||
|
"value": 753664
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"major": 252,
|
||||||
|
"minor": 1,
|
||||||
|
"op": "Write"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"major": 252,
|
||||||
|
"minor": 1,
|
||||||
|
"op": "Sync"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"major": 252,
|
||||||
|
"minor": 1,
|
||||||
|
"op": "Async",
|
||||||
|
"value": 753664
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"major": 252,
|
||||||
|
"minor": 1,
|
||||||
|
"op": "Total",
|
||||||
|
"value": 753664
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"io_serviced_recursive": [
|
||||||
|
{
|
||||||
|
"major": 252,
|
||||||
|
"minor": 1,
|
||||||
|
"op": "Read",
|
||||||
|
"value": 26
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"major": 252,
|
||||||
|
"minor": 1,
|
||||||
|
"op": "Write"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"major": 252,
|
||||||
|
"minor": 1,
|
||||||
|
"op": "Sync"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"major": 252,
|
||||||
|
"minor": 1,
|
||||||
|
"op": "Async",
|
||||||
|
"value": 26
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"major": 252,
|
||||||
|
"minor": 1,
|
||||||
|
"op": "Total",
|
||||||
|
"value": 26
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"cpu_stats": {
|
||||||
|
"cpu_usage": {
|
||||||
|
"percpu_usage": [
|
||||||
|
17871,
|
||||||
|
4959158,
|
||||||
|
1646137,
|
||||||
|
1231652,
|
||||||
|
11829401,
|
||||||
|
244656,
|
||||||
|
369972,
|
||||||
|
0
|
||||||
|
],
|
||||||
|
"total_usage": 20298847,
|
||||||
|
"usage_in_usermode": 10000000
|
||||||
|
},
|
||||||
|
"system_cpu_usage": 24052607520000000,
|
||||||
|
"throttling_data": {}
|
||||||
|
},
|
||||||
|
"memory_stats": {
|
||||||
|
"limit": 18935443456,
|
||||||
|
"stats": {}
|
||||||
|
},
|
||||||
|
"precpu_stats": {
|
||||||
|
"cpu_usage": {
|
||||||
|
"percpu_usage": [
|
||||||
|
17871,
|
||||||
|
4959158,
|
||||||
|
1646137,
|
||||||
|
1231652,
|
||||||
|
11829401,
|
||||||
|
244656,
|
||||||
|
369972,
|
||||||
|
0
|
||||||
|
],
|
||||||
|
"total_usage": 20298847,
|
||||||
|
"usage_in_usermode": 10000000
|
||||||
|
},
|
||||||
|
"system_cpu_usage": 24052599550000000,
|
||||||
|
"throttling_data": {}
|
||||||
|
},
|
||||||
|
"read": "2016-02-24T11:42:27.472459608-05:00"
|
||||||
|
}`
|
||||||
|
stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat))
|
||||||
|
return stat
|
||||||
|
}
|
||||||
|
|
||||||
|
func testStats() *types.StatsJSON {
|
||||||
|
stats := &types.StatsJSON{}
|
||||||
|
stats.Read = time.Now()
|
||||||
|
stats.Networks = make(map[string]types.NetworkStats)
|
||||||
|
stats.CPUStats.OnlineCPUs = 2
|
||||||
|
stats.CPUStats.CPUUsage.PercpuUsage = []uint64{1, 1002, 0, 0}
|
||||||
|
stats.CPUStats.CPUUsage.UsageInUsermode = 100
|
||||||
|
stats.CPUStats.CPUUsage.TotalUsage = 500
|
||||||
|
stats.CPUStats.CPUUsage.UsageInKernelmode = 200
|
||||||
|
stats.CPUStats.SystemUsage = 100
|
||||||
|
stats.CPUStats.ThrottlingData.Periods = 1
|
||||||
|
|
||||||
|
stats.PreCPUStats.CPUUsage.TotalUsage = 400
|
||||||
|
stats.PreCPUStats.SystemUsage = 50
|
||||||
|
|
||||||
|
stats.MemoryStats.Stats = make(map[string]uint64)
|
||||||
|
stats.MemoryStats.Stats["active_anon"] = 0
|
||||||
|
stats.MemoryStats.Stats["active_file"] = 1
|
||||||
|
stats.MemoryStats.Stats["cache"] = 0
|
||||||
|
stats.MemoryStats.Stats["hierarchical_memory_limit"] = 0
|
||||||
|
stats.MemoryStats.Stats["inactive_anon"] = 0
|
||||||
|
stats.MemoryStats.Stats["inactive_file"] = 3
|
||||||
|
stats.MemoryStats.Stats["mapped_file"] = 0
|
||||||
|
stats.MemoryStats.Stats["pgfault"] = 2
|
||||||
|
stats.MemoryStats.Stats["pgmajfault"] = 0
|
||||||
|
stats.MemoryStats.Stats["pgpgin"] = 0
|
||||||
|
stats.MemoryStats.Stats["pgpgout"] = 0
|
||||||
|
stats.MemoryStats.Stats["rss"] = 0
|
||||||
|
stats.MemoryStats.Stats["rss_huge"] = 0
|
||||||
|
stats.MemoryStats.Stats["total_active_anon"] = 0
|
||||||
|
stats.MemoryStats.Stats["total_active_file"] = 0
|
||||||
|
stats.MemoryStats.Stats["total_cache"] = 0
|
||||||
|
stats.MemoryStats.Stats["total_inactive_anon"] = 0
|
||||||
|
stats.MemoryStats.Stats["total_inactive_file"] = 0
|
||||||
|
stats.MemoryStats.Stats["total_mapped_file"] = 0
|
||||||
|
stats.MemoryStats.Stats["total_pgfault"] = 0
|
||||||
|
stats.MemoryStats.Stats["total_pgmajfault"] = 0
|
||||||
|
stats.MemoryStats.Stats["total_pgpgin"] = 4
|
||||||
|
stats.MemoryStats.Stats["total_pgpgout"] = 0
|
||||||
|
stats.MemoryStats.Stats["total_rss"] = 44
|
||||||
|
stats.MemoryStats.Stats["total_rss_huge"] = 444
|
||||||
|
stats.MemoryStats.Stats["total_unevictable"] = 0
|
||||||
|
stats.MemoryStats.Stats["total_writeback"] = 55
|
||||||
|
stats.MemoryStats.Stats["unevictable"] = 0
|
||||||
|
stats.MemoryStats.Stats["writeback"] = 0
|
||||||
|
|
||||||
|
stats.MemoryStats.MaxUsage = 1001
|
||||||
|
stats.MemoryStats.Usage = 1111
|
||||||
|
stats.MemoryStats.Failcnt = 1
|
||||||
|
stats.MemoryStats.Limit = 2000
|
||||||
|
|
||||||
|
stats.Networks["eth0"] = types.NetworkStats{
|
||||||
|
RxDropped: 1,
|
||||||
|
RxBytes: 2,
|
||||||
|
RxErrors: 3,
|
||||||
|
TxPackets: 4,
|
||||||
|
TxDropped: 1,
|
||||||
|
RxPackets: 2,
|
||||||
|
TxErrors: 3,
|
||||||
|
TxBytes: 4,
|
||||||
|
}
|
||||||
|
|
||||||
|
stats.Networks["eth1"] = types.NetworkStats{
|
||||||
|
RxDropped: 5,
|
||||||
|
RxBytes: 6,
|
||||||
|
RxErrors: 7,
|
||||||
|
TxPackets: 8,
|
||||||
|
TxDropped: 5,
|
||||||
|
RxPackets: 6,
|
||||||
|
TxErrors: 7,
|
||||||
|
TxBytes: 8,
|
||||||
|
}
|
||||||
|
|
||||||
|
sbr := types.BlkioStatEntry{
|
||||||
|
Major: 6,
|
||||||
|
Minor: 0,
|
||||||
|
Op: "read",
|
||||||
|
Value: 100,
|
||||||
|
}
|
||||||
|
sr := types.BlkioStatEntry{
|
||||||
|
Major: 6,
|
||||||
|
Minor: 0,
|
||||||
|
Op: "write",
|
||||||
|
Value: 101,
|
||||||
|
}
|
||||||
|
sr2 := types.BlkioStatEntry{
|
||||||
|
Major: 6,
|
||||||
|
Minor: 1,
|
||||||
|
Op: "write",
|
||||||
|
Value: 201,
|
||||||
|
}
|
||||||
|
|
||||||
|
stats.BlkioStats.IoServiceBytesRecursive = append(
|
||||||
|
stats.BlkioStats.IoServiceBytesRecursive, sbr)
|
||||||
|
stats.BlkioStats.IoServicedRecursive = append(
|
||||||
|
stats.BlkioStats.IoServicedRecursive, sr)
|
||||||
|
stats.BlkioStats.IoServicedRecursive = append(
|
||||||
|
stats.BlkioStats.IoServicedRecursive, sr2)
|
||||||
|
|
||||||
|
return stats
|
||||||
|
}
|
||||||
|
|
||||||
|
func containerStatsWindows() types.ContainerStats {
|
||||||
|
var stat types.ContainerStats
|
||||||
|
jsonStat := `
|
||||||
|
{
|
||||||
|
"read":"2017-01-11T08:32:46.2413794Z",
|
||||||
|
"preread":"0001-01-01T00:00:00Z",
|
||||||
|
"num_procs":64,
|
||||||
|
"cpu_stats":{
|
||||||
|
"cpu_usage":{
|
||||||
|
"total_usage":536718750,
|
||||||
|
"usage_in_kernelmode":390468750,
|
||||||
|
"usage_in_usermode":390468750
|
||||||
|
},
|
||||||
|
"throttling_data":{
|
||||||
|
"periods":0,
|
||||||
|
"throttled_periods":0,
|
||||||
|
"throttled_time":0
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"precpu_stats":{
|
||||||
|
"cpu_usage":{
|
||||||
|
"total_usage":0,
|
||||||
|
"usage_in_kernelmode":0,
|
||||||
|
"usage_in_usermode":0
|
||||||
|
},
|
||||||
|
"throttling_data":{
|
||||||
|
"periods":0,
|
||||||
|
"throttled_periods":0,
|
||||||
|
"throttled_time":0
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"memory_stats":{
|
||||||
|
"commitbytes":77160448,
|
||||||
|
"commitpeakbytes":105000960,
|
||||||
|
"privateworkingset":59961344
|
||||||
|
},
|
||||||
|
"name":"/gt_test_iis",
|
||||||
|
}`
|
||||||
|
stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat))
|
||||||
|
return stat
|
||||||
|
}

var containerInspect = types.ContainerJSON{
    Config: &container.Config{
        Env: []string{
            "ENVVAR1=loremipsum",
            "ENVVAR1FOO=loremipsum",
            "ENVVAR2=dolorsitamet",
            "ENVVAR3==ubuntu:10.04",
            "ENVVAR4",
            "ENVVAR5=",
            "ENVVAR6= ",
            "ENVVAR7=ENVVAR8=ENVVAR9",
            "PATH=/bin:/sbin",
        },
    },
}
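
The environment list above deliberately exercises edge cases for `tag_env` handling: a name with no value (`ENVVAR4`), empty and whitespace-only values, and a value that itself contains `=`. Purely as an illustration of why those cases matter (this is not the plugin's code), such entries are typically split on the first `=` only:

```
// Hypothetical illustration only: split KEY=VALUE environment strings,
// keeping any further '=' characters inside the value.
package docker

import "strings"

func splitEnvEntries(entries []string) map[string]string {
    tags := make(map[string]string)
    for _, e := range entries {
        parts := strings.SplitN(e, "=", 2)
        if len(parts) != 2 {
            continue // "ENVVAR4" carries no value at all
        }
        // "ENVVAR7=ENVVAR8=ENVVAR9" becomes key "ENVVAR7", value "ENVVAR8=ENVVAR9".
        tags[parts[0]] = parts[1]
    }
    return tags
}
```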

@ -1,172 +0,0 @@
package docker

import (
    "context"
    "io/ioutil"
    "strings"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/api/types/registry"
)

type FakeDockerClient struct {
}

func (d FakeDockerClient) Info(ctx context.Context) (types.Info, error) {
    env := types.Info{
        Containers:         108,
        ContainersRunning:  98,
        ContainersStopped:  6,
        ContainersPaused:   3,
        OomKillDisable:     false,
        SystemTime:         "2016-02-24T00:55:09.15073105-05:00",
        NEventsListener:    0,
        ID:                 "5WQQ:TFWR:FDNG:OKQ3:37Y4:FJWG:QIKK:623T:R3ME:QTKB:A7F7:OLHD",
        Debug:              false,
        LoggingDriver:      "json-file",
        KernelVersion:      "4.3.0-1-amd64",
        IndexServerAddress: "https://index.docker.io/v1/",
        MemTotal:           3840757760,
        Images:             199,
        CPUCfsQuota:        true,
        Name:               "absol",
        SwapLimit:          false,
        IPv4Forwarding:     true,
        ExperimentalBuild:  false,
        CPUCfsPeriod:       true,
        RegistryConfig: &registry.ServiceConfig{
            IndexConfigs: map[string]*registry.IndexInfo{
                "docker.io": {
                    Name:     "docker.io",
                    Mirrors:  []string{},
                    Official: true,
                    Secure:   true,
                },
            }, InsecureRegistryCIDRs: []*registry.NetIPNet{{IP: []byte{127, 0, 0, 0}, Mask: []byte{255, 0, 0, 0}}}, Mirrors: []string{}},
        OperatingSystem:   "Linux Mint LMDE (containerized)",
        BridgeNfIptables:  true,
        HTTPSProxy:        "",
        Labels:            []string{},
        MemoryLimit:       false,
        DriverStatus:      [][2]string{{"Pool Name", "docker-8:1-1182287-pool"}, {"Pool Blocksize", "65.54 kB"}, {"Backing Filesystem", "extfs"}, {"Data file", "/dev/loop0"}, {"Metadata file", "/dev/loop1"}, {"Data Space Used", "17.3 GB"}, {"Data Space Total", "107.4 GB"}, {"Data Space Available", "36.53 GB"}, {"Metadata Space Used", "20.97 MB"}, {"Metadata Space Total", "2.147 GB"}, {"Metadata Space Available", "2.127 GB"}, {"Udev Sync Supported", "true"}, {"Deferred Removal Enabled", "false"}, {"Data loop file", "/var/lib/docker/devicemapper/devicemapper/data"}, {"Metadata loop file", "/var/lib/docker/devicemapper/devicemapper/metadata"}, {"Library Version", "1.02.115 (2016-01-25)"}},
        NFd:               19,
        HTTPProxy:         "",
        Driver:            "devicemapper",
        NGoroutines:       39,
        NCPU:              4,
        DockerRootDir:     "/var/lib/docker",
        NoProxy:           "",
        BridgeNfIP6tables: true,
    }
    return env, nil
}

func (d FakeDockerClient) ContainerList(octx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
    container1 := types.Container{
        ID:      "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb",
        Names:   []string{"/etcd"},
        Image:   "quay.io/coreos/etcd:v2.2.2",
        Command: "/etcd -name etcd0 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
        Created: 1455941930,
        Status:  "Up 4 hours",
        Ports: []types.Port{
            types.Port{
                PrivatePort: 7001,
                PublicPort:  0,
                Type:        "tcp",
            },
            types.Port{
                PrivatePort: 4001,
                PublicPort:  0,
                Type:        "tcp",
            },
            types.Port{
                PrivatePort: 2380,
                PublicPort:  0,
                Type:        "tcp",
            },
            types.Port{
                PrivatePort: 2379,
                PublicPort:  2379,
                Type:        "tcp",
                IP:          "0.0.0.0",
            },
        },
        Labels: map[string]string{
            "label1": "test_value_1",
            "label2": "test_value_2",
        },
        SizeRw:     0,
        SizeRootFs: 0,
    }
    container2 := types.Container{
        ID:      "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
        Names:   []string{"/etcd2"},
        Image:   "quay.io:4443/coreos/etcd:v2.2.2",
        Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
        Created: 1455941933,
        Status:  "Up 4 hours",
        Ports: []types.Port{
            types.Port{
                PrivatePort: 7002,
                PublicPort:  0,
                Type:        "tcp",
            },
            types.Port{
                PrivatePort: 4002,
                PublicPort:  0,
                Type:        "tcp",
            },
            types.Port{
                PrivatePort: 2381,
                PublicPort:  0,
                Type:        "tcp",
            },
            types.Port{
                PrivatePort: 2382,
                PublicPort:  2382,
                Type:        "tcp",
                IP:          "0.0.0.0",
            },
        },
        Labels: map[string]string{
            "label1": "test_value_1",
            "label2": "test_value_2",
        },
        SizeRw:     0,
        SizeRootFs: 0,
    }

    containers := []types.Container{container1, container2}
    return containers, nil

    //#{e6a96c84ca91a5258b7cb752579fb68826b68b49ff957487695cd4d13c343b44 titilambert/snmpsim /bin/sh -c 'snmpsimd --agent-udpv4-endpoint=0.0.0.0:31161 --process-user=root --process-group=user' 1455724831 Up 4 hours [{31161 31161 udp 0.0.0.0}] 0 0 [/snmp] map[]}]2016/02/24 01:05:01 Gathered metrics, (3s interval), from 1 inputs in 1.233836656s
}

func (d FakeDockerClient) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
    var stat types.ContainerStats
    jsonStat := `{"read":"2016-02-24T11:42:27.472459608-05:00","memory_stats":{"stats":{},"limit":18935443456},"blkio_stats":{"io_service_bytes_recursive":[{"major":252,"minor":1,"op":"Read","value":753664},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":753664},{"major":252,"minor":1,"op":"Total","value":753664}],"io_serviced_recursive":[{"major":252,"minor":1,"op":"Read","value":26},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":26},{"major":252,"minor":1,"op":"Total","value":26}]},"cpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052607520000000,"throttling_data":{}},"precpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052599550000000,"throttling_data":{}}}`
    stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat))
    return stat, nil
}

func (d FakeDockerClient) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
    json := types.ContainerJSON{
        Config: &container.Config{
            Env: []string{
                "ENVVAR1=loremipsum",
                "ENVVAR1FOO=loremipsum",
                "ENVVAR2=dolorsitamet",
                "ENVVAR3==ubuntu:10.04",
                "ENVVAR4",
                "ENVVAR5=",
                "ENVVAR6= ",
                "ENVVAR7=ENVVAR8=ENVVAR9",
                "PATH=/bin:/sbin",
            },
        },
    }

    return json, nil
}

@ -0,0 +1,55 @@
// Helper functions copied from
// https://github.com/docker/cli/blob/master/cli/command/container/stats_helpers.go
package docker

import "github.com/docker/docker/api/types"

func calculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 {
    var (
        cpuPercent = 0.0
        // calculate the change for the cpu usage of the container in between readings
        cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU)
        // calculate the change for the entire system between readings
        systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem)
        onlineCPUs  = float64(v.CPUStats.OnlineCPUs)
    )

    if onlineCPUs == 0.0 {
        onlineCPUs = float64(len(v.CPUStats.CPUUsage.PercpuUsage))
    }
    if systemDelta > 0.0 && cpuDelta > 0.0 {
        cpuPercent = (cpuDelta / systemDelta) * onlineCPUs * 100.0
    }
    return cpuPercent
}

func calculateCPUPercentWindows(v *types.StatsJSON) float64 {
    // Max number of 100ns intervals between the previous time read and now
    possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) // Start with number of ns intervals
    possIntervals /= 100                                         // Convert to number of 100ns intervals
    possIntervals *= uint64(v.NumProcs)                          // Multiple by the number of processors

    // Intervals used
    intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage

    // Percentage avoiding divide-by-zero
    if possIntervals > 0 {
        return float64(intervalsUsed) / float64(possIntervals) * 100.0
    }
    return 0.00
}

// calculateMemUsageUnixNoCache calculate memory usage of the container.
// Page cache is intentionally excluded to avoid misinterpretation of the output.
func calculateMemUsageUnixNoCache(mem types.MemoryStats) float64 {
    return float64(mem.Usage - mem.Stats["cache"])
}

func calculateMemPercentUnixNoCache(limit float64, usedNoCache float64) float64 {
    // MemoryStats.Limit will never be 0 unless the container is not running and we haven't
    // got any data from cgroup
    if limit != 0 {
        return usedNoCache / limit * 100.0
    }
    return 0
}
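
A small usage sketch of the copied helpers with made-up numbers (not part of this commit) may help show how the Windows CPU percentage and the cache-excluded memory percentage are derived:

```
// Hypothetical usage sketch with invented sample values.
package docker

import (
    "fmt"
    "time"

    "github.com/docker/docker/api/types"
)

func exampleHelperUsage() {
    var v types.StatsJSON
    v.PreRead = time.Date(2017, 1, 11, 8, 32, 45, 0, time.UTC)
    v.Read = v.PreRead.Add(time.Second) // one second between samples
    v.NumProcs = 4                      // 4 CPUs -> 4 possible CPU-seconds
    v.PreCPUStats.CPUUsage.TotalUsage = 0
    v.CPUStats.CPUUsage.TotalUsage = 10000000 // 1s of CPU time in 100ns ticks

    // 1 CPU-second used out of 4 possible -> 25%.
    fmt.Println(calculateCPUPercentWindows(&v))

    mem := types.MemoryStats{
        Usage: 1111,
        Limit: 2000,
        Stats: map[string]uint64{"cache": 111},
    }
    used := calculateMemUsageUnixNoCache(mem)                             // 1111 - 111 = 1000
    fmt.Println(calculateMemPercentUnixNoCache(float64(mem.Limit), used)) // 1000 / 2000 -> 50%
}
```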