package docker

import (
	"context"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/swarm"
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/filter"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/internal/docker"
	tlsint "github.com/influxdata/telegraf/internal/tls"
	"github.com/influxdata/telegraf/plugins/inputs"
)

// Docker holds the configuration and runtime state of the docker input plugin.
type Docker struct {
	Endpoint       string
	ContainerNames []string // deprecated in 1.4; use container_name_include

	GatherServices bool `toml:"gather_services"`

	Timeout        internal.Duration
	PerDevice      bool     `toml:"perdevice"`
	Total          bool     `toml:"total"`
	TagEnvironment []string `toml:"tag_env"`
	LabelInclude   []string `toml:"docker_label_include"`
	LabelExclude   []string `toml:"docker_label_exclude"`

	ContainerInclude []string `toml:"container_name_include"`
	ContainerExclude []string `toml:"container_name_exclude"`

	ContainerStateInclude []string `toml:"container_state_include"`
	ContainerStateExclude []string `toml:"container_state_exclude"`

	IncludeSourceTag bool `toml:"source_tag"`

	Log telegraf.Logger

	tlsint.ClientConfig

	newEnvClient func() (Client, error)
	newClient    func(string, *tls.Config) (Client, error)

	client          Client
	httpClient      *http.Client
	engineHost      string
	serverVersion   string
	filtersCreated  bool
	labelFilter     filter.Filter
	containerFilter filter.Filter
	stateFilter     filter.Filter
}

// KB, MB, GB, TB, PB: human-friendly decimal size units used by parseSize.
const (
	KB = 1000
	MB = 1000 * KB
	GB = 1000 * MB
	TB = 1000 * GB
	PB = 1000 * TB

	defaultEndpoint = "unix:///var/run/docker.sock"
)

var (
	sizeRegex       = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`)
	containerStates = []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"}
	now             = time.Now
)

var sampleConfig = `
  ## Docker Endpoint
  ##   To use TCP, set endpoint = "tcp://[ip]:[port]"
  ##   To use environment variables (e.g., docker-machine), set endpoint = "ENV"
  endpoint = "unix:///var/run/docker.sock"

  ## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
  gather_services = false

  ## Only collect metrics for these containers; collect all if empty
  container_names = []

  ## Set the source tag for the metrics to the container ID hostname, e.g. the first 12 chars
  source_tag = false

  ## Containers to include and exclude. Globs accepted.
  ## Note that an empty array for both will include all containers
  container_name_include = []
  container_name_exclude = []

  ## Container states to include and exclude. Globs accepted.
  ## When empty only containers in the "running" state will be captured.
  ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
  ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
  # container_state_include = []
  # container_state_exclude = []

  ## Timeout for docker list, info, and stats commands
  timeout = "5s"

  ## Whether to report per-device blkio (8:0, 8:1...) and
  ## network (eth0, eth1, ...) stats for each container
  perdevice = true

  ## Whether to report total blkio and network stats for each container
  total = false

  ## Which environment variables should we use as tags
  # tag_env = ["JAVA_HOME", "HEAP_SIZE"]

  ## docker labels to include and exclude as tags. Globs accepted.
  ## Note that an empty array for both will include all labels as tags
  docker_label_include = []
  docker_label_exclude = []

  ## Optional TLS Config
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false
`
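
// As a quick reference, a minimal telegraf.conf stanza assembled from the
// sample above might look like the following (illustrative values only):
//
//	[[inputs.docker]]
//	  endpoint = "unix:///var/run/docker.sock"
//	  timeout = "5s"
//	  perdevice = true
//	  total = false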

// SampleConfig returns the default Docker TOML configuration.
func (d *Docker) SampleConfig() string { return sampleConfig }

// Description returns a one-line description of the plugin.
func (d *Docker) Description() string {
	return "Read metrics about docker containers"
}

// Gather metrics from the docker server.
func (d *Docker) Gather(acc telegraf.Accumulator) error {
	if d.client == nil {
		c, err := d.getNewClient()
		if err != nil {
			return err
		}
		d.client = c
	}

	// Create label filters if not already created
	if !d.filtersCreated {
		err := d.createLabelFilters()
		if err != nil {
			return err
		}
		err = d.createContainerFilters()
		if err != nil {
			return err
		}
		err = d.createContainerStateFilters()
		if err != nil {
			return err
		}
		d.filtersCreated = true
	}

	// Get daemon info
	err := d.gatherInfo(acc)
	if err != nil {
		acc.AddError(err)
	}

	if d.GatherServices {
		err := d.gatherSwarmInfo(acc)
		if err != nil {
			acc.AddError(err)
		}
	}

	filterArgs := filters.NewArgs()
	for _, state := range containerStates {
		if d.stateFilter.Match(state) {
			filterArgs.Add("status", state)
		}
	}

	// All container states were excluded
	if filterArgs.Len() == 0 {
		return nil
	}

	// List containers
	opts := types.ContainerListOptions{
		Filters: filterArgs,
	}
	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
	defer cancel()

	containers, err := d.client.ContainerList(ctx, opts)
	if err == context.DeadlineExceeded {
		return errListTimeout
	}
	if err != nil {
		return err
	}

	// Get container data
	var wg sync.WaitGroup
	wg.Add(len(containers))
	for _, container := range containers {
		go func(c types.Container) {
			defer wg.Done()
			if err := d.gatherContainer(c, acc); err != nil {
				acc.AddError(err)
			}
		}(container)
	}
	wg.Wait()

	return nil
}
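
// Each container above is gathered on its own goroutine; telegraf
// accumulators are safe for concurrent use, so the goroutines can add
// fields and errors without extra synchronization here.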

func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error {
	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
	defer cancel()

	services, err := d.client.ServiceList(ctx, types.ServiceListOptions{})
	if err == context.DeadlineExceeded {
		return errServiceTimeout
	}
	if err != nil {
		return err
	}

	if len(services) > 0 {
		tasks, err := d.client.TaskList(ctx, types.TaskListOptions{})
		if err != nil {
			return err
		}

		nodes, err := d.client.NodeList(ctx, types.NodeListOptions{})
		if err != nil {
			return err
		}

		running := map[string]int{}
		tasksNoShutdown := map[string]int{}

		activeNodes := make(map[string]struct{})
		for _, n := range nodes {
			if n.Status.State != swarm.NodeStateDown {
				activeNodes[n.ID] = struct{}{}
			}
		}

		for _, task := range tasks {
			if task.DesiredState != swarm.TaskStateShutdown {
				tasksNoShutdown[task.ServiceID]++
			}

			if task.Status.State == swarm.TaskStateRunning {
				running[task.ServiceID]++
			}
		}

		for _, service := range services {
			tags := map[string]string{}
			fields := make(map[string]interface{})
			now := time.Now()
			tags["service_id"] = service.ID
			tags["service_name"] = service.Spec.Name
			if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil {
				tags["service_mode"] = "replicated"
				fields["tasks_running"] = running[service.ID]
				fields["tasks_desired"] = *service.Spec.Mode.Replicated.Replicas
			} else if service.Spec.Mode.Global != nil {
				tags["service_mode"] = "global"
				fields["tasks_running"] = running[service.ID]
				fields["tasks_desired"] = tasksNoShutdown[service.ID]
			} else {
				d.Log.Error("Unknown replica mode")
			}
			// Add metrics
			acc.AddFields("docker_swarm",
				fields,
				tags,
				now)
		}
	}

	return nil
}
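
// As a rough sketch of the output, a replicated service "web" scaled to 3
// replicas with 2 tasks currently running would emit something like
// (tag values abridged):
//
//	docker_swarm,service_id=...,service_name=web,service_mode=replicated tasks_running=2i,tasks_desired=3i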

func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
	// Init vars
	dataFields := make(map[string]interface{})
	metadataFields := make(map[string]interface{})
	now := time.Now()

	// Get info from docker daemon
	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
	defer cancel()

	info, err := d.client.Info(ctx)
	if err == context.DeadlineExceeded {
		return errInfoTimeout
	}
	if err != nil {
		return err
	}

	d.engineHost = info.Name
	d.serverVersion = info.ServerVersion

	tags := map[string]string{
		"engine_host":    d.engineHost,
		"server_version": d.serverVersion,
	}

	fields := map[string]interface{}{
		"n_cpus":                  info.NCPU,
		"n_used_file_descriptors": info.NFd,
		"n_containers":            info.Containers,
		"n_containers_running":    info.ContainersRunning,
		"n_containers_stopped":    info.ContainersStopped,
		"n_containers_paused":     info.ContainersPaused,
		"n_images":                info.Images,
		"n_goroutines":            info.NGoroutines,
		"n_listener_events":       info.NEventsListener,
	}

	// Add metrics
	acc.AddFields("docker", fields, tags, now)
	acc.AddFields("docker",
		map[string]interface{}{"memory_total": info.MemTotal},
		tags,
		now)

	// Get storage metrics
	tags["unit"] = "bytes"

	var (
		// "docker_devicemapper" measurement fields
		poolName           string
		deviceMapperFields = map[string]interface{}{}
	)

	for _, rawData := range info.DriverStatus {
		name := strings.ToLower(strings.Replace(rawData[0], " ", "_", -1))
		if name == "pool_name" {
			poolName = rawData[1]
			continue
		}

		// Try to convert string to int (bytes)
		value, err := parseSize(rawData[1])
		if err != nil {
			continue
		}

		switch name {
		case "pool_blocksize",
			"base_device_size",
			"data_space_used",
			"data_space_total",
			"data_space_available",
			"metadata_space_used",
			"metadata_space_total",
			"metadata_space_available",
			"thin_pool_minimum_free_space":
			deviceMapperFields[name+"_bytes"] = value
		}

		// Legacy devicemapper measurements
		if name == "pool_blocksize" {
			// pool blocksize
			acc.AddFields("docker",
				map[string]interface{}{"pool_blocksize": value},
				tags,
				now)
		} else if strings.HasPrefix(name, "data_space_") {
			// data space
			fieldName := strings.TrimPrefix(name, "data_space_")
			dataFields[fieldName] = value
		} else if strings.HasPrefix(name, "metadata_space_") {
			// metadata space
			fieldName := strings.TrimPrefix(name, "metadata_space_")
			metadataFields[fieldName] = value
		}
	}

	if len(dataFields) > 0 {
		acc.AddFields("docker_data", dataFields, tags, now)
	}

	if len(metadataFields) > 0 {
		acc.AddFields("docker_metadata", metadataFields, tags, now)
	}

	if len(deviceMapperFields) > 0 {
		tags := map[string]string{
			"engine_host":    d.engineHost,
			"server_version": d.serverVersion,
		}

		if poolName != "" {
			tags["pool_name"] = poolName
		}

		acc.AddFields("docker_devicemapper", deviceMapperFields, tags, now)
	}

	return nil
}
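
// For a daemon on the devicemapper storage driver, info.DriverStatus arrives
// as string pairs such as ["Data Space Used", "2.5 GB"]; the loop above
// normalizes the key to "data_space_used" and parseSize converts "2.5 GB"
// into 2500000000 bytes.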

func hostnameFromID(id string) string {
	if len(id) > 12 {
		return id[0:12]
	}
	return id
}
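
// hostnameFromID mirrors Docker's default hostname, which is the first 12
// characters of the container ID. For example, given a full 64-character ID:
//
//	hostnameFromID("f2cb0a78a5ab2470...") // => "f2cb0a78a5ab"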

func (d *Docker) gatherContainer(
	container types.Container,
	acc telegraf.Accumulator,
) error {
	var v *types.StatsJSON

	// Parse container name
	var cname string
	for _, name := range container.Names {
		trimmedName := strings.TrimPrefix(name, "/")
		match := d.containerFilter.Match(trimmedName)
		if match {
			cname = trimmedName
			break
		}
	}

	if cname == "" {
		return nil
	}

	imageName, imageVersion := docker.ParseImage(container.Image)

	tags := map[string]string{
		"engine_host":       d.engineHost,
		"server_version":    d.serverVersion,
		"container_name":    cname,
		"container_image":   imageName,
		"container_version": imageVersion,
	}

	if d.IncludeSourceTag {
		tags["source"] = hostnameFromID(container.ID)
	}

	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
	defer cancel()

	r, err := d.client.ContainerStats(ctx, container.ID, false)
	if err == context.DeadlineExceeded {
		return errStatsTimeout
	}
	if err != nil {
		return fmt.Errorf("error getting docker stats: %v", err)
	}

	defer r.Body.Close()
	dec := json.NewDecoder(r.Body)
	if err = dec.Decode(&v); err != nil {
		if err == io.EOF {
			return nil
		}
		return fmt.Errorf("error decoding: %v", err)
	}
	daemonOSType := r.OSType

	// Use the container's common name (the one printed by `docker ps`).
	if v.Name != "" {
		tags["container_name"] = strings.TrimPrefix(v.Name, "/")
	}

	// Add labels to tags
	for k, label := range container.Labels {
		if d.labelFilter.Match(k) {
			tags[k] = label
		}
	}

	return d.gatherContainerInspect(container, acc, tags, daemonOSType, v)
}

func (d *Docker) gatherContainerInspect(
	container types.Container,
	acc telegraf.Accumulator,
	tags map[string]string,
	daemonOSType string,
	v *types.StatsJSON,
) error {
	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
	defer cancel()

	info, err := d.client.ContainerInspect(ctx, container.ID)
	if err == context.DeadlineExceeded {
		return errInspectTimeout
	}
	if err != nil {
		return fmt.Errorf("error inspecting docker container: %v", err)
	}

	// Add whitelisted environment variables to tags
	if len(d.TagEnvironment) > 0 {
		for _, envvar := range info.Config.Env {
			for _, configvar := range d.TagEnvironment {
				dockEnv := strings.SplitN(envvar, "=", 2)
				// Check for the presence of the tag in the whitelist.
				if len(dockEnv) == 2 && len(strings.TrimSpace(dockEnv[1])) != 0 && configvar == dockEnv[0] {
					tags[dockEnv[0]] = dockEnv[1]
				}
			}
		}
	}

	if info.State != nil {
		tags["container_status"] = info.State.Status
		statefields := map[string]interface{}{
			"oomkilled":    info.State.OOMKilled,
			"pid":          info.State.Pid,
			"exitcode":     info.State.ExitCode,
			"container_id": container.ID,
		}

		finished, err := time.Parse(time.RFC3339, info.State.FinishedAt)
		if err == nil && !finished.IsZero() {
			statefields["finished_at"] = finished.UnixNano()
		} else {
			// set finished to now for use in uptime
			finished = now()
		}

		started, err := time.Parse(time.RFC3339, info.State.StartedAt)
		if err == nil && !started.IsZero() {
			statefields["started_at"] = started.UnixNano()

			uptime := finished.Sub(started)
			if finished.Before(started) {
				uptime = now().Sub(started)
			}
			statefields["uptime_ns"] = uptime.Nanoseconds()
		}

		acc.AddFields("docker_container_status", statefields, tags, now())

		if info.State.Health != nil {
			healthfields := map[string]interface{}{
				"health_status":  info.State.Health.Status,
				"failing_streak": info.ContainerJSONBase.State.Health.FailingStreak,
			}
			acc.AddFields("docker_container_health", healthfields, tags, now())
		}
	}

	parseContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total, daemonOSType)

	return nil
}
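
// Note on uptime_ns: for a still-running container FinishedAt parses to the
// zero time, so finished falls back to now() and uptime measures
// now() - StartedAt; for an exited container it is FinishedAt - StartedAt.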

func parseContainerStats(
	stat *types.StatsJSON,
	acc telegraf.Accumulator,
	tags map[string]string,
	id string,
	perDevice bool,
	total bool,
	daemonOSType string,
) {
	tm := stat.Read

	if tm.Before(time.Unix(0, 0)) {
		tm = time.Now()
	}

	memfields := map[string]interface{}{
		"container_id": id,
	}

	memstats := []string{
		"active_anon",
		"active_file",
		"cache",
		"hierarchical_memory_limit",
		"inactive_anon",
		"inactive_file",
		"mapped_file",
		"pgfault",
		"pgmajfault",
		"pgpgin",
		"pgpgout",
		"rss",
		"rss_huge",
		"total_active_anon",
		"total_active_file",
		"total_cache",
		"total_inactive_anon",
		"total_inactive_file",
		"total_mapped_file",
		"total_pgfault",
		"total_pgmajfault",
		"total_pgpgin",
		"total_pgpgout",
		"total_rss",
		"total_rss_huge",
		"total_unevictable",
		"total_writeback",
		"unevictable",
		"writeback",
	}
	for _, field := range memstats {
		if value, ok := stat.MemoryStats.Stats[field]; ok {
			memfields[field] = value
		}
	}
	if stat.MemoryStats.Failcnt != 0 {
		memfields["fail_count"] = stat.MemoryStats.Failcnt
	}

	if daemonOSType != "windows" {
		memfields["limit"] = stat.MemoryStats.Limit
		memfields["max_usage"] = stat.MemoryStats.MaxUsage

		mem := CalculateMemUsageUnixNoCache(stat.MemoryStats)
		memLimit := float64(stat.MemoryStats.Limit)
		memfields["usage"] = uint64(mem)
		memfields["usage_percent"] = CalculateMemPercentUnixNoCache(memLimit, mem)
	} else {
		memfields["commit_bytes"] = stat.MemoryStats.Commit
		memfields["commit_peak_bytes"] = stat.MemoryStats.CommitPeak
		memfields["private_working_set"] = stat.MemoryStats.PrivateWorkingSet
	}

	acc.AddFields("docker_container_mem", memfields, tags, tm)

	cpufields := map[string]interface{}{
		"usage_total":                  stat.CPUStats.CPUUsage.TotalUsage,
		"usage_in_usermode":            stat.CPUStats.CPUUsage.UsageInUsermode,
		"usage_in_kernelmode":          stat.CPUStats.CPUUsage.UsageInKernelmode,
		"usage_system":                 stat.CPUStats.SystemUsage,
		"throttling_periods":           stat.CPUStats.ThrottlingData.Periods,
		"throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods,
		"throttling_throttled_time":    stat.CPUStats.ThrottlingData.ThrottledTime,
		"container_id":                 id,
	}

	if daemonOSType != "windows" {
		previousCPU := stat.PreCPUStats.CPUUsage.TotalUsage
		previousSystem := stat.PreCPUStats.SystemUsage
		cpuPercent := CalculateCPUPercentUnix(previousCPU, previousSystem, stat)
		cpufields["usage_percent"] = cpuPercent
	} else {
		cpuPercent := calculateCPUPercentWindows(stat)
		cpufields["usage_percent"] = cpuPercent
	}

	cputags := copyTags(tags)
	cputags["cpu"] = "cpu-total"
	acc.AddFields("docker_container_cpu", cpufields, cputags, tm)

	// If the OnlineCPUs field is present, use it to restrict stats gathering to online CPUs only
	// (https://github.com/moby/moby/commit/115f91d7575d6de6c7781a96a082f144fd17e400)
	var percpuusage []uint64
	if stat.CPUStats.OnlineCPUs > 0 {
		percpuusage = stat.CPUStats.CPUUsage.PercpuUsage[:stat.CPUStats.OnlineCPUs]
	} else {
		percpuusage = stat.CPUStats.CPUUsage.PercpuUsage
	}

	for i, percpu := range percpuusage {
		percputags := copyTags(tags)
		percputags["cpu"] = fmt.Sprintf("cpu%d", i)
		fields := map[string]interface{}{
			"usage_total":  percpu,
			"container_id": id,
		}
		acc.AddFields("docker_container_cpu", fields, percputags, tm)
	}

	totalNetworkStatMap := make(map[string]interface{})
	for network, netstats := range stat.Networks {
		netfields := map[string]interface{}{
			"rx_dropped":   netstats.RxDropped,
			"rx_bytes":     netstats.RxBytes,
			"rx_errors":    netstats.RxErrors,
			"tx_packets":   netstats.TxPackets,
			"tx_dropped":   netstats.TxDropped,
			"rx_packets":   netstats.RxPackets,
			"tx_errors":    netstats.TxErrors,
			"tx_bytes":     netstats.TxBytes,
			"container_id": id,
		}
		// Create a new network tag dictionary for the "network" tag
		if perDevice {
			nettags := copyTags(tags)
			nettags["network"] = network
			acc.AddFields("docker_container_net", netfields, nettags, tm)
		}
		if total {
			for field, value := range netfields {
				if field == "container_id" {
					continue
				}

				var uintV uint64
				switch v := value.(type) {
				case uint64:
					uintV = v
				case int64:
					uintV = uint64(v)
				default:
					continue
				}

				_, ok := totalNetworkStatMap[field]
				if ok {
					totalNetworkStatMap[field] = totalNetworkStatMap[field].(uint64) + uintV
				} else {
					totalNetworkStatMap[field] = uintV
				}
			}
		}
	}

	// totalNetworkStatMap could be empty if container is running with --net=host.
	if total && len(totalNetworkStatMap) != 0 {
		nettags := copyTags(tags)
		nettags["network"] = "total"
		totalNetworkStatMap["container_id"] = id
		acc.AddFields("docker_container_net", totalNetworkStatMap, nettags, tm)
	}

	gatherBlockIOMetrics(stat, acc, tags, tm, id, perDevice, total)
}
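
// With the defaults (perdevice=true, total=false) every network interface
// gets its own series tagged with network="eth0" and so on; enabling total
// instead sums the counters into a single series tagged network="total".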

func gatherBlockIOMetrics(
	stat *types.StatsJSON,
	acc telegraf.Accumulator,
	tags map[string]string,
	tm time.Time,
	id string,
	perDevice bool,
	total bool,
) {
	blkioStats := stat.BlkioStats
	// Make a map of devices to their block io stats
	deviceStatMap := make(map[string]map[string]interface{})

	// Fetch or lazily create the field map for a device; a later stat
	// category can mention a device the earlier ones did not, and
	// assigning into a nil inner map would panic.
	deviceFields := func(device string) map[string]interface{} {
		fields, ok := deviceStatMap[device]
		if !ok {
			fields = make(map[string]interface{})
			deviceStatMap[device] = fields
		}
		return fields
	}

	for _, metric := range blkioStats.IoServiceBytesRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		field := fmt.Sprintf("io_service_bytes_recursive_%s", strings.ToLower(metric.Op))
		deviceFields(device)[field] = metric.Value
	}

	for _, metric := range blkioStats.IoServicedRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		field := fmt.Sprintf("io_serviced_recursive_%s", strings.ToLower(metric.Op))
		deviceFields(device)[field] = metric.Value
	}

	for _, metric := range blkioStats.IoQueuedRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		field := fmt.Sprintf("io_queue_recursive_%s", strings.ToLower(metric.Op))
		deviceFields(device)[field] = metric.Value
	}

	for _, metric := range blkioStats.IoServiceTimeRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		field := fmt.Sprintf("io_service_time_recursive_%s", strings.ToLower(metric.Op))
		deviceFields(device)[field] = metric.Value
	}

	for _, metric := range blkioStats.IoWaitTimeRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		field := fmt.Sprintf("io_wait_time_%s", strings.ToLower(metric.Op))
		deviceFields(device)[field] = metric.Value
	}

	for _, metric := range blkioStats.IoMergedRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		field := fmt.Sprintf("io_merged_recursive_%s", strings.ToLower(metric.Op))
		deviceFields(device)[field] = metric.Value
	}

	for _, metric := range blkioStats.IoTimeRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		deviceFields(device)["io_time_recursive"] = metric.Value
	}

	for _, metric := range blkioStats.SectorsRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		deviceFields(device)["sectors_recursive"] = metric.Value
	}

	totalStatMap := make(map[string]interface{})
	for device, fields := range deviceStatMap {
		fields["container_id"] = id
		if perDevice {
			iotags := copyTags(tags)
			iotags["device"] = device
			acc.AddFields("docker_container_blkio", fields, iotags, tm)
		}
		if total {
			for field, value := range fields {
				if field == "container_id" {
					continue
				}

				var uintV uint64
				switch v := value.(type) {
				case uint64:
					uintV = v
				case int64:
					uintV = uint64(v)
				default:
					continue
				}

				_, ok := totalStatMap[field]
				if ok {
					totalStatMap[field] = totalStatMap[field].(uint64) + uintV
				} else {
					totalStatMap[field] = uintV
				}
			}
		}
	}
	if total {
		totalStatMap["container_id"] = id
		iotags := copyTags(tags)
		iotags["device"] = "total"
		acc.AddFields("docker_container_blkio", totalStatMap, iotags, tm)
	}
}
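
// Devices here are cgroup major:minor pairs, e.g. "8:0" for /dev/sda, which
// is what the perdevice blkio series in the sample config refers to.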

func copyTags(in map[string]string) map[string]string {
	out := make(map[string]string)
	for k, v := range in {
		out[k] = v
	}
	return out
}

func sliceContains(in string, sl []string) bool {
	for _, str := range sl {
		if str == in {
			return true
		}
	}
	return false
}

// parseSize parses a human-readable size string into the number of bytes it represents.
func parseSize(sizeStr string) (int64, error) {
	matches := sizeRegex.FindStringSubmatch(sizeStr)
	if len(matches) != 4 {
		return -1, fmt.Errorf("invalid size: %s", sizeStr)
	}

	size, err := strconv.ParseFloat(matches[1], 64)
	if err != nil {
		return -1, err
	}

	uMap := map[string]int64{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
	unitPrefix := strings.ToLower(matches[3])
	if mul, ok := uMap[unitPrefix]; ok {
		size *= float64(mul)
	}

	return int64(size), nil
}
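
// A sketch of the expected behavior, given the decimal constants above:
//
//	parseSize("64 GB")  // => 64000000000, nil
//	parseSize("1.5 kB") // => 1500, nil
//	parseSize("bogus")  // => -1, error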

func (d *Docker) createContainerFilters() error {
	// Backwards compatibility for deprecated `container_names` parameter.
	if len(d.ContainerNames) > 0 {
		d.ContainerInclude = append(d.ContainerInclude, d.ContainerNames...)
	}

	filter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude)
	if err != nil {
		return err
	}
	d.containerFilter = filter
	return nil
}

func (d *Docker) createLabelFilters() error {
	filter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude)
	if err != nil {
		return err
	}
	d.labelFilter = filter
	return nil
}

func (d *Docker) createContainerStateFilters() error {
	if len(d.ContainerStateInclude) == 0 && len(d.ContainerStateExclude) == 0 {
		d.ContainerStateInclude = []string{"running"}
	}
	filter, err := filter.NewIncludeExcludeFilter(d.ContainerStateInclude, d.ContainerStateExclude)
	if err != nil {
		return err
	}
	d.stateFilter = filter
	return nil
}
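
// With no state settings, only running containers are listed; a config such
// as container_state_include = ["*"] would match every state in
// containerStates and gather stopped containers as well.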

func (d *Docker) getNewClient() (Client, error) {
	if d.Endpoint == "ENV" {
		return d.newEnvClient()
	}

	tlsConfig, err := d.ClientConfig.TLSConfig()
	if err != nil {
		return nil, err
	}

	return d.newClient(d.Endpoint, tlsConfig)
}
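
// For example, endpoint = "ENV" defers to the Docker client's environment
// handling (DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH), while a
// tcp:// or unix:// endpoint is dialed directly with the TLS config built
// above.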

func init() {
	inputs.Add("docker", func() telegraf.Input {
		return &Docker{
			PerDevice:      true,
			Timeout:        internal.Duration{Duration: time.Second * 5},
			Endpoint:       defaultEndpoint,
			newEnvClient:   NewEnvClient,
			newClient:      NewClient,
			filtersCreated: false,
		}
	})
}