add cgroup plugin

Vladimir Sagan 2016-06-08 11:13:22 +03:00 committed by Cameron Sparr
parent 5ddd61d2e2
commit b0484d8a0c
30 changed files with 724 additions and 0 deletions

View File

@ -6,6 +6,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
_ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
_ "github.com/influxdata/telegraf/plugins/inputs/ceph"
_ "github.com/influxdata/telegraf/plugins/inputs/cgroup"
_ "github.com/influxdata/telegraf/plugins/inputs/chrony"
_ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch"
_ "github.com/influxdata/telegraf/plugins/inputs/conntrack"

View File

@ -0,0 +1,59 @@
# CGroup Input Plugin For Telegraf Agent
This input plugin will capture specific statistics per cgroup.
The following file formats are supported:
* Single value
```
VAL\n
```
* New line separated values
```
VAL0\n
VAL1\n
```
* Space separated values
```
VAL0 VAL1 ...\n
```
* New line separated key-space-value pairs
```
KEY0 VAL0\n
KEY1 VAL1\n
```
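For example, a `memory.stat` file containing key-space-value pairs:
```
cache 1739362304123123123
rss 1775325184
```
produces the fields `memory.stat.cache` and `memory.stat.rss`. A single-value file such as `memory.limit_in_bytes` becomes one field named after the file, while multi-value files get a zero-based index suffix (e.g. `cpuacct.usage_percpu.0`).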
### Tags:
All measurements have the following tags:
- path
### Configuration:
```
# [[inputs.cgroup]]
# flush_scope = 10 # optional (fields are written in batches of 10 items)
# paths = [
# "/cgroup/memory", # root cgroup
# "/cgroup/memory/child1", # container cgroup
# "/cgroup/memory/child2/*", # all children cgroups under child2, but not child2 itself
# ]
# fields = ["memory.*usage*", "memory.limit_in_bytes"]
# [[inputs.cgroup]]
# paths = [
# "/cgroup/cpu", # root cgroup
# "/cgroup/cpu/*", # all container cgroups
# "/cgroup/cpu/*/*", # all children cgroups under each container cgroup
# ]
# fields = ["cpuacct.usage", "cpu.cfs_period_us", "cpu.cfs_quota_us"]
```
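With `flush_scope = 10`, a cgroup exposing 25 matching fields is written as three points (10 + 10 + 5 fields) that share the same `path` tag; when `flush_scope` is omitted or not positive, all fields are written as a single point.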

View File

@ -0,0 +1,292 @@
// +build linux

package cgroup

import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
const metricName = "cgroup"
type CGroup struct {
Paths []string `toml:"paths"`
Files []string `toml:"fields"`
FlushScope int `toml:"flush_scope"`
}
var sampleConfig = `
# paths = [
# "/cgroup/memory",
# "/cgroup/memory/child1",
# "/cgroup/memory/child2/*",
# ]
# fields = ["memory.*usage*", "memory.limit_in_bytes"]
`
func (g *CGroup) SampleConfig() string {
return sampleConfig
}
func (g *CGroup) Description() string {
return "Read specific statistics per cgroup"
}
func (g *CGroup) Gather(acc telegraf.Accumulator) error {
list := make(chan pathInfo)
go g.generateDirs(list)
for dir := range list {
if dir.err != nil {
return dir.err
}
if err := g.gatherDir(dir.path, acc); err != nil {
return err
}
}
return nil
}
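// gatherDir reads each configured stat file found under dir, parses it into
// fields, and adds them to the accumulator tagged with the cgroup path,
// optionally splitting the fields into batches of FlushScope items.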
func (g *CGroup) gatherDir(dir string, acc telegraf.Accumulator) error {
fields := make(map[string]interface{})
list := make(chan pathInfo)
go g.generateFiles(dir, list)
for file := range list {
if file.err != nil {
return file.err
}
raw, err := ioutil.ReadFile(file.path)
if err != nil {
return err
}
if len(raw) == 0 {
continue
}
fd := fileData{data: raw, path: file.path}
if err := fd.parse(fields); err != nil {
return err
}
}
tags := map[string]string{"path": dir}
if g.FlushScope <= 0 {
acc.AddFields(metricName, fields, tags)
return nil
}
writeWithBatches(acc, fields, tags, g.FlushScope)
return nil
}
func writeWithBatches(acc telegraf.Accumulator, fields map[string]interface{}, tags map[string]string, scope int) {
for len(fields) > 0 {
batch := make(map[string]interface{})
for k, v := range fields {
batch[k] = v
delete(fields, k)
if len(batch) == scope || len(fields) == 0 {
break
}
}
acc.AddFields(metricName, batch, tags)
}
}
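// Illustrative note: with scope = 2 and fields {"a": 1, "b": 2, "c": 3},
// writeWithBatches calls acc.AddFields twice with the same tags -- once with
// two of the fields and once with the remaining one. Go map iteration order
// is randomized, so which fields end up in the same batch is not fixed.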
// ======================================================================
type pathInfo struct {
path string
err error
}
func isDir(path string) (bool, error) {
result, err := os.Stat(path)
if err != nil {
return false, err
}
return result.IsDir(), nil
}
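// generateDirs expands every configured path glob and sends each matching
// directory down the channel, e.g. "/cgroup/memory/child2/*" yields all
// children cgroups under child2 but not child2 itself.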
func (g *CGroup) generateDirs(list chan<- pathInfo) {
for _, dir := range g.Paths {
// getting all dirs that match the pattern 'dir'
items, err := filepath.Glob(dir)
if err != nil {
list <- pathInfo{err: err}
return
}
for _, item := range items {
ok, err := isDir(item)
if err != nil {
list <- pathInfo{err: err}
return
}
// supply only dirs
if ok {
list <- pathInfo{path: item}
}
}
}
close(list)
}
func (g *CGroup) generateFiles(dir string, list chan<- pathInfo) {
for _, file := range g.Files {
// getting all file paths that match the pattern 'dir + file'
		// path.Base ensures that the file pattern does not contain any path components
items, err := filepath.Glob(path.Join(dir, path.Base(file)))
if err != nil {
list <- pathInfo{err: err}
return
}
for _, item := range items {
ok, err := isDir(item)
if err != nil {
list <- pathInfo{err: err}
return
}
// supply only files not dirs
if !ok {
list <- pathInfo{path: item}
}
}
}
close(list)
}
// ======================================================================
type fileData struct {
data []byte
path string
}
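// format detects which of the supported file layouts fd.data uses; it
// returns an error if none of the known patterns match.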
func (fd *fileData) format() (*fileFormat, error) {
for _, ff := range fileFormats {
ok, err := ff.match(fd.data)
if err != nil {
return nil, err
}
if ok {
return &ff, nil
}
}
return nil, fmt.Errorf("%v: unknown file format", fd.path)
}
func (fd *fileData) parse(fields map[string]interface{}) error {
format, err := fd.format()
if err != nil {
return err
}
format.parser(filepath.Base(fd.path), fields, fd.data)
return nil
}
// ======================================================================
type fileFormat struct {
name string
pattern string
parser func(measurement string, fields map[string]interface{}, b []byte)
}
const keyPattern = "[[:alpha:]_]+"
const valuePattern = "[\\d-]+"
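// fileFormats holds the recognized cgroup file layouts; format() tries them
// in order and returns the first one whose pattern matches the whole file.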
var fileFormats = [...]fileFormat{
// VAL\n
fileFormat{
name: "Single value",
pattern: "^" + valuePattern + "\n$",
parser: func(measurement string, fields map[string]interface{}, b []byte) {
re := regexp.MustCompile("^(" + valuePattern + ")\n$")
matches := re.FindAllStringSubmatch(string(b), -1)
fields[measurement] = numberOrString(matches[0][1])
},
},
// VAL0\n
// VAL1\n
// ...
fileFormat{
name: "New line separated values",
pattern: "^(" + valuePattern + "\n){2,}$",
parser: func(measurement string, fields map[string]interface{}, b []byte) {
re := regexp.MustCompile("(" + valuePattern + ")\n")
matches := re.FindAllStringSubmatch(string(b), -1)
for i, v := range matches {
fields[measurement+"."+strconv.Itoa(i)] = numberOrString(v[1])
}
},
},
// VAL0 VAL1 ...\n
fileFormat{
name: "Space separated values",
pattern: "^(" + valuePattern + " )+\n$",
parser: func(measurement string, fields map[string]interface{}, b []byte) {
re := regexp.MustCompile("(" + valuePattern + ") ")
matches := re.FindAllStringSubmatch(string(b), -1)
for i, v := range matches {
fields[measurement+"."+strconv.Itoa(i)] = numberOrString(v[1])
}
},
},
// KEY0 VAL0\n
// KEY1 VAL1\n
// ...
fileFormat{
		name: "New line separated key-space-value pairs",
pattern: "^(" + keyPattern + " " + valuePattern + "\n)+$",
parser: func(measurement string, fields map[string]interface{}, b []byte) {
re := regexp.MustCompile("(" + keyPattern + ") (" + valuePattern + ")\n")
matches := re.FindAllStringSubmatch(string(b), -1)
for _, v := range matches {
fields[measurement+"."+v[1]] = numberOrString(v[2])
}
},
},
}
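// numberOrString converts s to an int when it parses as one; otherwise the
// raw string is kept, so a value such as "12-781" (memory.use_hierarchy in
// the test data) is reported as a string field.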
func numberOrString(s string) interface{} {
i, err := strconv.Atoi(s)
if err == nil {
return i
}
return s
}
func (f fileFormat) match(b []byte) (bool, error) {
	return regexp.Match(f.pattern, b)
}
func init() {
inputs.Add("cgroup", func() telegraf.Input { return &CGroup{} })
}

View File

@ -0,0 +1,3 @@
// +build !linux

package cgroup

View File

@ -0,0 +1,182 @@
// +build linux

package cgroup

import (
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
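// cg1 exercises every supported file format against a single cgroup
// directory: single value, new line separated values, key-space-value pairs,
// and an empty file, which gatherDir skips.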
var cg1 = &CGroup{
Paths: []string{"testdata/memory"},
Files: []string{
"memory.empty",
"memory.max_usage_in_bytes",
"memory.limit_in_bytes",
"memory.stat",
"memory.use_hierarchy",
"notify_on_release",
},
}
func TestCgroupStatistics_1(t *testing.T) {
var acc testutil.Accumulator
err := cg1.Gather(&acc)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/memory",
}
fields := map[string]interface{}{
"memory.stat.cache": 1739362304123123123,
"memory.stat.rss": 1775325184,
"memory.stat.rss_huge": 778043392,
"memory.stat.mapped_file": 421036032,
"memory.stat.dirty": -307200,
"memory.max_usage_in_bytes.0": 0,
"memory.max_usage_in_bytes.1": -1,
"memory.max_usage_in_bytes.2": 2,
"memory.limit_in_bytes": 223372036854771712,
"memory.use_hierarchy": "12-781",
"notify_on_release": 0,
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
// ======================================================================
var cg2 = &CGroup{
Paths: []string{"testdata/cpu"},
Files: []string{"cpuacct.usage_percpu"},
}
func TestCgroupStatistics_2(t *testing.T) {
var acc testutil.Accumulator
err := cg2.Gather(&acc)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/cpu",
}
fields := map[string]interface{}{
"cpuacct.usage_percpu.0": -1452543795404,
"cpuacct.usage_percpu.1": 1376681271659,
"cpuacct.usage_percpu.2": 1450950799997,
"cpuacct.usage_percpu.3": -1473113374257,
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
// ======================================================================
var cg3 = &CGroup{
Paths: []string{"testdata/memory/*"},
Files: []string{"memory.limit_in_bytes"},
}
func TestCgroupStatistics_3(t *testing.T) {
var acc testutil.Accumulator
err := cg3.Gather(&acc)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/memory/group_1",
}
fields := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
tags = map[string]string{
"path": "testdata/memory/group_2",
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
// ======================================================================
var cg4 = &CGroup{
Paths: []string{"testdata/memory/*/*", "testdata/memory/group_2"},
Files: []string{"memory.limit_in_bytes"},
}
func TestCgroupStatistics_4(t *testing.T) {
var acc testutil.Accumulator
err := cg4.Gather(&acc)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/memory/group_1/group_1_1",
}
fields := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
tags = map[string]string{
"path": "testdata/memory/group_1/group_1_2",
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
tags = map[string]string{
"path": "testdata/memory/group_2",
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
// ======================================================================
var cg5 = &CGroup{
Paths: []string{"testdata/memory/*/group_1_1"},
Files: []string{"memory.limit_in_bytes"},
}
func TestCgroupStatistics_5(t *testing.T) {
var acc testutil.Accumulator
err := cg5.Gather(&acc)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/memory/group_1/group_1_1",
}
fields := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
tags = map[string]string{
"path": "testdata/memory/group_2/group_1_1",
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
// ======================================================================
var cg6 = &CGroup{
Paths: []string{"testdata/memory"},
Files: []string{"memory.us*", "*/memory.kmem.*"},
}
func TestCgroupStatistics_6(t *testing.T) {
var acc testutil.Accumulator
err := cg6.Gather(&acc)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/memory",
}
fields := map[string]interface{}{
"memory.usage_in_bytes": 3513667584,
"memory.use_hierarchy": "12-781",
"memory.kmem.limit_in_bytes": 9223372036854771712,
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}

View File

@ -0,0 +1 @@
Total 0

View File

@ -0,0 +1,131 @@
11:0 Read 0
11:0 Write 0
11:0 Sync 0
11:0 Async 0
11:0 Total 0
8:0 Read 49134
8:0 Write 216703
8:0 Sync 177906
8:0 Async 87931
8:0 Total 265837
7:7 Read 0
7:7 Write 0
7:7 Sync 0
7:7 Async 0
7:7 Total 0
7:6 Read 0
7:6 Write 0
7:6 Sync 0
7:6 Async 0
7:6 Total 0
7:5 Read 0
7:5 Write 0
7:5 Sync 0
7:5 Async 0
7:5 Total 0
7:4 Read 0
7:4 Write 0
7:4 Sync 0
7:4 Async 0
7:4 Total 0
7:3 Read 0
7:3 Write 0
7:3 Sync 0
7:3 Async 0
7:3 Total 0
7:2 Read 0
7:2 Write 0
7:2 Sync 0
7:2 Async 0
7:2 Total 0
7:1 Read 0
7:1 Write 0
7:1 Sync 0
7:1 Async 0
7:1 Total 0
7:0 Read 0
7:0 Write 0
7:0 Sync 0
7:0 Async 0
7:0 Total 0
1:15 Read 3
1:15 Write 0
1:15 Sync 0
1:15 Async 3
1:15 Total 3
1:14 Read 3
1:14 Write 0
1:14 Sync 0
1:14 Async 3
1:14 Total 3
1:13 Read 3
1:13 Write 0
1:13 Sync 0
1:13 Async 3
1:13 Total 3
1:12 Read 3
1:12 Write 0
1:12 Sync 0
1:12 Async 3
1:12 Total 3
1:11 Read 3
1:11 Write 0
1:11 Sync 0
1:11 Async 3
1:11 Total 3
1:10 Read 3
1:10 Write 0
1:10 Sync 0
1:10 Async 3
1:10 Total 3
1:9 Read 3
1:9 Write 0
1:9 Sync 0
1:9 Async 3
1:9 Total 3
1:8 Read 3
1:8 Write 0
1:8 Sync 0
1:8 Async 3
1:8 Total 3
1:7 Read 3
1:7 Write 0
1:7 Sync 0
1:7 Async 3
1:7 Total 3
1:6 Read 3
1:6 Write 0
1:6 Sync 0
1:6 Async 3
1:6 Total 3
1:5 Read 3
1:5 Write 0
1:5 Sync 0
1:5 Async 3
1:5 Total 3
1:4 Read 3
1:4 Write 0
1:4 Sync 0
1:4 Async 3
1:4 Total 3
1:3 Read 3
1:3 Write 0
1:3 Sync 0
1:3 Async 3
1:3 Total 3
1:2 Read 3
1:2 Write 0
1:2 Sync 0
1:2 Async 3
1:2 Total 3
1:1 Read 3
1:1 Write 0
1:1 Sync 0
1:1 Async 3
1:1 Total 3
1:0 Read 3
1:0 Write 0
1:0 Sync 0
1:0 Async 3
1:0 Total 3
Total 265885

View File

@ -0,0 +1 @@
-1

View File

@ -0,0 +1 @@
-1452543795404 1376681271659 1450950799997 -1473113374257

View File

@ -0,0 +1 @@
223372036854771712

View File

@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200

View File

@ -0,0 +1 @@
223372036854771712

View File

@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200

View File

@ -0,0 +1 @@
9223372036854771712

View File

@ -0,0 +1 @@
0

View File

@ -0,0 +1 @@
223372036854771712

View File

@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200

View File

@ -0,0 +1 @@
223372036854771712

View File

@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200

View File

@ -0,0 +1 @@
223372036854771712

View File

@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200

View File

View File

@ -0,0 +1 @@
9223372036854771712

View File

@ -0,0 +1 @@
223372036854771712

View File

@ -0,0 +1,3 @@
0
-1
2

View File

@ -0,0 +1,8 @@
total=858067 N0=858067
file=406254 N0=406254
anon=451792 N0=451792
unevictable=21 N0=21
hierarchical_total=858067 N0=858067
hierarchical_file=406254 N0=406254
hierarchical_anon=451792 N0=451792
hierarchical_unevictable=21 N0=21

View File

@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200

View File

@ -0,0 +1 @@
3513667584

View File

@ -0,0 +1 @@
12-781

View File

@ -0,0 +1 @@
0