From 7c20522a30cff17e5bd88b3e838a66e440a5e859 Mon Sep 17 00:00:00 2001 From: Mike Tonks Date: Tue, 9 Feb 2016 15:20:56 +0000 Subject: [PATCH 001/287] Add calculated cpu and memory percentages to docker input (via config option) --- plugins/inputs/docker/README.md | 8 ++ plugins/inputs/docker/docker.go | 32 ++++++- plugins/inputs/docker/docker_test.go | 132 +++++++++++++++++---------- 3 files changed, 123 insertions(+), 49 deletions(-) diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index fa662ca80..9aea54c07 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -22,8 +22,16 @@ for the stat structure can be found endpoint = "unix:///var/run/docker.sock" # Only collect metrics for these containers, collect all if empty container_names = [] + calculate_percentages = false ``` +### Calculate Percentages + +Optionally percentages can be calculated for cpu and memory usage. This uses +the same calculation as the 'docker stats' command line tool. Set this option +to 'true' to enable this feature. + + ### Measurements & Fields: Every effort was made to preserve the names based on the JSON response from the diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 6814c190a..8c927f954 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -14,8 +14,9 @@ import ( ) type Docker struct { - Endpoint string - ContainerNames []string + Endpoint string + ContainerNames []string + CalculatePercentages bool client *docker.Client } @@ -27,6 +28,8 @@ var sampleConfig = ` endpoint = "unix:///var/run/docker.sock" # Only collect metrics for these containers, collect all if empty container_names = [] + # Add calculated percentages for mem and cpu, as per 'docker stats' command + calculate_percentages = false ` func (d *Docker) Description() string { @@ -67,6 +70,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup wg.Add(len(containers)) for _, container := range containers { + go func(c docker.APIContainers) { defer wg.Done() err := d.gatherContainer(c, acc) @@ -131,7 +135,7 @@ func (d *Docker) gatherContainer( tags[k] = v } - gatherContainerStats(stat, acc, tags) + gatherContainerStats(stat, acc, tags, d.CalculatePercentages) return nil } @@ -140,6 +144,7 @@ func gatherContainerStats( stat *docker.Stats, acc telegraf.Accumulator, tags map[string]string, + calculate_percentages bool, ) { now := stat.Read @@ -178,6 +183,9 @@ func gatherContainerStats( "inactive_file": stat.MemoryStats.Stats.InactiveFile, "total_pgpgin": stat.MemoryStats.Stats.TotalPgpgin, } + if calculate_percentages { + memfields["usage_percent"] = float64(stat.MemoryStats.Usage) / float64(stat.MemoryStats.Limit) * 100.0 + } acc.AddFields("docker_mem", memfields, tags, now) cpufields := map[string]interface{}{ @@ -189,6 +197,9 @@ func gatherContainerStats( "throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods, "throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime, } + if calculate_percentages { + cpufields["usage_percent"] = calculateCPUPercent(stat) + } cputags := copyTags(tags) cputags["cpu"] = "cpu-total" acc.AddFields("docker_cpu", cpufields, cputags, now) @@ -219,6 +230,21 @@ func gatherContainerStats( gatherBlockIOMetrics(stat, acc, tags, now) } +func calculateCPUPercent(stat *docker.Stats) float64 { + var ( + cpuPercent = 0.0 + // calculate the change for the cpu usage of the container in between readings + cpuDelta = float64(stat.CPUStats.CPUUsage.TotalUsage) - float64(stat.PreCPUStats.CPUUsage.TotalUsage) + // calculate the change for the entire system between readings + systemDelta = float64(stat.CPUStats.SystemCPUUsage) - float64(stat.PreCPUStats.SystemCPUUsage) + ) + + if systemDelta > 0.0 && cpuDelta > 0.0 { + cpuPercent = (cpuDelta / systemDelta) * float64(len(stat.CPUStats.CPUUsage.PercpuUsage)) * 100.0 + } + return cpuPercent +} + func gatherBlockIOMetrics( stat *docker.Stats, acc telegraf.Accumulator, diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index 9b85d1029..3e27ea59b 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -18,7 +18,7 @@ func TestDockerGatherContainerStats(t *testing.T) { "cont_name": "redis", "cont_image": "redis/image", } - gatherContainerStats(stats, &acc, tags) + gatherContainerStats(stats, &acc, tags, false) // test docker_net measurement netfields := map[string]interface{}{ @@ -45,55 +45,13 @@ func TestDockerGatherContainerStats(t *testing.T) { acc.AssertContainsTaggedFields(t, "docker_blkio", blkiofields, blkiotags) // test docker_mem measurement - memfields := map[string]interface{}{ - "max_usage": uint64(1001), - "usage": uint64(1111), - "fail_count": uint64(1), - "limit": uint64(20), - "total_pgmafault": uint64(0), - "cache": uint64(0), - "mapped_file": uint64(0), - "total_inactive_file": uint64(0), - "pgpgout": uint64(0), - "rss": uint64(0), - "total_mapped_file": uint64(0), - "writeback": uint64(0), - "unevictable": uint64(0), - "pgpgin": uint64(0), - "total_unevictable": uint64(0), - "pgmajfault": uint64(0), - "total_rss": uint64(44), - "total_rss_huge": uint64(444), - "total_writeback": uint64(55), - "total_inactive_anon": uint64(0), - "rss_huge": uint64(0), - "hierarchical_memory_limit": uint64(0), - "total_pgfault": uint64(0), - "total_active_file": uint64(0), - "active_anon": uint64(0), - "total_active_anon": uint64(0), - "total_pgpgout": uint64(0), - "total_cache": uint64(0), - "inactive_anon": uint64(0), - "active_file": uint64(1), - "pgfault": uint64(2), - "inactive_file": uint64(3), - "total_pgpgin": uint64(4), - } + memfields := sample_mem_fields() acc.AssertContainsTaggedFields(t, "docker_mem", memfields, tags) // test docker_cpu measurement cputags := copyTags(tags) cputags["cpu"] = "cpu-total" - cpufields := map[string]interface{}{ - "usage_total": uint64(500), - "usage_in_usermode": uint64(100), - "usage_in_kernelmode": uint64(200), - "usage_system": uint64(100), - "throttling_periods": uint64(1), - "throttling_throttled_periods": uint64(0), - "throttling_throttled_time": uint64(0), - } + cpufields := sample_cpu_fields() acc.AssertContainsTaggedFields(t, "docker_cpu", cpufields, cputags) cputags["cpu"] = "cpu0" @@ -109,6 +67,30 @@ func TestDockerGatherContainerStats(t *testing.T) { acc.AssertContainsTaggedFields(t, "docker_cpu", cpu1fields, cputags) } +func TestDockerGatherContainerPercentages(t *testing.T) { + var acc testutil.Accumulator + stats := testStats() + + tags := map[string]string{ + "cont_id": "foobarbaz", + "cont_name": "redis", + "cont_image": "redis/image", + } + gatherContainerStats(stats, &acc, tags, true) + + // test docker_mem measurement + memfields := sample_mem_fields() + memfields["usage_percent"] = 55.55 + acc.AssertContainsTaggedFields(t, "docker_mem", memfields, tags) + + // test docker_cpu measurement + cputags := copyTags(tags) + cputags["cpu"] = "cpu-total" + cpufields := sample_cpu_fields() + cpufields["usage_percent"] = 400.0 + acc.AssertContainsTaggedFields(t, "docker_cpu", cpufields, cputags) +} + func testStats() *docker.Stats { stats := &docker.Stats{ Read: time.Now(), @@ -122,6 +104,9 @@ func testStats() *docker.Stats { stats.CPUStats.SystemCPUUsage = 100 stats.CPUStats.ThrottlingData.Periods = 1 + stats.PreCPUStats.CPUUsage.TotalUsage = 400 + stats.PreCPUStats.SystemCPUUsage = 50 + stats.MemoryStats.Stats.TotalPgmafault = 0 stats.MemoryStats.Stats.Cache = 0 stats.MemoryStats.Stats.MappedFile = 0 @@ -155,7 +140,7 @@ func testStats() *docker.Stats { stats.MemoryStats.MaxUsage = 1001 stats.MemoryStats.Usage = 1111 stats.MemoryStats.Failcnt = 1 - stats.MemoryStats.Limit = 20 + stats.MemoryStats.Limit = 2000 stats.Networks["eth0"] = docker.NetworkStats{ RxDropped: 1, @@ -188,3 +173,58 @@ func testStats() *docker.Stats { return stats } + +func sample_mem_fields() map[string]interface{} { + + memfields := map[string]interface{}{ + "max_usage": uint64(1001), + "usage": uint64(1111), + "fail_count": uint64(1), + "limit": uint64(2000), + "total_pgmafault": uint64(0), + "cache": uint64(0), + "mapped_file": uint64(0), + "total_inactive_file": uint64(0), + "pgpgout": uint64(0), + "rss": uint64(0), + "total_mapped_file": uint64(0), + "writeback": uint64(0), + "unevictable": uint64(0), + "pgpgin": uint64(0), + "total_unevictable": uint64(0), + "pgmajfault": uint64(0), + "total_rss": uint64(44), + "total_rss_huge": uint64(444), + "total_writeback": uint64(55), + "total_inactive_anon": uint64(0), + "rss_huge": uint64(0), + "hierarchical_memory_limit": uint64(0), + "total_pgfault": uint64(0), + "total_active_file": uint64(0), + "active_anon": uint64(0), + "total_active_anon": uint64(0), + "total_pgpgout": uint64(0), + "total_cache": uint64(0), + "inactive_anon": uint64(0), + "active_file": uint64(1), + "pgfault": uint64(2), + "inactive_file": uint64(3), + "total_pgpgin": uint64(4), + } + + return memfields +} + +func sample_cpu_fields() map[string]interface{} { + + cpufields := map[string]interface{}{ + "usage_total": uint64(500), + "usage_in_usermode": uint64(100), + "usage_in_kernelmode": uint64(200), + "usage_system": uint64(100), + "throttling_periods": uint64(1), + "throttling_throttled_periods": uint64(0), + "throttling_throttled_time": uint64(0), + } + return cpufields +} From 7587dc350e158fbb8f7550ad873d0acea6633457 Mon Sep 17 00:00:00 2001 From: Mike Tonks Date: Thu, 11 Feb 2016 10:49:48 +0000 Subject: [PATCH 002/287] Remove config option, percent option always activated. Fix review issues --- plugins/inputs/docker/README.md | 8 -- plugins/inputs/docker/docker.go | 37 ++++---- plugins/inputs/docker/docker_test.go | 130 ++++++++++----------------- 3 files changed, 65 insertions(+), 110 deletions(-) diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index 9aea54c07..fa662ca80 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -22,16 +22,8 @@ for the stat structure can be found endpoint = "unix:///var/run/docker.sock" # Only collect metrics for these containers, collect all if empty container_names = [] - calculate_percentages = false ``` -### Calculate Percentages - -Optionally percentages can be calculated for cpu and memory usage. This uses -the same calculation as the 'docker stats' command line tool. Set this option -to 'true' to enable this feature. - - ### Measurements & Fields: Every effort was made to preserve the names based on the JSON response from the diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 8c927f954..cdb1dff31 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -14,9 +14,8 @@ import ( ) type Docker struct { - Endpoint string - ContainerNames []string - CalculatePercentages bool + Endpoint string + ContainerNames []string client *docker.Client } @@ -28,8 +27,6 @@ var sampleConfig = ` endpoint = "unix:///var/run/docker.sock" # Only collect metrics for these containers, collect all if empty container_names = [] - # Add calculated percentages for mem and cpu, as per 'docker stats' command - calculate_percentages = false ` func (d *Docker) Description() string { @@ -135,7 +132,7 @@ func (d *Docker) gatherContainer( tags[k] = v } - gatherContainerStats(stat, acc, tags, d.CalculatePercentages) + gatherContainerStats(stat, acc, tags) return nil } @@ -144,7 +141,6 @@ func gatherContainerStats( stat *docker.Stats, acc telegraf.Accumulator, tags map[string]string, - calculate_percentages bool, ) { now := stat.Read @@ -182,9 +178,7 @@ func gatherContainerStats( "pgfault": stat.MemoryStats.Stats.Pgfault, "inactive_file": stat.MemoryStats.Stats.InactiveFile, "total_pgpgin": stat.MemoryStats.Stats.TotalPgpgin, - } - if calculate_percentages { - memfields["usage_percent"] = float64(stat.MemoryStats.Usage) / float64(stat.MemoryStats.Limit) * 100.0 + "usage_percent": calculateMemPercent(stat), } acc.AddFields("docker_mem", memfields, tags, now) @@ -196,9 +190,7 @@ func gatherContainerStats( "throttling_periods": stat.CPUStats.ThrottlingData.Periods, "throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods, "throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime, - } - if calculate_percentages { - cpufields["usage_percent"] = calculateCPUPercent(stat) + "usage_percent": calculateCPUPercent(stat), } cputags := copyTags(tags) cputags["cpu"] = "cpu-total" @@ -230,14 +222,19 @@ func gatherContainerStats( gatherBlockIOMetrics(stat, acc, tags, now) } +func calculateMemPercent(stat *docker.Stats) float64 { + var memPercent = 0.0 + if stat.MemoryStats.Limit > 0 { + memPercent = float64(stat.MemoryStats.Usage) / float64(stat.MemoryStats.Limit) * 100.0 + } + return memPercent +} + func calculateCPUPercent(stat *docker.Stats) float64 { - var ( - cpuPercent = 0.0 - // calculate the change for the cpu usage of the container in between readings - cpuDelta = float64(stat.CPUStats.CPUUsage.TotalUsage) - float64(stat.PreCPUStats.CPUUsage.TotalUsage) - // calculate the change for the entire system between readings - systemDelta = float64(stat.CPUStats.SystemCPUUsage) - float64(stat.PreCPUStats.SystemCPUUsage) - ) + var cpuPercent = 0.0 + // calculate the change for the cpu and system usage of the container in between readings + cpuDelta := float64(stat.CPUStats.CPUUsage.TotalUsage) - float64(stat.PreCPUStats.CPUUsage.TotalUsage) + systemDelta := float64(stat.CPUStats.SystemCPUUsage) - float64(stat.PreCPUStats.SystemCPUUsage) if systemDelta > 0.0 && cpuDelta > 0.0 { cpuPercent = (cpuDelta / systemDelta) * float64(len(stat.CPUStats.CPUUsage.PercpuUsage)) * 100.0 diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index 3e27ea59b..aebe8102e 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -18,7 +18,7 @@ func TestDockerGatherContainerStats(t *testing.T) { "cont_name": "redis", "cont_image": "redis/image", } - gatherContainerStats(stats, &acc, tags, false) + gatherContainerStats(stats, &acc, tags) // test docker_net measurement netfields := map[string]interface{}{ @@ -45,13 +45,58 @@ func TestDockerGatherContainerStats(t *testing.T) { acc.AssertContainsTaggedFields(t, "docker_blkio", blkiofields, blkiotags) // test docker_mem measurement - memfields := sample_mem_fields() + memfields := map[string]interface{}{ + "max_usage": uint64(1001), + "usage": uint64(1111), + "fail_count": uint64(1), + "limit": uint64(2000), + "total_pgmafault": uint64(0), + "cache": uint64(0), + "mapped_file": uint64(0), + "total_inactive_file": uint64(0), + "pgpgout": uint64(0), + "rss": uint64(0), + "total_mapped_file": uint64(0), + "writeback": uint64(0), + "unevictable": uint64(0), + "pgpgin": uint64(0), + "total_unevictable": uint64(0), + "pgmajfault": uint64(0), + "total_rss": uint64(44), + "total_rss_huge": uint64(444), + "total_writeback": uint64(55), + "total_inactive_anon": uint64(0), + "rss_huge": uint64(0), + "hierarchical_memory_limit": uint64(0), + "total_pgfault": uint64(0), + "total_active_file": uint64(0), + "active_anon": uint64(0), + "total_active_anon": uint64(0), + "total_pgpgout": uint64(0), + "total_cache": uint64(0), + "inactive_anon": uint64(0), + "active_file": uint64(1), + "pgfault": uint64(2), + "inactive_file": uint64(3), + "total_pgpgin": uint64(4), + "usage_percent": float64(55.55), + } + acc.AssertContainsTaggedFields(t, "docker_mem", memfields, tags) // test docker_cpu measurement cputags := copyTags(tags) cputags["cpu"] = "cpu-total" - cpufields := sample_cpu_fields() + cpufields := map[string]interface{}{ + "usage_total": uint64(500), + "usage_in_usermode": uint64(100), + "usage_in_kernelmode": uint64(200), + "usage_system": uint64(100), + "throttling_periods": uint64(1), + "throttling_throttled_periods": uint64(0), + "throttling_throttled_time": uint64(0), + "usage_percent": float64(400.0), + } acc.AssertContainsTaggedFields(t, "docker_cpu", cpufields, cputags) cputags["cpu"] = "cpu0" @@ -67,30 +112,6 @@ func TestDockerGatherContainerStats(t *testing.T) { acc.AssertContainsTaggedFields(t, "docker_cpu", cpu1fields, cputags) } -func TestDockerGatherContainerPercentages(t *testing.T) { - var acc testutil.Accumulator - stats := testStats() - - tags := map[string]string{ - "cont_id": "foobarbaz", - "cont_name": "redis", - "cont_image": "redis/image", - } - gatherContainerStats(stats, &acc, tags, true) - - // test docker_mem measurement - memfields := sample_mem_fields() - memfields["usage_percent"] = 55.55 - acc.AssertContainsTaggedFields(t, "docker_mem", memfields, tags) - - // test docker_cpu measurement - cputags := copyTags(tags) - cputags["cpu"] = "cpu-total" - cpufields := sample_cpu_fields() - cpufields["usage_percent"] = 400.0 - acc.AssertContainsTaggedFields(t, "docker_cpu", cpufields, cputags) -} - func testStats() *docker.Stats { stats := &docker.Stats{ Read: time.Now(), @@ -173,58 +194,3 @@ func testStats() *docker.Stats { return stats } - -func sample_mem_fields() map[string]interface{} { - - memfields := map[string]interface{}{ - "max_usage": uint64(1001), - "usage": uint64(1111), - "fail_count": uint64(1), - "limit": uint64(2000), - "total_pgmafault": uint64(0), - "cache": uint64(0), - "mapped_file": uint64(0), - "total_inactive_file": uint64(0), - "pgpgout": uint64(0), - "rss": uint64(0), - "total_mapped_file": uint64(0), - "writeback": uint64(0), - "unevictable": uint64(0), - "pgpgin": uint64(0), - "total_unevictable": uint64(0), - "pgmajfault": uint64(0), - "total_rss": uint64(44), - "total_rss_huge": uint64(444), - "total_writeback": uint64(55), - "total_inactive_anon": uint64(0), - "rss_huge": uint64(0), - "hierarchical_memory_limit": uint64(0), - "total_pgfault": uint64(0), - "total_active_file": uint64(0), - "active_anon": uint64(0), - "total_active_anon": uint64(0), - "total_pgpgout": uint64(0), - "total_cache": uint64(0), - "inactive_anon": uint64(0), - "active_file": uint64(1), - "pgfault": uint64(2), - "inactive_file": uint64(3), - "total_pgpgin": uint64(4), - } - - return memfields -} - -func sample_cpu_fields() map[string]interface{} { - - cpufields := map[string]interface{}{ - "usage_total": uint64(500), - "usage_in_usermode": uint64(100), - "usage_in_kernelmode": uint64(200), - "usage_system": uint64(100), - "throttling_periods": uint64(1), - "throttling_throttled_periods": uint64(0), - "throttling_throttled_time": uint64(0), - } - return cpufields -} From 512d9822f0e81692c8d7c5ab6e61a301ad080e48 Mon Sep 17 00:00:00 2001 From: "Dragostin Yanev (netixen)" Date: Thu, 11 Feb 2016 01:28:52 +0200 Subject: [PATCH 003/287] Add NATS consumer input plugin. --- Godeps | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/nats_consumer/README.md | 38 ++++ plugins/inputs/nats_consumer/nats_consumer.go | 202 ++++++++++++++++++ .../nats_consumer/nats_consumer_test.go | 152 +++++++++++++ 5 files changed, 394 insertions(+) create mode 100644 plugins/inputs/nats_consumer/README.md create mode 100644 plugins/inputs/nats_consumer/nats_consumer.go create mode 100644 plugins/inputs/nats_consumer/nats_consumer_test.go diff --git a/Godeps b/Godeps index 5cdfecbe7..0b9a16727 100644 --- a/Godeps +++ b/Godeps @@ -28,6 +28,7 @@ github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3 github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504 github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9 +github.com/nats-io/nats 6a83f1a633cfbfd90aa648ac99fb38c06a8b40df github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f github.com/pborman/uuid dee7705ef7b324f27ceb85a121c61f2c2e8ce988 github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2 diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index e7329b042..794885129 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -22,6 +22,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/memcached" _ "github.com/influxdata/telegraf/plugins/inputs/mongodb" _ "github.com/influxdata/telegraf/plugins/inputs/mysql" + _ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/nginx" _ "github.com/influxdata/telegraf/plugins/inputs/nsq" _ "github.com/influxdata/telegraf/plugins/inputs/passenger" diff --git a/plugins/inputs/nats_consumer/README.md b/plugins/inputs/nats_consumer/README.md new file mode 100644 index 000000000..f3b67c9d5 --- /dev/null +++ b/plugins/inputs/nats_consumer/README.md @@ -0,0 +1,38 @@ +# NATS Consumer + +The [NATS](http://www.nats.io/about/) consumer plugin reads from +specified NATS subjects and adds messages to InfluxDB. The plugin expects messages +in the [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md). +A [Queue Group](http://www.nats.io/documentation/concepts/nats-queueing/) +is used when subscribing to subjects so multiple instances of telegraf can read +from a NATS cluster in parallel. + +## Configuration +``` +# Read metrics from NATS subject(s) +[[inputs.nats_consumer]] + ### urls of NATS servers + servers = ["nats://localhost:4222"] + ### Use Transport Layer Security + secure = false + ### subject(s) to consume + subjects = ["telegraf"] + ### name a queue group + queue_group = "telegraf_consumers" + ### Maximum number of points to buffer between collection intervals + point_buffer = 100000 + + ### Data format to consume. This can be "json", "influx" or "graphite" + ### Each data format has it's own unique set of configuration options, read + ### more about them here: + ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md + data_format = "influx" +``` + +## Testing + +To run tests: + +``` +go test +``` \ No newline at end of file diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go new file mode 100644 index 000000000..4b25fa0a1 --- /dev/null +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -0,0 +1,202 @@ +package natsconsumer + +import ( + "fmt" + "log" + "sync" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/nats-io/nats" +) + +type natsError struct { + conn *nats.Conn + sub *nats.Subscription + err error +} + +func (e natsError) Error() string { + return fmt.Sprintf("%s url:%s id:%s sub:%s queue:%s", + e.err.Error(), e.conn.ConnectedUrl(), e.conn.ConnectedServerId(), e.sub.Subject, e.sub.Queue) +} + +type natsConsumer struct { + QueueGroup string + Subjects []string + Servers []string + Secure bool + + PointBuffer int + parser parsers.Parser + + sync.Mutex + Conn *nats.Conn + Subs []*nats.Subscription + + // channel for all incoming NATS messages + in chan *nats.Msg + // channel for all NATS read errors + errs chan error + // channel for all incoming parsed points + metricC chan telegraf.Metric + done chan struct{} +} + +var sampleConfig = ` + ### urls of NATS servers + servers = ["nats://localhost:4222"] + ### Use Transport Layer Security + secure = false + ### subject(s) to consume + subjects = ["telegraf"] + ### name a queue group + queue_group = "telegraf_consumers" + ### Maximum number of points to buffer between collection intervals + point_buffer = 100000 + + ### Data format to consume. This can be "json", "influx" or "graphite" + ### Each data format has it's own unique set of configuration options, read + ### more about them here: + ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md + data_format = "influx" +` + +func (n *natsConsumer) SampleConfig() string { + return sampleConfig +} + +func (n *natsConsumer) Description() string { + return "Read metrics from NATS subject(s)" +} + +func (n *natsConsumer) SetParser(parser parsers.Parser) { + n.parser = parser +} + +func (n *natsConsumer) natsErrHandler(c *nats.Conn, s *nats.Subscription, e error) { + select { + case n.errs <- natsError{conn: c, sub: s, err: e}: + default: + return + } +} + +// Start the nats consumer. Caller must call *natsConsumer.Stop() to clean up. +func (n *natsConsumer) Start() error { + n.Lock() + defer n.Unlock() + + var connectErr error + + opts := nats.DefaultOptions + opts.Servers = n.Servers + opts.Secure = n.Secure + + if n.Conn == nil || n.Conn.IsClosed() { + n.Conn, connectErr = opts.Connect() + if connectErr != nil { + return connectErr + } + + // Setup message and error channels + n.errs = make(chan error) + n.Conn.SetErrorHandler(n.natsErrHandler) + + n.in = make(chan *nats.Msg) + for _, subj := range n.Subjects { + sub, err := n.Conn.ChanQueueSubscribe(subj, n.QueueGroup, n.in) + if err != nil { + return err + } + n.Subs = append(n.Subs, sub) + } + } + + n.done = make(chan struct{}) + if n.PointBuffer == 0 { + n.PointBuffer = 100000 + } + + n.metricC = make(chan telegraf.Metric, n.PointBuffer) + + // Start the message reader + go n.receiver() + log.Printf("Started the NATS consumer service, nats: %v, subjects: %v, queue: %v\n", + n.Conn.ConnectedUrl(), n.Subjects, n.QueueGroup) + + return nil +} + +// receiver() reads all incoming messages from NATS, and parses them into +// influxdb metric points. +func (n *natsConsumer) receiver() { + defer n.clean() + for { + select { + case <-n.done: + return + case err := <-n.errs: + log.Printf("error reading from %s\n", err.Error()) + case msg := <-n.in: + metrics, err := n.parser.Parse(msg.Data) + if err != nil { + log.Printf("subject: %s, error: %s", msg.Subject, err.Error()) + } + + for _, metric := range metrics { + select { + case n.metricC <- metric: + continue + default: + log.Printf("NATS Consumer buffer is full, dropping a metric." + + " You may want to increase the point_buffer setting") + } + } + + } + } +} + +func (n *natsConsumer) clean() { + n.Lock() + defer n.Unlock() + close(n.in) + close(n.metricC) + close(n.errs) + + for _, sub := range n.Subs { + if err := sub.Unsubscribe(); err != nil { + log.Printf("Error unsubscribing from subject %s in queue %s: %s\n", + sub.Subject, sub.Queue, err.Error()) + } + } + + if n.Conn != nil && !n.Conn.IsClosed() { + n.Conn.Close() + } +} + +func (n *natsConsumer) Stop() { + n.Lock() + close(n.done) + n.Unlock() +} + +func (n *natsConsumer) Gather(acc telegraf.Accumulator) error { + n.Lock() + defer n.Unlock() + npoints := len(n.metricC) + for i := 0; i < npoints; i++ { + point := <-n.metricC + acc.AddFields(point.Name(), point.Fields(), point.Tags(), point.Time()) + } + return nil +} + +func init() { + inputs.Add("nats_consumer", func() telegraf.Input { + return &natsConsumer{} + }) +} diff --git a/plugins/inputs/nats_consumer/nats_consumer_test.go b/plugins/inputs/nats_consumer/nats_consumer_test.go new file mode 100644 index 000000000..50c663cb4 --- /dev/null +++ b/plugins/inputs/nats_consumer/nats_consumer_test.go @@ -0,0 +1,152 @@ +package natsconsumer + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" + "github.com/nats-io/nats" +) + +const ( + testMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257" + testMsgGraphite = "cpu.load.short.graphite 23422 1454780029" + testMsgJSON = "{\"a\": 5, \"b\": {\"c\": 6}}\n" + invalidMsg = "cpu_load_short,host=server01 1422568543702900257" + pointBuffer = 5 +) + +func newTestNatsConsumer() (*natsConsumer, chan *nats.Msg) { + in := make(chan *nats.Msg, pointBuffer) + n := &natsConsumer{ + QueueGroup: "test", + Subjects: []string{"telegraf"}, + Servers: []string{"nats://localhost:4222"}, + Secure: false, + PointBuffer: pointBuffer, + in: in, + errs: make(chan error, pointBuffer), + done: make(chan struct{}), + metricC: make(chan telegraf.Metric, pointBuffer), + } + return n, in +} + +// Test that the parser parses NATS messages into points +func TestRunParser(t *testing.T) { + n, in := newTestNatsConsumer() + defer close(n.done) + + n.parser, _ = parsers.NewInfluxParser() + go n.receiver() + in <- natsMsg(testMsg) + time.Sleep(time.Millisecond) + + if a := len(n.metricC); a != 1 { + t.Errorf("got %v, expected %v", a, 1) + } +} + +// Test that the parser ignores invalid messages +func TestRunParserInvalidMsg(t *testing.T) { + n, in := newTestNatsConsumer() + defer close(n.done) + + n.parser, _ = parsers.NewInfluxParser() + go n.receiver() + in <- natsMsg(invalidMsg) + time.Sleep(time.Millisecond) + + if a := len(n.metricC); a != 0 { + t.Errorf("got %v, expected %v", a, 0) + } +} + +// Test that points are dropped when we hit the buffer limit +func TestRunParserRespectsBuffer(t *testing.T) { + n, in := newTestNatsConsumer() + defer close(n.done) + + n.parser, _ = parsers.NewInfluxParser() + go n.receiver() + for i := 0; i < pointBuffer+1; i++ { + in <- natsMsg(testMsg) + } + time.Sleep(time.Millisecond) + + if a := len(n.metricC); a != pointBuffer { + t.Errorf("got %v, expected %v", a, pointBuffer) + } +} + +// Test that the parser parses nats messages into points +func TestRunParserAndGather(t *testing.T) { + n, in := newTestNatsConsumer() + defer close(n.done) + + n.parser, _ = parsers.NewInfluxParser() + go n.receiver() + in <- natsMsg(testMsg) + time.Sleep(time.Millisecond) + + acc := testutil.Accumulator{} + n.Gather(&acc) + + if a := len(acc.Metrics); a != 1 { + t.Errorf("got %v, expected %v", a, 1) + } + acc.AssertContainsFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(23422)}) +} + +// Test that the parser parses nats messages into points +func TestRunParserAndGatherGraphite(t *testing.T) { + n, in := newTestNatsConsumer() + defer close(n.done) + + n.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) + go n.receiver() + in <- natsMsg(testMsgGraphite) + time.Sleep(time.Millisecond) + + acc := testutil.Accumulator{} + n.Gather(&acc) + + if a := len(acc.Metrics); a != 1 { + t.Errorf("got %v, expected %v", a, 1) + } + acc.AssertContainsFields(t, "cpu_load_short_graphite", + map[string]interface{}{"value": float64(23422)}) +} + +// Test that the parser parses nats messages into points +func TestRunParserAndGatherJSON(t *testing.T) { + n, in := newTestNatsConsumer() + defer close(n.done) + + n.parser, _ = parsers.NewJSONParser("nats_json_test", []string{}, nil) + go n.receiver() + in <- natsMsg(testMsgJSON) + time.Sleep(time.Millisecond) + + acc := testutil.Accumulator{} + n.Gather(&acc) + + if a := len(acc.Metrics); a != 1 { + t.Errorf("got %v, expected %v", a, 1) + } + acc.AssertContainsFields(t, "nats_json_test", + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }) +} + +func natsMsg(val string) *nats.Msg { + return &nats.Msg{ + Subject: "telegraf", + Data: []byte(val), + } +} From 6c353e8b8f118036094539ca8c629bf18e750579 Mon Sep 17 00:00:00 2001 From: "Dragostin Yanev (netixen)" Date: Fri, 12 Feb 2016 12:05:33 +0200 Subject: [PATCH 004/287] Change point_buffer to metric_buffer to conform will changes in https://github.com/influxdata/telegraf/pull/676 closes #680 --- CHANGELOG.md | 5 ++- README.md | 1 + plugins/inputs/nats_consumer/README.md | 4 +- plugins/inputs/nats_consumer/nats_consumer.go | 28 +++++++------- .../nats_consumer/nats_consumer_test.go | 38 +++++++++---------- 5 files changed, 39 insertions(+), 37 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ee69938be..535e0d067 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,9 +8,10 @@ format that they would like to parse. Currently supports: "json", "influx", and [here](https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md) ### Features -- [#652](https://github.com/influxdata/telegraf/pull/652): CouchDB Input Plugin +- [#652](https://github.com/influxdata/telegraf/pull/652): CouchDB Input Plugin. Thanks @codehate! - [#655](https://github.com/influxdata/telegraf/pull/655): Support parsing arbitrary data formats. Currently limited to kafka_consumer and exec inputs. -- [#671](https://github.com/influxdata/telegraf/pull/671): Dovecot input plugin. +- [#671](https://github.com/influxdata/telegraf/pull/671): Dovecot input plugin. Thanks @mikif70! +- [#680](https://github.com/influxdata/telegraf/pull/680): NATS consumer input plugin. Thanks @netixen! ### Bugfixes - [#443](https://github.com/influxdata/telegraf/issues/443): Fix Ping command timeout parameter on Linux. diff --git a/README.md b/README.md index 6109e0841..ac6d68f0a 100644 --- a/README.md +++ b/README.md @@ -204,6 +204,7 @@ Telegraf can also collect metrics via the following service plugins: * statsd * kafka_consumer +* nats_consumer * github_webhooks We'll be adding support for many more over the coming months. Read on if you diff --git a/plugins/inputs/nats_consumer/README.md b/plugins/inputs/nats_consumer/README.md index f3b67c9d5..b2d027039 100644 --- a/plugins/inputs/nats_consumer/README.md +++ b/plugins/inputs/nats_consumer/README.md @@ -19,8 +19,8 @@ from a NATS cluster in parallel. subjects = ["telegraf"] ### name a queue group queue_group = "telegraf_consumers" - ### Maximum number of points to buffer between collection intervals - point_buffer = 100000 + ### Maximum number of metrics to buffer between collection intervals + metric_buffer = 100000 ### Data format to consume. This can be "json", "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index 4b25fa0a1..56d56990f 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -28,8 +28,8 @@ type natsConsumer struct { Servers []string Secure bool - PointBuffer int - parser parsers.Parser + MetricBuffer int + parser parsers.Parser sync.Mutex Conn *nats.Conn @@ -39,7 +39,7 @@ type natsConsumer struct { in chan *nats.Msg // channel for all NATS read errors errs chan error - // channel for all incoming parsed points + // channel for all incoming parsed metrics metricC chan telegraf.Metric done chan struct{} } @@ -53,8 +53,8 @@ var sampleConfig = ` subjects = ["telegraf"] ### name a queue group queue_group = "telegraf_consumers" - ### Maximum number of points to buffer between collection intervals - point_buffer = 100000 + ### Maximum number of metrics to buffer between collection intervals + metric_buffer = 100000 ### Data format to consume. This can be "json", "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read @@ -115,11 +115,11 @@ func (n *natsConsumer) Start() error { } n.done = make(chan struct{}) - if n.PointBuffer == 0 { - n.PointBuffer = 100000 + if n.MetricBuffer == 0 { + n.MetricBuffer = 100000 } - n.metricC = make(chan telegraf.Metric, n.PointBuffer) + n.metricC = make(chan telegraf.Metric, n.MetricBuffer) // Start the message reader go n.receiver() @@ -130,7 +130,7 @@ func (n *natsConsumer) Start() error { } // receiver() reads all incoming messages from NATS, and parses them into -// influxdb metric points. +// telegraf metrics. func (n *natsConsumer) receiver() { defer n.clean() for { @@ -151,7 +151,7 @@ func (n *natsConsumer) receiver() { continue default: log.Printf("NATS Consumer buffer is full, dropping a metric." + - " You may want to increase the point_buffer setting") + " You may want to increase the metric_buffer setting") } } @@ -187,10 +187,10 @@ func (n *natsConsumer) Stop() { func (n *natsConsumer) Gather(acc telegraf.Accumulator) error { n.Lock() defer n.Unlock() - npoints := len(n.metricC) - for i := 0; i < npoints; i++ { - point := <-n.metricC - acc.AddFields(point.Name(), point.Fields(), point.Tags(), point.Time()) + nmetrics := len(n.metricC) + for i := 0; i < nmetrics; i++ { + metric := <-n.metricC + acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time()) } return nil } diff --git a/plugins/inputs/nats_consumer/nats_consumer_test.go b/plugins/inputs/nats_consumer/nats_consumer_test.go index 50c663cb4..214695d91 100644 --- a/plugins/inputs/nats_consumer/nats_consumer_test.go +++ b/plugins/inputs/nats_consumer/nats_consumer_test.go @@ -15,26 +15,26 @@ const ( testMsgGraphite = "cpu.load.short.graphite 23422 1454780029" testMsgJSON = "{\"a\": 5, \"b\": {\"c\": 6}}\n" invalidMsg = "cpu_load_short,host=server01 1422568543702900257" - pointBuffer = 5 + metricBuffer = 5 ) func newTestNatsConsumer() (*natsConsumer, chan *nats.Msg) { - in := make(chan *nats.Msg, pointBuffer) + in := make(chan *nats.Msg, metricBuffer) n := &natsConsumer{ - QueueGroup: "test", - Subjects: []string{"telegraf"}, - Servers: []string{"nats://localhost:4222"}, - Secure: false, - PointBuffer: pointBuffer, - in: in, - errs: make(chan error, pointBuffer), - done: make(chan struct{}), - metricC: make(chan telegraf.Metric, pointBuffer), + QueueGroup: "test", + Subjects: []string{"telegraf"}, + Servers: []string{"nats://localhost:4222"}, + Secure: false, + MetricBuffer: metricBuffer, + in: in, + errs: make(chan error, metricBuffer), + done: make(chan struct{}), + metricC: make(chan telegraf.Metric, metricBuffer), } return n, in } -// Test that the parser parses NATS messages into points +// Test that the parser parses NATS messages into metrics func TestRunParser(t *testing.T) { n, in := newTestNatsConsumer() defer close(n.done) @@ -64,24 +64,24 @@ func TestRunParserInvalidMsg(t *testing.T) { } } -// Test that points are dropped when we hit the buffer limit +// Test that metrics are dropped when we hit the buffer limit func TestRunParserRespectsBuffer(t *testing.T) { n, in := newTestNatsConsumer() defer close(n.done) n.parser, _ = parsers.NewInfluxParser() go n.receiver() - for i := 0; i < pointBuffer+1; i++ { + for i := 0; i < metricBuffer+1; i++ { in <- natsMsg(testMsg) } time.Sleep(time.Millisecond) - if a := len(n.metricC); a != pointBuffer { - t.Errorf("got %v, expected %v", a, pointBuffer) + if a := len(n.metricC); a != metricBuffer { + t.Errorf("got %v, expected %v", a, metricBuffer) } } -// Test that the parser parses nats messages into points +// Test that the parser parses line format messages into metrics func TestRunParserAndGather(t *testing.T) { n, in := newTestNatsConsumer() defer close(n.done) @@ -101,7 +101,7 @@ func TestRunParserAndGather(t *testing.T) { map[string]interface{}{"value": float64(23422)}) } -// Test that the parser parses nats messages into points +// Test that the parser parses graphite format messages into metrics func TestRunParserAndGatherGraphite(t *testing.T) { n, in := newTestNatsConsumer() defer close(n.done) @@ -121,7 +121,7 @@ func TestRunParserAndGatherGraphite(t *testing.T) { map[string]interface{}{"value": float64(23422)}) } -// Test that the parser parses nats messages into points +// Test that the parser parses json format messages into metrics func TestRunParserAndGatherJSON(t *testing.T) { n, in := newTestNatsConsumer() defer close(n.done) From 8d0f50a6fd10cea1af84ccfdc2beeb1a61dd19bd Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 9 Feb 2016 15:03:46 -0700 Subject: [PATCH 005/287] MQTT Consumer Input plugin --- CHANGELOG.md | 4 +- Godeps | 3 +- README.md | 1 + plugins/inputs/EXAMPLE_README.md | 2 +- plugins/inputs/all/all.go | 1 + plugins/inputs/kafka_consumer/README.md | 31 ++- .../inputs/kafka_consumer/kafka_consumer.go | 33 +-- plugins/inputs/mqtt_consumer/README.md | 48 ++++ plugins/inputs/mqtt_consumer/mqtt_consumer.go | 228 ++++++++++++++++++ .../mqtt_consumer/mqtt_consumer_test.go | 186 ++++++++++++++ plugins/inputs/nats_consumer/README.md | 19 +- plugins/outputs/mqtt/mqtt.go | 11 +- plugins/parsers/graphite/parser.go | 18 +- plugins/parsers/influx/parser.go | 4 + plugins/parsers/json/parser.go | 4 + plugins/parsers/registry.go | 5 + 16 files changed, 554 insertions(+), 44 deletions(-) create mode 100644 plugins/inputs/mqtt_consumer/README.md create mode 100644 plugins/inputs/mqtt_consumer/mqtt_consumer.go create mode 100644 plugins/inputs/mqtt_consumer/mqtt_consumer_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 535e0d067..eae478e24 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,8 @@ ## v0.10.3 [unreleased] ### Release Notes -- Users of the `exec` and `kafka_consumer` can now specify the incoming data +- Users of the `exec` and `kafka_consumer` (and the new `nats_consumer` +and `mqtt_consumer` plugins) can now specify the incoming data format that they would like to parse. Currently supports: "json", "influx", and "graphite" - More info on parsing arbitrary data formats can be found @@ -12,6 +13,7 @@ format that they would like to parse. Currently supports: "json", "influx", and - [#655](https://github.com/influxdata/telegraf/pull/655): Support parsing arbitrary data formats. Currently limited to kafka_consumer and exec inputs. - [#671](https://github.com/influxdata/telegraf/pull/671): Dovecot input plugin. Thanks @mikif70! - [#680](https://github.com/influxdata/telegraf/pull/680): NATS consumer input plugin. Thanks @netixen! +- [#676](https://github.com/influxdata/telegraf/pull/676): MQTT consumer input plugin. ### Bugfixes - [#443](https://github.com/influxdata/telegraf/issues/443): Fix Ping command timeout parameter on Linux. diff --git a/Godeps b/Godeps index 0b9a16727..005aee939 100644 --- a/Godeps +++ b/Godeps @@ -1,4 +1,4 @@ -git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git dbd8d5c40a582eb9adacde36b47932b3a3ad0034 +git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git 617c801af238c3af2d9e72c5d4a0f02edad03ce5 github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef github.com/Sirupsen/logrus f7f79f729e0fbe2fcc061db48a9ba0263f588252 github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339 @@ -30,7 +30,6 @@ github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9 github.com/nats-io/nats 6a83f1a633cfbfd90aa648ac99fb38c06a8b40df github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f -github.com/pborman/uuid dee7705ef7b324f27ceb85a121c61f2c2e8ce988 github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2 github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 diff --git a/README.md b/README.md index ac6d68f0a..21a3445ea 100644 --- a/README.md +++ b/README.md @@ -203,6 +203,7 @@ Currently implemented sources: Telegraf can also collect metrics via the following service plugins: * statsd +* mqtt_consumer * kafka_consumer * nats_consumer * github_webhooks diff --git a/plugins/inputs/EXAMPLE_README.md b/plugins/inputs/EXAMPLE_README.md index 16aaac8ef..9207cd2ab 100644 --- a/plugins/inputs/EXAMPLE_README.md +++ b/plugins/inputs/EXAMPLE_README.md @@ -4,7 +4,7 @@ The example plugin gathers metrics about example things ### Configuration: -``` +```toml # Description [[inputs.example]] # SampleConfig diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 794885129..335d41a32 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -21,6 +21,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/mailchimp" _ "github.com/influxdata/telegraf/plugins/inputs/memcached" _ "github.com/influxdata/telegraf/plugins/inputs/mongodb" + _ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/mysql" _ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/nginx" diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index f0d15356f..4fdda0c3a 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -1,4 +1,4 @@ -# Kafka Consumer +# Kafka Consumer Input Plugin The [Kafka](http://kafka.apache.org/) consumer plugin polls a specified Kafka topic and adds messages to InfluxDB. The plugin assumes messages follow the @@ -6,6 +6,29 @@ line protocol. [Consumer Group](http://godoc.org/github.com/wvanbergen/kafka/con is used to talk to the Kafka cluster so multiple instances of telegraf can read from the same topic in parallel. +## Configuration + +```toml +# Read metrics from Kafka topic(s) +[[inputs.kafka_consumer]] + ### topic(s) to consume + topics = ["telegraf"] + ### an array of Zookeeper connection strings + zookeeper_peers = ["localhost:2181"] + ### the name of the consumer group + consumer_group = "telegraf_metrics_consumers" + ### Maximum number of metrics to buffer between collection intervals + metric_buffer = 100000 + ### Offset (must be either "oldest" or "newest") + offset = "oldest" + + ### Data format to consume. This can be "json", "influx" or "graphite" + ### Each data format has it's own unique set of configuration options, read + ### more about them here: + ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md + data_format = "influx" +``` + ## Testing Running integration tests requires running Zookeeper & Kafka. The following @@ -16,9 +39,3 @@ To start Kafka & Zookeeper: ``` docker run -d -p 2181:2181 -p 9092:9092 --env ADVERTISED_HOST=`boot2docker ip || docker-machine ip ` --env ADVERTISED_PORT=9092 spotify/kafka ``` - -To run tests: - -``` -go test -``` diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 20ce8ef23..9fa47dee9 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -19,8 +19,10 @@ type Kafka struct { Topics []string ZookeeperPeers []string Consumer *consumergroup.ConsumerGroup - PointBuffer int - Offset string + MetricBuffer int + // TODO remove PointBuffer, legacy support + PointBuffer int + Offset string parser parsers.Parser @@ -30,7 +32,7 @@ type Kafka struct { in <-chan *sarama.ConsumerMessage // channel for all kafka consumer errors errs <-chan *sarama.ConsumerError - // channel for all incoming parsed kafka points + // channel for all incoming parsed kafka metrics metricC chan telegraf.Metric done chan struct{} @@ -46,8 +48,8 @@ var sampleConfig = ` zookeeper_peers = ["localhost:2181"] ### the name of the consumer group consumer_group = "telegraf_metrics_consumers" - ### Maximum number of points to buffer between collection intervals - point_buffer = 100000 + ### Maximum number of metrics to buffer between collection intervals + metric_buffer = 100000 ### Offset (must be either "oldest" or "newest") offset = "oldest" @@ -104,10 +106,13 @@ func (k *Kafka) Start() error { } k.done = make(chan struct{}) - if k.PointBuffer == 0 { - k.PointBuffer = 100000 + if k.PointBuffer == 0 && k.MetricBuffer == 0 { + k.MetricBuffer = 100000 + } else if k.PointBuffer > 0 { + // Legacy support of PointBuffer field TODO remove + k.MetricBuffer = k.PointBuffer } - k.metricC = make(chan telegraf.Metric, k.PointBuffer) + k.metricC = make(chan telegraf.Metric, k.MetricBuffer) // Start the kafka message reader go k.receiver() @@ -128,7 +133,7 @@ func (k *Kafka) receiver() { case msg := <-k.in: metrics, err := k.parser.Parse(msg.Value) if err != nil { - log.Printf("Could not parse kafka message: %s, error: %s", + log.Printf("KAFKA PARSE ERROR\nmessage: %s\nerror: %s", string(msg.Value), err.Error()) } @@ -139,7 +144,7 @@ func (k *Kafka) receiver() { continue default: log.Printf("Kafka Consumer buffer is full, dropping a metric." + - " You may want to increase the point_buffer setting") + " You may want to increase the metric_buffer setting") } } @@ -166,10 +171,10 @@ func (k *Kafka) Stop() { func (k *Kafka) Gather(acc telegraf.Accumulator) error { k.Lock() defer k.Unlock() - npoints := len(k.metricC) - for i := 0; i < npoints; i++ { - point := <-k.metricC - acc.AddFields(point.Name(), point.Fields(), point.Tags(), point.Time()) + nmetrics := len(k.metricC) + for i := 0; i < nmetrics; i++ { + metric := <-k.metricC + acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time()) } return nil } diff --git a/plugins/inputs/mqtt_consumer/README.md b/plugins/inputs/mqtt_consumer/README.md new file mode 100644 index 000000000..6f7fa911c --- /dev/null +++ b/plugins/inputs/mqtt_consumer/README.md @@ -0,0 +1,48 @@ +# MQTT Consumer Input Plugin + +The [MQTT](http://mqtt.org/) consumer plugin reads from +specified MQTT topics and adds messages to InfluxDB. +The plugin expects messages in the +[Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md). + +### Configuration: + +```toml +# Read metrics from MQTT topic(s) +[[inputs.mqtt_consumer]] + servers = ["localhost:1883"] + ### MQTT QoS, must be 0, 1, or 2 + qos = 0 + + ### Topics to subscribe to + topics = [ + "telegraf/host01/cpu", + "telegraf/+/mem", + "sensors/#", + ] + + ### Maximum number of metrics to buffer between collection intervals + metric_buffer = 100000 + + ### username and password to connect MQTT server. + # username = "telegraf" + # password = "metricsmetricsmetricsmetrics" + + ### Optional SSL Config + # ssl_ca = "/etc/telegraf/ca.pem" + # ssl_cert = "/etc/telegraf/cert.pem" + # ssl_key = "/etc/telegraf/key.pem" + ### Use SSL but skip chain & host verification + # insecure_skip_verify = false + + ### Data format to consume. This can be "json", "influx" or "graphite" + ### Each data format has it's own unique set of configuration options, read + ### more about them here: + ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md + data_format = "influx" +``` + +### Tags: + +- All measurements are tagged with the incoming topic, ie +`topic=telegraf/host01/cpu` diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go new file mode 100644 index 000000000..8ca0d44b1 --- /dev/null +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -0,0 +1,228 @@ +package mqtt_consumer + +import ( + "fmt" + "log" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers" + + "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" +) + +type MQTTConsumer struct { + Servers []string + Topics []string + Username string + Password string + MetricBuffer int + QoS int `toml:"qos"` + + parser parsers.Parser + + // Path to CA file + SSLCA string `toml:"ssl_ca"` + // Path to host cert file + SSLCert string `toml:"ssl_cert"` + // Path to cert key file + SSLKey string `toml:"ssl_key"` + // Use SSL but skip chain & host verification + InsecureSkipVerify bool + + sync.Mutex + client *mqtt.Client + // channel for all incoming parsed mqtt metrics + metricC chan telegraf.Metric + // channel for the topics of all incoming metrics (for tagging metrics) + topicC chan string + // channel of all incoming raw mqtt messages + in chan mqtt.Message + done chan struct{} +} + +var sampleConfig = ` + servers = ["localhost:1883"] + ### MQTT QoS, must be 0, 1, or 2 + qos = 0 + + ### Topics to subscribe to + topics = [ + "telegraf/host01/cpu", + "telegraf/+/mem", + "sensors/#", + ] + + ### Maximum number of metrics to buffer between collection intervals + metric_buffer = 100000 + + ### username and password to connect MQTT server. + # username = "telegraf" + # password = "metricsmetricsmetricsmetrics" + + ### Optional SSL Config + # ssl_ca = "/etc/telegraf/ca.pem" + # ssl_cert = "/etc/telegraf/cert.pem" + # ssl_key = "/etc/telegraf/key.pem" + ### Use SSL but skip chain & host verification + # insecure_skip_verify = false + + ### Data format to consume. This can be "json", "influx" or "graphite" + ### Each data format has it's own unique set of configuration options, read + ### more about them here: + ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md + data_format = "influx" +` + +func (m *MQTTConsumer) SampleConfig() string { + return sampleConfig +} + +func (m *MQTTConsumer) Description() string { + return "Read metrics from MQTT topic(s)" +} + +func (m *MQTTConsumer) SetParser(parser parsers.Parser) { + m.parser = parser +} + +func (m *MQTTConsumer) Start() error { + m.Lock() + defer m.Unlock() + if m.QoS > 2 || m.QoS < 0 { + return fmt.Errorf("MQTT Consumer, invalid QoS value: %d", m.QoS) + } + + opts, err := m.createOpts() + if err != nil { + return err + } + + m.client = mqtt.NewClient(opts) + if token := m.client.Connect(); token.Wait() && token.Error() != nil { + return token.Error() + } + + m.in = make(chan mqtt.Message, m.MetricBuffer) + m.done = make(chan struct{}) + if m.MetricBuffer == 0 { + m.MetricBuffer = 100000 + } + m.metricC = make(chan telegraf.Metric, m.MetricBuffer) + m.topicC = make(chan string, m.MetricBuffer) + + topics := make(map[string]byte) + for _, topic := range m.Topics { + topics[topic] = byte(m.QoS) + } + subscribeToken := m.client.SubscribeMultiple(topics, m.recvMessage) + subscribeToken.Wait() + if subscribeToken.Error() != nil { + return subscribeToken.Error() + } + + go m.receiver() + + return nil +} + +// receiver() reads all incoming messages from the consumer, and parses them into +// influxdb metric points. +func (m *MQTTConsumer) receiver() { + for { + select { + case <-m.done: + return + case msg := <-m.in: + topic := msg.Topic() + metrics, err := m.parser.Parse(msg.Payload()) + if err != nil { + log.Printf("MQTT PARSE ERROR\nmessage: %s\nerror: %s", + string(msg.Payload()), err.Error()) + } + + for _, metric := range metrics { + select { + case m.metricC <- metric: + m.topicC <- topic + default: + log.Printf("MQTT Consumer buffer is full, dropping a metric." + + " You may want to increase the metric_buffer setting") + } + } + } + } +} + +func (m *MQTTConsumer) recvMessage(_ *mqtt.Client, msg mqtt.Message) { + m.in <- msg +} + +func (m *MQTTConsumer) Stop() { + m.Lock() + defer m.Unlock() + close(m.done) + m.client.Disconnect(200) +} + +func (m *MQTTConsumer) Gather(acc telegraf.Accumulator) error { + m.Lock() + defer m.Unlock() + nmetrics := len(m.metricC) + for i := 0; i < nmetrics; i++ { + metric := <-m.metricC + topic := <-m.topicC + tags := metric.Tags() + tags["topic"] = topic + acc.AddFields(metric.Name(), metric.Fields(), tags, metric.Time()) + } + return nil +} + +func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { + opts := mqtt.NewClientOptions() + + opts.SetClientID("Telegraf-Consumer-" + internal.RandomString(5)) + + tlsCfg, err := internal.GetTLSConfig( + m.SSLCert, m.SSLKey, m.SSLCA, m.InsecureSkipVerify) + if err != nil { + return nil, err + } + + scheme := "tcp" + if tlsCfg != nil { + scheme = "ssl" + opts.SetTLSConfig(tlsCfg) + } + + user := m.Username + if user == "" { + opts.SetUsername(user) + } + password := m.Password + if password != "" { + opts.SetPassword(password) + } + + if len(m.Servers) == 0 { + return opts, fmt.Errorf("could not get host infomations") + } + for _, host := range m.Servers { + server := fmt.Sprintf("%s://%s", scheme, host) + + opts.AddBroker(server) + } + opts.SetAutoReconnect(true) + opts.SetKeepAlive(time.Second * 60) + return opts, nil +} + +func init() { + inputs.Add("mqtt_consumer", func() telegraf.Input { + return &MQTTConsumer{} + }) +} diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go new file mode 100644 index 000000000..be216dfbb --- /dev/null +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -0,0 +1,186 @@ +package mqtt_consumer + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" + + "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" +) + +const ( + testMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257" + testMsgGraphite = "cpu.load.short.graphite 23422 1454780029" + testMsgJSON = "{\"a\": 5, \"b\": {\"c\": 6}}\n" + invalidMsg = "cpu_load_short,host=server01 1422568543702900257" + metricBuffer = 5 +) + +func newTestMQTTConsumer() (*MQTTConsumer, chan mqtt.Message) { + in := make(chan mqtt.Message, metricBuffer) + n := &MQTTConsumer{ + Topics: []string{"telegraf"}, + Servers: []string{"localhost:1883"}, + MetricBuffer: metricBuffer, + in: in, + done: make(chan struct{}), + metricC: make(chan telegraf.Metric, metricBuffer), + topicC: make(chan string, metricBuffer), + } + return n, in +} + +// Test that the parser parses NATS messages into metrics +func TestRunParser(t *testing.T) { + n, in := newTestMQTTConsumer() + defer close(n.done) + + n.parser, _ = parsers.NewInfluxParser() + go n.receiver() + in <- mqttMsg(testMsg) + time.Sleep(time.Millisecond) + + if a := len(n.metricC); a != 1 { + t.Errorf("got %v, expected %v", a, 1) + } +} + +// Test that the parser ignores invalid messages +func TestRunParserInvalidMsg(t *testing.T) { + n, in := newTestMQTTConsumer() + defer close(n.done) + + n.parser, _ = parsers.NewInfluxParser() + go n.receiver() + in <- mqttMsg(invalidMsg) + time.Sleep(time.Millisecond) + + if a := len(n.metricC); a != 0 { + t.Errorf("got %v, expected %v", a, 0) + } +} + +// Test that metrics are dropped when we hit the buffer limit +func TestRunParserRespectsBuffer(t *testing.T) { + n, in := newTestMQTTConsumer() + defer close(n.done) + + n.parser, _ = parsers.NewInfluxParser() + go n.receiver() + for i := 0; i < metricBuffer+1; i++ { + in <- mqttMsg(testMsg) + } + time.Sleep(time.Millisecond) + + if a := len(n.metricC); a != metricBuffer { + t.Errorf("got %v, expected %v", a, metricBuffer) + } +} + +// Test that the parser parses line format messages into metrics +func TestRunParserAndGather(t *testing.T) { + n, in := newTestMQTTConsumer() + defer close(n.done) + + n.parser, _ = parsers.NewInfluxParser() + go n.receiver() + in <- mqttMsg(testMsg) + time.Sleep(time.Millisecond) + + acc := testutil.Accumulator{} + n.Gather(&acc) + + if a := len(acc.Metrics); a != 1 { + t.Errorf("got %v, expected %v", a, 1) + } + acc.AssertContainsFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(23422)}) +} + +// Test that the parser parses graphite format messages into metrics +func TestRunParserAndGatherGraphite(t *testing.T) { + n, in := newTestMQTTConsumer() + defer close(n.done) + + n.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) + go n.receiver() + in <- mqttMsg(testMsgGraphite) + time.Sleep(time.Millisecond) + + acc := testutil.Accumulator{} + n.Gather(&acc) + + if a := len(acc.Metrics); a != 1 { + t.Errorf("got %v, expected %v", a, 1) + } + acc.AssertContainsFields(t, "cpu_load_short_graphite", + map[string]interface{}{"value": float64(23422)}) +} + +// Test that the parser parses json format messages into metrics +func TestRunParserAndGatherJSON(t *testing.T) { + n, in := newTestMQTTConsumer() + defer close(n.done) + + n.parser, _ = parsers.NewJSONParser("nats_json_test", []string{}, nil) + go n.receiver() + in <- mqttMsg(testMsgJSON) + time.Sleep(time.Millisecond) + + acc := testutil.Accumulator{} + n.Gather(&acc) + + if a := len(acc.Metrics); a != 1 { + t.Errorf("got %v, expected %v", a, 1) + } + acc.AssertContainsFields(t, "nats_json_test", + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }) +} + +func mqttMsg(val string) mqtt.Message { + return &message{ + topic: "telegraf/unit_test", + payload: []byte(val), + } +} + +// Take the message struct from the paho mqtt client library for returning +// a test message interface. +type message struct { + duplicate bool + qos byte + retained bool + topic string + messageID uint16 + payload []byte +} + +func (m *message) Duplicate() bool { + return m.duplicate +} + +func (m *message) Qos() byte { + return m.qos +} + +func (m *message) Retained() bool { + return m.retained +} + +func (m *message) Topic() string { + return m.topic +} + +func (m *message) MessageID() uint16 { + return m.messageID +} + +func (m *message) Payload() []byte { + return m.payload +} diff --git a/plugins/inputs/nats_consumer/README.md b/plugins/inputs/nats_consumer/README.md index b2d027039..31d13297e 100644 --- a/plugins/inputs/nats_consumer/README.md +++ b/plugins/inputs/nats_consumer/README.md @@ -1,14 +1,15 @@ -# NATS Consumer +# NATS Consumer Input Plugin -The [NATS](http://www.nats.io/about/) consumer plugin reads from +The [NATS](http://www.nats.io/about/) consumer plugin reads from specified NATS subjects and adds messages to InfluxDB. The plugin expects messages -in the [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md). +in the [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md). A [Queue Group](http://www.nats.io/documentation/concepts/nats-queueing/) is used when subscribing to subjects so multiple instances of telegraf can read from a NATS cluster in parallel. ## Configuration -``` + +```toml # Read metrics from NATS subject(s) [[inputs.nats_consumer]] ### urls of NATS servers @@ -21,18 +22,10 @@ from a NATS cluster in parallel. queue_group = "telegraf_consumers" ### Maximum number of metrics to buffer between collection intervals metric_buffer = 100000 - + ### Data format to consume. This can be "json", "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md data_format = "influx" ``` - -## Testing - -To run tests: - -``` -go test -``` \ No newline at end of file diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index 5d2694ff3..61f0ef557 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -11,9 +11,6 @@ import ( "github.com/influxdata/telegraf/plugins/outputs" ) -const MaxRetryCount = 3 -const ClientIdPrefix = "telegraf" - type MQTT struct { Servers []string `toml:"servers"` Username string @@ -21,6 +18,7 @@ type MQTT struct { Database string Timeout internal.Duration TopicPrefix string + QoS int `toml:"qos"` // Path to CA file SSLCA string `toml:"ssl_ca"` @@ -39,6 +37,8 @@ type MQTT struct { var sampleConfig = ` servers = ["localhost:1883"] # required. + ### MQTT QoS, must be 0, 1, or 2 + qos = 0 ### MQTT outputs send metrics to this topic format ### "///" @@ -61,6 +61,9 @@ func (m *MQTT) Connect() error { var err error m.Lock() defer m.Unlock() + if m.QoS > 2 || m.QoS < 0 { + return fmt.Errorf("MQTT Output, invalid QoS value: %d", m.QoS) + } m.opts, err = m.createOpts() if err != nil { @@ -124,7 +127,7 @@ func (m *MQTT) Write(metrics []telegraf.Metric) error { } func (m *MQTT) publish(topic, body string) error { - token := m.client.Publish(topic, 0, false, body) + token := m.client.Publish(topic, byte(m.QoS), false, body) token.Wait() if token.Error() != nil { return token.Error() diff --git a/plugins/parsers/graphite/parser.go b/plugins/parsers/graphite/parser.go index 74ccd81cb..5e8815064 100644 --- a/plugins/parsers/graphite/parser.go +++ b/plugins/parsers/graphite/parser.go @@ -29,6 +29,10 @@ type GraphiteParser struct { matcher *matcher } +func (p *GraphiteParser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} + func NewGraphiteParser( separator string, templates []string, @@ -104,13 +108,14 @@ func (p *GraphiteParser) Parse(buf []byte) ([]telegraf.Metric, error) { metrics := make([]telegraf.Metric, 0) + var errStr string buffer := bytes.NewBuffer(buf) reader := bufio.NewReader(buffer) for { // Read up to the next newline. buf, err := reader.ReadBytes('\n') if err == io.EOF { - return metrics, nil + break } if err != nil && err != io.EOF { return metrics, err @@ -118,10 +123,19 @@ func (p *GraphiteParser) Parse(buf []byte) ([]telegraf.Metric, error) { // Trim the buffer, even though there should be no padding line := strings.TrimSpace(string(buf)) - if metric, err := p.ParseLine(line); err == nil { + metric, err := p.ParseLine(line) + + if err == nil { metrics = append(metrics, metric) + } else { + errStr += err.Error() + "\n" } } + + if errStr != "" { + return metrics, fmt.Errorf(errStr) + } + return metrics, nil } // Parse performs Graphite parsing of a single line. diff --git a/plugins/parsers/influx/parser.go b/plugins/parsers/influx/parser.go index d5a1b8db5..345e60b2e 100644 --- a/plugins/parsers/influx/parser.go +++ b/plugins/parsers/influx/parser.go @@ -55,3 +55,7 @@ func (p *InfluxParser) ParseLine(line string) (telegraf.Metric, error) { return metrics[0], nil } + +func (p *InfluxParser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index d8aa93e01..e5172ac97 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -67,6 +67,10 @@ func (p *JSONParser) ParseLine(line string) (telegraf.Metric, error) { return metrics[0], nil } +func (p *JSONParser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} + type JSONFlattener struct { Fields map[string]interface{} } diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index 083d497e6..982b6bb80 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -28,6 +28,11 @@ type Parser interface { // ie, "cpu.usage.idle 90" // and parses it into a telegraf metric. ParseLine(line string) (telegraf.Metric, error) + + // SetDefaultTags tells the parser to add all of the given tags + // to each parsed metric. + // NOTE: do _not_ modify the map after you've passed it here!! + SetDefaultTags(tags map[string]string) } // Config is a struct that covers the data types needed for all parser types, From 72f5c9b62d9466c304d47834ff41c74274c5661b Mon Sep 17 00:00:00 2001 From: Thomas Menard Date: Wed, 10 Feb 2016 10:40:48 +0100 Subject: [PATCH 006/287] postgres plugin bgwriter stats Add pg_stat_bg_writer stats closes #683 --- CHANGELOG.md | 1 + plugins/inputs/postgresql/README.md | 3 +- plugins/inputs/postgresql/postgresql.go | 46 +++++++++++++++++--- plugins/inputs/postgresql/postgresql_test.go | 14 ++++-- 4 files changed, 53 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index eae478e24..99083cc45 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ format that they would like to parse. Currently supports: "json", "influx", and - [#671](https://github.com/influxdata/telegraf/pull/671): Dovecot input plugin. Thanks @mikif70! - [#680](https://github.com/influxdata/telegraf/pull/680): NATS consumer input plugin. Thanks @netixen! - [#676](https://github.com/influxdata/telegraf/pull/676): MQTT consumer input plugin. +- [#683](https://github.com/influxdata/telegraf/pull/683): PostGRES input plugin: add pg_stat_bgwriter. Thanks @menardorama! ### Bugfixes - [#443](https://github.com/influxdata/telegraf/issues/443): Fix Ping command timeout parameter on Linux. diff --git a/plugins/inputs/postgresql/README.md b/plugins/inputs/postgresql/README.md index ce0ae18d6..e5e9a8961 100644 --- a/plugins/inputs/postgresql/README.md +++ b/plugins/inputs/postgresql/README.md @@ -1,6 +1,6 @@ # PostgreSQL plugin -This postgresql plugin provides metrics for your postgres database. It currently works with postgres versions 8.1+. It uses data from the built in _pg_stat_database_ view. The metrics recorded depend on your version of postgres. See table: +This postgresql plugin provides metrics for your postgres database. It currently works with postgres versions 8.1+. It uses data from the built in _pg_stat_database_ and pg_stat_bgwriter views. The metrics recorded depend on your version of postgres. See table: ``` pg version 9.2+ 9.1 8.3-9.0 8.1-8.2 7.4-8.0(unsupported) --- --- --- ------- ------- ------- @@ -27,4 +27,5 @@ stats_reset* x x _* value ignored and therefore not recorded._ + More information about the meaning of these metrics can be found in the [PostgreSQL Documentation](http://www.postgresql.org/docs/9.2/static/monitoring-stats.html#PG-STAT-DATABASE-VIEW) diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index d64cc1099..660f1b318 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -4,6 +4,7 @@ import ( "bytes" "database/sql" "fmt" + "sort" "strings" "github.com/influxdata/telegraf" @@ -16,6 +17,7 @@ type Postgresql struct { Address string Databases []string OrderedColumns []string + AllColumns []string } var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true} @@ -86,6 +88,9 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { p.OrderedColumns, err = rows.Columns() if err != nil { return err + } else { + p.AllColumns = make([]string, len(p.OrderedColumns)) + copy(p.AllColumns, p.OrderedColumns) } for rows.Next() { @@ -94,8 +99,34 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { return err } } + //return rows.Err() + query = `SELECT * FROM pg_stat_bgwriter` - return rows.Err() + bg_writer_row, err := db.Query(query) + if err != nil { + return err + } + + defer bg_writer_row.Close() + + // grab the column information from the result + p.OrderedColumns, err = bg_writer_row.Columns() + if err != nil { + return err + } else { + for _, v := range p.OrderedColumns { + p.AllColumns = append(p.AllColumns, v) + } + } + + for bg_writer_row.Next() { + err = p.accRow(bg_writer_row, acc) + if err != nil { + return err + } + } + sort.Strings(p.AllColumns) + return bg_writer_row.Err() } type scanner interface { @@ -124,11 +155,14 @@ func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator) error { if err != nil { return err } - - // extract the database name from the column map - dbnameChars := (*columnMap["datname"]).([]uint8) - for i := 0; i < len(dbnameChars); i++ { - dbname.WriteString(string(dbnameChars[i])) + if columnMap["datname"] != nil { + // extract the database name from the column map + dbnameChars := (*columnMap["datname"]).([]uint8) + for i := 0; i < len(dbnameChars); i++ { + dbname.WriteString(string(dbnameChars[i])) + } + } else { + dbname.WriteString("postgres") } tags := map[string]string{"server": p.Address, "db": dbname.String()} diff --git a/plugins/inputs/postgresql/postgresql_test.go b/plugins/inputs/postgresql/postgresql_test.go index 3a2ccb1b0..552b18cdb 100644 --- a/plugins/inputs/postgresql/postgresql_test.go +++ b/plugins/inputs/postgresql/postgresql_test.go @@ -21,15 +21,13 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { } var acc testutil.Accumulator - err := p.Gather(&acc) require.NoError(t, err) availableColumns := make(map[string]bool) - for _, col := range p.OrderedColumns { + for _, col := range p.AllColumns { availableColumns[col] = true } - intMetrics := []string{ "xact_commit", "xact_rollback", @@ -45,6 +43,14 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { "temp_bytes", "deadlocks", "numbackends", + "buffers_alloc", + "buffers_backend", + "buffers_backend_fsync", + "buffers_checkpoint", + "buffers_clean", + "checkpoints_req", + "checkpoints_timed", + "maxwritten_clean", } floatMetrics := []string{ @@ -71,7 +77,7 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { } assert.True(t, metricsCounted > 0) - assert.Equal(t, len(availableColumns)-len(p.IgnoredColumns()), metricsCounted) + //assert.Equal(t, len(availableColumns)-len(p.IgnoredColumns()), metricsCounted) } func TestPostgresqlTagsMetricsWithDatabaseName(t *testing.T) { From a9c135488e6781dd2cd81260e6e3b255a28cbef6 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 10 Feb 2016 15:50:07 -0700 Subject: [PATCH 007/287] Add Serializer plugins, and 'file' output plugin --- Godeps | 3 +- internal/config/config.go | 43 +++++++ plugins/outputs/all/all.go | 1 + plugins/outputs/amqp/amqp.go | 32 ++++- plugins/outputs/amqp/amqp_test.go | 7 +- plugins/outputs/file/README.md | 1 + plugins/outputs/file/file.go | 109 ++++++++++++++++ plugins/outputs/file/file_test.go | 1 + plugins/outputs/graphite/graphite.go | 84 +++--------- plugins/outputs/graphite/graphite_test.go | 31 +---- plugins/outputs/kafka/kafka.go | 48 ++++--- plugins/outputs/kafka/kafka_test.go | 7 +- plugins/outputs/mqtt/mqtt.go | 72 +++++++---- plugins/outputs/mqtt/mqtt_test.go | 6 +- plugins/outputs/nsq/nsq.go | 36 +++++- plugins/outputs/nsq/nsq_test.go | 7 +- plugins/parsers/influx/parser.go | 2 +- plugins/serializers/graphite/graphite.go | 79 ++++++++++++ plugins/serializers/graphite/graphite_test.go | 121 ++++++++++++++++++ plugins/serializers/influx/influx.go | 12 ++ plugins/serializers/influx/influx_test.go | 68 ++++++++++ plugins/serializers/registry.go | 55 ++++++++ 22 files changed, 665 insertions(+), 160 deletions(-) create mode 100644 plugins/outputs/file/README.md create mode 100644 plugins/outputs/file/file.go create mode 100644 plugins/outputs/file/file_test.go create mode 100644 plugins/serializers/graphite/graphite.go create mode 100644 plugins/serializers/graphite/graphite_test.go create mode 100644 plugins/serializers/influx/influx.go create mode 100644 plugins/serializers/influx/influx_test.go create mode 100644 plugins/serializers/registry.go diff --git a/Godeps b/Godeps index 005aee939..d0d2194c6 100644 --- a/Godeps +++ b/Godeps @@ -19,8 +19,7 @@ github.com/gorilla/context 1c83b3eabd45b6d76072b66b746c20815fb2872d github.com/gorilla/mux 26a6070f849969ba72b72256e9f14cf519751690 github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478 github.com/influxdata/config bae7cb98197d842374d3b8403905924094930f24 -github.com/influxdata/influxdb a9552fdd91361819a792f337e5d9998859732a67 -github.com/influxdb/influxdb a9552fdd91361819a792f337e5d9998859732a67 +github.com/influxdata/influxdb ef571fc104dc24b77cd3710c156cd95e5cfd7aa5 github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264 github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38 github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f diff --git a/internal/config/config.go b/internal/config/config.go index 766ba1189..ffd4f632a 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -16,6 +16,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/config" "github.com/naoina/toml/ast" @@ -398,6 +399,17 @@ func (c *Config) addOutput(name string, table *ast.Table) error { } output := creator() + // If the output has a SetSerializer function, then this means it can write + // arbitrary types of output, so build the serializer and set it. + switch t := output.(type) { + case serializers.SerializerOutput: + serializer, err := buildSerializer(name, table) + if err != nil { + return err + } + t.SetSerializer(serializer) + } + outputConfig, err := buildOutput(name, table) if err != nil { return err @@ -660,6 +672,37 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { return parsers.NewParser(c) } +// buildSerializer grabs the necessary entries from the ast.Table for creating +// a serializers.Serializer object, and creates it, which can then be added onto +// an Output object. +func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error) { + c := &serializers.Config{} + + if node, ok := tbl.Fields["data_format"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.DataFormat = str.Value + } + } + } + + if c.DataFormat == "" { + c.DataFormat = "influx" + } + + if node, ok := tbl.Fields["prefix"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.Prefix = str.Value + } + } + } + + delete(tbl.Fields, "data_format") + delete(tbl.Fields, "prefix") + return serializers.NewSerializer(c) +} + // buildOutput parses output specific items from the ast.Table, builds the filter and returns an // internal_models.OutputConfig to be inserted into internal_models.RunningInput // Note: error exists in the return for future calls that might require error diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index ac8357c90..18fb1c925 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -5,6 +5,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/amqp" _ "github.com/influxdata/telegraf/plugins/outputs/cloudwatch" _ "github.com/influxdata/telegraf/plugins/outputs/datadog" + _ "github.com/influxdata/telegraf/plugins/outputs/file" _ "github.com/influxdata/telegraf/plugins/outputs/graphite" _ "github.com/influxdata/telegraf/plugins/outputs/influxdb" _ "github.com/influxdata/telegraf/plugins/outputs/kafka" diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index 19d95f512..d826e6d52 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -10,6 +10,8 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/serializers" + "github.com/streadway/amqp" ) @@ -39,6 +41,8 @@ type AMQP struct { channel *amqp.Channel sync.Mutex headers amqp.Table + + serializer serializers.Serializer } const ( @@ -69,8 +73,18 @@ var sampleConfig = ` # ssl_key = "/etc/telegraf/key.pem" ### Use SSL but skip chain & host verification # insecure_skip_verify = false + + ### Data format to output. This can be "influx" or "graphite" + ### Each data format has it's own unique set of configuration options, read + ### more about them here: + ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md + data_format = "influx" ` +func (a *AMQP) SetSerializer(serializer serializers.Serializer) { + a.serializer = serializer +} + func (q *AMQP) Connect() error { q.Lock() defer q.Unlock() @@ -147,18 +161,24 @@ func (q *AMQP) Write(metrics []telegraf.Metric) error { } var outbuf = make(map[string][][]byte) - for _, p := range metrics { - var value, key string - value = p.String() - + for _, metric := range metrics { + var key string if q.RoutingTag != "" { - if h, ok := p.Tags()[q.RoutingTag]; ok { + if h, ok := metric.Tags()[q.RoutingTag]; ok { key = h } } - outbuf[key] = append(outbuf[key], []byte(value)) + values, err := q.serializer.Serialize(metric) + if err != nil { + return err + } + + for _, value := range values { + outbuf[key] = append(outbuf[key], []byte(value)) + } } + for key, buf := range outbuf { err := q.channel.Publish( q.Exchange, // exchange diff --git a/plugins/outputs/amqp/amqp_test.go b/plugins/outputs/amqp/amqp_test.go index 4cecff02e..66a082627 100644 --- a/plugins/outputs/amqp/amqp_test.go +++ b/plugins/outputs/amqp/amqp_test.go @@ -3,6 +3,7 @@ package amqp import ( "testing" + "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -13,9 +14,11 @@ func TestConnectAndWrite(t *testing.T) { } var url = "amqp://" + testutil.GetLocalHost() + ":5672/" + s, _ := serializers.NewInfluxSerializer() q := &AMQP{ - URL: url, - Exchange: "telegraf_test", + URL: url, + Exchange: "telegraf_test", + serializer: s, } // Verify that we can connect to the AMQP broker diff --git a/plugins/outputs/file/README.md b/plugins/outputs/file/README.md new file mode 100644 index 000000000..6f3b7f513 --- /dev/null +++ b/plugins/outputs/file/README.md @@ -0,0 +1 @@ +# file Output Plugin diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go new file mode 100644 index 000000000..deae8aaf8 --- /dev/null +++ b/plugins/outputs/file/file.go @@ -0,0 +1,109 @@ +package file + +import ( + "fmt" + "io" + "os" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/serializers" +) + +type File struct { + Files []string + + writer io.Writer + closers []io.Closer + + serializer serializers.Serializer +} + +var sampleConfig = ` + ### Files to write to, "stdout" is a specially handled file. + files = ["stdout", "/tmp/metrics.out"] + + ### Data format to output. This can be "influx" or "graphite" + ### Each data format has it's own unique set of configuration options, read + ### more about them here: + ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md + data_format = "influx" +` + +func (f *File) SetSerializer(serializer serializers.Serializer) { + f.serializer = serializer +} + +func (f *File) Connect() error { + writers := []io.Writer{} + for _, file := range f.Files { + if file == "stdout" { + writers = append(writers, os.Stdout) + f.closers = append(f.closers, os.Stdout) + } else { + var of *os.File + var err error + if _, err := os.Stat(file); os.IsNotExist(err) { + of, err = os.Create(file) + } else { + of, err = os.OpenFile(file, os.O_APPEND|os.O_WRONLY, os.ModeAppend) + } + + if err != nil { + return err + } + writers = append(writers, of) + f.closers = append(f.closers, of) + } + } + f.writer = io.MultiWriter(writers...) + return nil +} + +func (f *File) Close() error { + var errS string + for _, c := range f.closers { + if err := c.Close(); err != nil { + errS += err.Error() + "\n" + } + } + if errS != "" { + return fmt.Errorf(errS) + } + return nil +} + +func (f *File) SampleConfig() string { + return sampleConfig +} + +func (f *File) Description() string { + return "Send telegraf metrics to file(s)" +} + +func (f *File) Write(metrics []telegraf.Metric) error { + if len(metrics) == 0 { + return nil + } + + for _, metric := range metrics { + values, err := f.serializer.Serialize(metric) + if err != nil { + return err + } + + for _, value := range values { + _, err = f.writer.Write([]byte(value + "\n")) + if err != nil { + return fmt.Errorf("FAILED to write message: %s, %s", value, err) + } + } + } + return nil +} + +func init() { + outputs.Add("file", func() telegraf.Output { + return &File{} + }) +} diff --git a/plugins/outputs/file/file_test.go b/plugins/outputs/file/file_test.go new file mode 100644 index 000000000..b691ba57a --- /dev/null +++ b/plugins/outputs/file/file_test.go @@ -0,0 +1 @@ +package file diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go index 7e4414ffc..29ac774f4 100644 --- a/plugins/outputs/graphite/graphite.go +++ b/plugins/outputs/graphite/graphite.go @@ -3,14 +3,15 @@ package graphite import ( "errors" "fmt" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/outputs" "log" "math/rand" "net" - "sort" "strings" "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/serializers" ) type Graphite struct { @@ -71,42 +72,22 @@ func (g *Graphite) Description() string { func (g *Graphite) Write(metrics []telegraf.Metric) error { // Prepare data var bp []string - for _, metric := range metrics { - // Get name - name := metric.Name() - // Convert UnixNano to Unix timestamps - timestamp := metric.UnixNano() / 1000000000 - tag_str := buildTags(metric) - - for field_name, value := range metric.Fields() { - // Convert value - value_str := fmt.Sprintf("%#v", value) - // Write graphite metric - var graphitePoint string - if name == field_name { - graphitePoint = fmt.Sprintf("%s.%s %s %d\n", - tag_str, - strings.Replace(name, ".", "_", -1), - value_str, - timestamp) - } else { - graphitePoint = fmt.Sprintf("%s.%s.%s %s %d\n", - tag_str, - strings.Replace(name, ".", "_", -1), - strings.Replace(field_name, ".", "_", -1), - value_str, - timestamp) - } - if g.Prefix != "" { - graphitePoint = fmt.Sprintf("%s.%s", g.Prefix, graphitePoint) - } - bp = append(bp, graphitePoint) - } + s, err := serializers.NewGraphiteSerializer(g.Prefix) + if err != nil { + return err } - graphitePoints := strings.Join(bp, "") + + for _, metric := range metrics { + gMetrics, err := s.Serialize(metric) + if err != nil { + log.Printf("Error serializing some metrics to graphite: %s", err.Error()) + } + bp = append(bp, gMetrics...) + } + graphitePoints := strings.Join(bp, "\n") + "\n" // This will get set to nil if a successful write occurs - err := errors.New("Could not write to any Graphite server in cluster\n") + err = errors.New("Could not write to any Graphite server in cluster\n") // Send data to a random server p := rand.Perm(len(g.conns)) @@ -128,37 +109,6 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error { return err } -func buildTags(metric telegraf.Metric) string { - var keys []string - tags := metric.Tags() - for k := range tags { - if k == "host" { - continue - } - keys = append(keys, k) - } - sort.Strings(keys) - - var tag_str string - if host, ok := tags["host"]; ok { - if len(keys) > 0 { - tag_str = strings.Replace(host, ".", "_", -1) + "." - } else { - tag_str = strings.Replace(host, ".", "_", -1) - } - } - - for i, k := range keys { - tag_value := strings.Replace(tags[k], ".", "_", -1) - if i == 0 { - tag_str += tag_value - } else { - tag_str += "." + tag_value - } - } - return tag_str -} - func init() { outputs.Add("graphite", func() telegraf.Output { return &Graphite{} diff --git a/plugins/outputs/graphite/graphite_test.go b/plugins/outputs/graphite/graphite_test.go index 2b62750e3..9d9476241 100644 --- a/plugins/outputs/graphite/graphite_test.go +++ b/plugins/outputs/graphite/graphite_test.go @@ -43,6 +43,8 @@ func TestGraphiteOK(t *testing.T) { // Start TCP server wg.Add(1) go TCPServer(t, &wg) + // Give the fake graphite TCP server some time to start: + time.Sleep(time.Millisecond * 100) // Init plugin g := Graphite{ @@ -95,32 +97,3 @@ func TCPServer(t *testing.T, wg *sync.WaitGroup) { assert.Equal(t, "my.prefix.192_168_0_1.my_measurement.value 3.14 1289430000", data3) conn.Close() } - -func TestGraphiteTags(t *testing.T) { - m1, _ := telegraf.NewMetric( - "mymeasurement", - map[string]string{"host": "192.168.0.1"}, - map[string]interface{}{"value": float64(3.14)}, - time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), - ) - m2, _ := telegraf.NewMetric( - "mymeasurement", - map[string]string{"host": "192.168.0.1", "afoo": "first", "bfoo": "second"}, - map[string]interface{}{"value": float64(3.14)}, - time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), - ) - m3, _ := telegraf.NewMetric( - "mymeasurement", - map[string]string{"afoo": "first", "bfoo": "second"}, - map[string]interface{}{"value": float64(3.14)}, - time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), - ) - - tags1 := buildTags(m1) - tags2 := buildTags(m2) - tags3 := buildTags(m3) - - assert.Equal(t, "192_168_0_1", tags1) - assert.Equal(t, "192_168_0_1.first.second", tags2) - assert.Equal(t, "first.second", tags3) -} diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index a1240dc28..71c2642dd 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -2,12 +2,12 @@ package kafka import ( "crypto/tls" - "errors" "fmt" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/serializers" "github.com/Shopify/sarama" ) @@ -40,6 +40,8 @@ type Kafka struct { tlsConfig tls.Config producer sarama.SyncProducer + + serializer serializers.Serializer } var sampleConfig = ` @@ -57,8 +59,18 @@ var sampleConfig = ` # ssl_key = "/etc/telegraf/key.pem" ### Use SSL but skip chain & host verification # insecure_skip_verify = false + + ### Data format to output. This can be "influx" or "graphite" + ### Each data format has it's own unique set of configuration options, read + ### more about them here: + ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md + data_format = "influx" ` +func (k *Kafka) SetSerializer(serializer serializers.Serializer) { + k.serializer = serializer +} + func (k *Kafka) Connect() error { config := sarama.NewConfig() // Wait for all in-sync replicas to ack the message @@ -109,21 +121,27 @@ func (k *Kafka) Write(metrics []telegraf.Metric) error { return nil } - for _, p := range metrics { - value := p.String() - - m := &sarama.ProducerMessage{ - Topic: k.Topic, - Value: sarama.StringEncoder(value), - } - if h, ok := p.Tags()[k.RoutingTag]; ok { - m.Key = sarama.StringEncoder(h) - } - - _, _, err := k.producer.SendMessage(m) + for _, metric := range metrics { + values, err := k.serializer.Serialize(metric) if err != nil { - return errors.New(fmt.Sprintf("FAILED to send kafka message: %s\n", - err)) + return err + } + + var pubErr error + for _, value := range values { + m := &sarama.ProducerMessage{ + Topic: k.Topic, + Value: sarama.StringEncoder(value), + } + if h, ok := metric.Tags()[k.RoutingTag]; ok { + m.Key = sarama.StringEncoder(h) + } + + _, _, pubErr = k.producer.SendMessage(m) + } + + if pubErr != nil { + return fmt.Errorf("FAILED to send kafka message: %s\n", pubErr) } } return nil diff --git a/plugins/outputs/kafka/kafka_test.go b/plugins/outputs/kafka/kafka_test.go index 103f268cb..f99e0ecea 100644 --- a/plugins/outputs/kafka/kafka_test.go +++ b/plugins/outputs/kafka/kafka_test.go @@ -3,6 +3,7 @@ package kafka import ( "testing" + "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -13,9 +14,11 @@ func TestConnectAndWrite(t *testing.T) { } brokers := []string{testutil.GetLocalHost() + ":9092"} + s, _ := serializers.NewInfluxSerializer() k := &Kafka{ - Brokers: brokers, - Topic: "Test", + Brokers: brokers, + Topic: "Test", + serializer: s, } // Verify that we can connect to the Kafka broker diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index 61f0ef557..efa20944b 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -9,8 +9,35 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/serializers" ) +var sampleConfig = ` + servers = ["localhost:1883"] # required. + + ### MQTT outputs send metrics to this topic format + ### "///" + ### ex: prefix/host/web01.example.com/mem + topic_prefix = "telegraf" + + ### username and password to connect MQTT server. + # username = "telegraf" + # password = "metricsmetricsmetricsmetrics" + + ### Optional SSL Config + # ssl_ca = "/etc/telegraf/ca.pem" + # ssl_cert = "/etc/telegraf/cert.pem" + # ssl_key = "/etc/telegraf/key.pem" + ### Use SSL but skip chain & host verification + # insecure_skip_verify = false + + ### Data format to output. This can be "influx" or "graphite" + ### Each data format has it's own unique set of configuration options, read + ### more about them here: + ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md + data_format = "influx" +` + type MQTT struct { Servers []string `toml:"servers"` Username string @@ -32,31 +59,11 @@ type MQTT struct { client *paho.Client opts *paho.ClientOptions + serializer serializers.Serializer + sync.Mutex } -var sampleConfig = ` - servers = ["localhost:1883"] # required. - ### MQTT QoS, must be 0, 1, or 2 - qos = 0 - - ### MQTT outputs send metrics to this topic format - ### "///" - ### ex: prefix/host/web01.example.com/mem - topic_prefix = "telegraf" - - ### username and password to connect MQTT server. - # username = "telegraf" - # password = "metricsmetricsmetricsmetrics" - - ### Optional SSL Config - # ssl_ca = "/etc/telegraf/ca.pem" - # ssl_cert = "/etc/telegraf/cert.pem" - # ssl_key = "/etc/telegraf/key.pem" - ### Use SSL but skip chain & host verification - # insecure_skip_verify = false -` - func (m *MQTT) Connect() error { var err error m.Lock() @@ -78,6 +85,10 @@ func (m *MQTT) Connect() error { return nil } +func (m *MQTT) SetSerializer(serializer serializers.Serializer) { + m.serializer = serializer +} + func (m *MQTT) Close() error { if m.client.IsConnected() { m.client.Disconnect(20) @@ -104,7 +115,7 @@ func (m *MQTT) Write(metrics []telegraf.Metric) error { hostname = "" } - for _, p := range metrics { + for _, metric := range metrics { var t []string if m.TopicPrefix != "" { t = append(t, m.TopicPrefix) @@ -113,13 +124,20 @@ func (m *MQTT) Write(metrics []telegraf.Metric) error { t = append(t, hostname) } - t = append(t, p.Name()) + t = append(t, metric.Name()) topic := strings.Join(t, "/") - value := p.String() - err := m.publish(topic, value) + values, err := m.serializer.Serialize(metric) if err != nil { - return fmt.Errorf("Could not write to MQTT server, %s", err) + return fmt.Errorf("MQTT Could not serialize metric: %s", + metric.String()) + } + + for _, value := range values { + err = m.publish(topic, value) + if err != nil { + return fmt.Errorf("Could not write to MQTT server, %s", err) + } } } diff --git a/plugins/outputs/mqtt/mqtt_test.go b/plugins/outputs/mqtt/mqtt_test.go index 25d0ab9e3..260eb0c64 100644 --- a/plugins/outputs/mqtt/mqtt_test.go +++ b/plugins/outputs/mqtt/mqtt_test.go @@ -3,7 +3,9 @@ package mqtt import ( "testing" + "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) @@ -13,8 +15,10 @@ func TestConnectAndWrite(t *testing.T) { } var url = testutil.GetLocalHost() + ":1883" + s, _ := serializers.NewInfluxSerializer() m := &MQTT{ - Servers: []string{url}, + Servers: []string{url}, + serializer: s, } // Verify that we can connect to the MQTT broker diff --git a/plugins/outputs/nsq/nsq.go b/plugins/outputs/nsq/nsq.go index ce84c77d5..7fe9b2068 100644 --- a/plugins/outputs/nsq/nsq.go +++ b/plugins/outputs/nsq/nsq.go @@ -2,15 +2,20 @@ package nsq import ( "fmt" + + "github.com/nsqio/go-nsq" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/nsqio/go-nsq" + "github.com/influxdata/telegraf/plugins/serializers" ) type NSQ struct { Server string Topic string producer *nsq.Producer + + serializer serializers.Serializer } var sampleConfig = ` @@ -18,8 +23,18 @@ var sampleConfig = ` server = "localhost:4150" ### NSQ topic for producer messages topic = "telegraf" + + ### Data format to output. This can be "influx" or "graphite" + ### Each data format has it's own unique set of configuration options, read + ### more about them here: + ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md + data_format = "influx" ` +func (n *NSQ) SetSerializer(serializer serializers.Serializer) { + n.serializer = serializer +} + func (n *NSQ) Connect() error { config := nsq.NewConfig() producer, err := nsq.NewProducer(n.Server, config) @@ -50,12 +65,21 @@ func (n *NSQ) Write(metrics []telegraf.Metric) error { return nil } - for _, p := range metrics { - value := p.String() - - err := n.producer.Publish(n.Topic, []byte(value)) - + for _, metric := range metrics { + values, err := n.serializer.Serialize(metric) if err != nil { + return err + } + + var pubErr error + for _, value := range values { + err = n.producer.Publish(n.Topic, []byte(value)) + if err != nil { + pubErr = err + } + } + + if pubErr != nil { return fmt.Errorf("FAILED to send NSQD message: %s", err) } } diff --git a/plugins/outputs/nsq/nsq_test.go b/plugins/outputs/nsq/nsq_test.go index 0880d0252..e2b0fc31d 100644 --- a/plugins/outputs/nsq/nsq_test.go +++ b/plugins/outputs/nsq/nsq_test.go @@ -3,6 +3,7 @@ package nsq import ( "testing" + "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -13,9 +14,11 @@ func TestConnectAndWrite(t *testing.T) { } server := []string{testutil.GetLocalHost() + ":4150"} + s, _ := serializers.NewInfluxSerializer() n := &NSQ{ - Server: server[0], - Topic: "telegraf", + Server: server[0], + Topic: "telegraf", + serializer: s, } // Verify that we can connect to the NSQ daemon diff --git a/plugins/parsers/influx/parser.go b/plugins/parsers/influx/parser.go index 345e60b2e..8ab783b0d 100644 --- a/plugins/parsers/influx/parser.go +++ b/plugins/parsers/influx/parser.go @@ -15,7 +15,7 @@ type InfluxParser struct { DefaultTags map[string]string } -// ParseMetrics returns a slice of Metrics from a text representation of a +// Parse returns a slice of Metrics from a text representation of a // metric (in line-protocol format) // with each metric separated by newlines. If any metrics fail to parse, // a non-nil error will be returned in addition to the metrics that parsed diff --git a/plugins/serializers/graphite/graphite.go b/plugins/serializers/graphite/graphite.go new file mode 100644 index 000000000..d04f756c1 --- /dev/null +++ b/plugins/serializers/graphite/graphite.go @@ -0,0 +1,79 @@ +package graphite + +import ( + "fmt" + "sort" + "strings" + + "github.com/influxdata/telegraf" +) + +type GraphiteSerializer struct { + Prefix string +} + +func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]string, error) { + out := []string{} + // Get name + name := metric.Name() + // Convert UnixNano to Unix timestamps + timestamp := metric.UnixNano() / 1000000000 + tag_str := buildTags(metric) + + for field_name, value := range metric.Fields() { + // Convert value + value_str := fmt.Sprintf("%#v", value) + // Write graphite metric + var graphitePoint string + if name == field_name { + graphitePoint = fmt.Sprintf("%s.%s %s %d", + tag_str, + strings.Replace(name, ".", "_", -1), + value_str, + timestamp) + } else { + graphitePoint = fmt.Sprintf("%s.%s.%s %s %d", + tag_str, + strings.Replace(name, ".", "_", -1), + strings.Replace(field_name, ".", "_", -1), + value_str, + timestamp) + } + if s.Prefix != "" { + graphitePoint = fmt.Sprintf("%s.%s", s.Prefix, graphitePoint) + } + out = append(out, graphitePoint) + } + return out, nil +} + +func buildTags(metric telegraf.Metric) string { + var keys []string + tags := metric.Tags() + for k := range tags { + if k == "host" { + continue + } + keys = append(keys, k) + } + sort.Strings(keys) + + var tag_str string + if host, ok := tags["host"]; ok { + if len(keys) > 0 { + tag_str = strings.Replace(host, ".", "_", -1) + "." + } else { + tag_str = strings.Replace(host, ".", "_", -1) + } + } + + for i, k := range keys { + tag_value := strings.Replace(tags[k], ".", "_", -1) + if i == 0 { + tag_str += tag_value + } else { + tag_str += "." + tag_value + } + } + return tag_str +} diff --git a/plugins/serializers/graphite/graphite_test.go b/plugins/serializers/graphite/graphite_test.go new file mode 100644 index 000000000..72b203b7a --- /dev/null +++ b/plugins/serializers/graphite/graphite_test.go @@ -0,0 +1,121 @@ +package graphite + +import ( + "fmt" + "sort" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/influxdata/telegraf" +) + +func TestGraphiteTags(t *testing.T) { + m1, _ := telegraf.NewMetric( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m2, _ := telegraf.NewMetric( + "mymeasurement", + map[string]string{"host": "192.168.0.1", "afoo": "first", "bfoo": "second"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m3, _ := telegraf.NewMetric( + "mymeasurement", + map[string]string{"afoo": "first", "bfoo": "second"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + tags1 := buildTags(m1) + tags2 := buildTags(m2) + tags3 := buildTags(m3) + + assert.Equal(t, "192_168_0_1", tags1) + assert.Equal(t, "192_168_0_1.first.second", tags2) + assert.Equal(t, "first.second", tags3) +} + +func TestSerializeMetricNoHost(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + "datacenter": "us-west-2", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + "usage_busy": float64(8.5), + } + m, err := telegraf.NewMetric("cpu", tags, fields, now) + assert.NoError(t, err) + + s := GraphiteSerializer{} + mS, err := s.Serialize(m) + assert.NoError(t, err) + + expS := []string{ + fmt.Sprintf("cpu0.us-west-2.cpu.usage_idle 91.5 %d", now.Unix()), + fmt.Sprintf("cpu0.us-west-2.cpu.usage_busy 8.5 %d", now.Unix()), + } + sort.Strings(mS) + sort.Strings(expS) + assert.Equal(t, expS, mS) +} + +func TestSerializeMetricHost(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "host": "localhost", + "cpu": "cpu0", + "datacenter": "us-west-2", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + "usage_busy": float64(8.5), + } + m, err := telegraf.NewMetric("cpu", tags, fields, now) + assert.NoError(t, err) + + s := GraphiteSerializer{} + mS, err := s.Serialize(m) + assert.NoError(t, err) + + expS := []string{ + fmt.Sprintf("localhost.cpu0.us-west-2.cpu.usage_idle 91.5 %d", now.Unix()), + fmt.Sprintf("localhost.cpu0.us-west-2.cpu.usage_busy 8.5 %d", now.Unix()), + } + sort.Strings(mS) + sort.Strings(expS) + assert.Equal(t, expS, mS) +} + +func TestSerializeMetricPrefix(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "host": "localhost", + "cpu": "cpu0", + "datacenter": "us-west-2", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + "usage_busy": float64(8.5), + } + m, err := telegraf.NewMetric("cpu", tags, fields, now) + assert.NoError(t, err) + + s := GraphiteSerializer{Prefix: "prefix"} + mS, err := s.Serialize(m) + assert.NoError(t, err) + + expS := []string{ + fmt.Sprintf("prefix.localhost.cpu0.us-west-2.cpu.usage_idle 91.5 %d", now.Unix()), + fmt.Sprintf("prefix.localhost.cpu0.us-west-2.cpu.usage_busy 8.5 %d", now.Unix()), + } + sort.Strings(mS) + sort.Strings(expS) + assert.Equal(t, expS, mS) +} diff --git a/plugins/serializers/influx/influx.go b/plugins/serializers/influx/influx.go new file mode 100644 index 000000000..03c53fed2 --- /dev/null +++ b/plugins/serializers/influx/influx.go @@ -0,0 +1,12 @@ +package influx + +import ( + "github.com/influxdata/telegraf" +) + +type InfluxSerializer struct { +} + +func (s *InfluxSerializer) Serialize(metric telegraf.Metric) ([]string, error) { + return []string{metric.String()}, nil +} diff --git a/plugins/serializers/influx/influx_test.go b/plugins/serializers/influx/influx_test.go new file mode 100644 index 000000000..4937800aa --- /dev/null +++ b/plugins/serializers/influx/influx_test.go @@ -0,0 +1,68 @@ +package influx + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/influxdata/telegraf" +) + +func TestSerializeMetricFloat(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + m, err := telegraf.NewMetric("cpu", tags, fields, now) + assert.NoError(t, err) + + s := InfluxSerializer{} + mS, err := s.Serialize(m) + assert.NoError(t, err) + + expS := []string{fmt.Sprintf("cpu,cpu=cpu0 usage_idle=91.5 %d", now.UnixNano())} + assert.Equal(t, expS, mS) +} + +func TestSerializeMetricInt(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": int64(90), + } + m, err := telegraf.NewMetric("cpu", tags, fields, now) + assert.NoError(t, err) + + s := InfluxSerializer{} + mS, err := s.Serialize(m) + assert.NoError(t, err) + + expS := []string{fmt.Sprintf("cpu,cpu=cpu0 usage_idle=90i %d", now.UnixNano())} + assert.Equal(t, expS, mS) +} + +func TestSerializeMetricString(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": "foobar", + } + m, err := telegraf.NewMetric("cpu", tags, fields, now) + assert.NoError(t, err) + + s := InfluxSerializer{} + mS, err := s.Serialize(m) + assert.NoError(t, err) + + expS := []string{fmt.Sprintf("cpu,cpu=cpu0 usage_idle=\"foobar\" %d", now.UnixNano())} + assert.Equal(t, expS, mS) +} diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go new file mode 100644 index 000000000..2fedfbeaf --- /dev/null +++ b/plugins/serializers/registry.go @@ -0,0 +1,55 @@ +package serializers + +import ( + "github.com/influxdata/telegraf" + + "github.com/influxdata/telegraf/plugins/serializers/graphite" + "github.com/influxdata/telegraf/plugins/serializers/influx" +) + +// SerializerOutput is an interface for output plugins that are able to +// serialize telegraf metrics into arbitrary data formats. +type SerializerOutput interface { + // SetSerializer sets the serializer function for the interface. + SetSerializer(serializer Serializer) +} + +// Serializer is an interface defining functions that a serializer plugin must +// satisfy. +type Serializer interface { + // Serialize takes a single telegraf metric and turns it into a string. + Serialize(metric telegraf.Metric) ([]string, error) +} + +// Config is a struct that covers the data types needed for all serializer types, +// and can be used to instantiate _any_ of the serializers. +type Config struct { + // Dataformat can be one of: influx, graphite + DataFormat string + + // Prefix to add to all measurements, only supports Graphite + Prefix string +} + +// NewSerializer a Serializer interface based on the given config. +func NewSerializer(config *Config) (Serializer, error) { + var err error + var serializer Serializer + switch config.DataFormat { + case "influx": + serializer, err = NewInfluxSerializer() + case "graphite": + serializer, err = NewGraphiteSerializer(config.Prefix) + } + return serializer, err +} + +func NewInfluxSerializer() (Serializer, error) { + return &influx.InfluxSerializer{}, nil +} + +func NewGraphiteSerializer(prefix string) (Serializer, error) { + return &graphite.GraphiteSerializer{ + Prefix: prefix, + }, nil +} From 37726a02afeb099fbee857da5b2c3c1b0d5e5d55 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 10 Feb 2016 15:50:07 -0700 Subject: [PATCH 008/287] Add Serializer plugins, and 'file' output plugin --- plugins/outputs/file/file_test.go | 195 ++++++++++++++++++++++++++++++ 1 file changed, 195 insertions(+) diff --git a/plugins/outputs/file/file_test.go b/plugins/outputs/file/file_test.go index b691ba57a..a2f15fc08 100644 --- a/plugins/outputs/file/file_test.go +++ b/plugins/outputs/file/file_test.go @@ -1 +1,196 @@ package file + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/serializers" + "github.com/influxdata/telegraf/testutil" +) + +const ( + expNewFile = "test1,tag1=value1 value=1 1257894000000000000\n" + expExistFile = "cpu,cpu=cpu0 value=100 1455312810012459582\n" + + "test1,tag1=value1 value=1 1257894000000000000\n" +) + +func TestFileExistingFile(t *testing.T) { + fh := createFile() + s, _ := serializers.NewInfluxSerializer() + f := File{ + Files: []string{fh.Name()}, + serializer: s, + } + + err := f.Connect() + assert.NoError(t, err) + + err = f.Write(testutil.MockMetrics()) + assert.NoError(t, err) + + validateFile(fh.Name(), expExistFile, t) + + err = f.Close() + assert.NoError(t, err) +} + +func TestFileNewFile(t *testing.T) { + s, _ := serializers.NewInfluxSerializer() + fh := tmpFile() + f := File{ + Files: []string{fh}, + serializer: s, + } + + err := f.Connect() + assert.NoError(t, err) + + err = f.Write(testutil.MockMetrics()) + assert.NoError(t, err) + + validateFile(fh, expNewFile, t) + + err = f.Close() + assert.NoError(t, err) +} + +func TestFileExistingFiles(t *testing.T) { + fh1 := createFile() + fh2 := createFile() + fh3 := createFile() + + s, _ := serializers.NewInfluxSerializer() + f := File{ + Files: []string{fh1.Name(), fh2.Name(), fh3.Name()}, + serializer: s, + } + + err := f.Connect() + assert.NoError(t, err) + + err = f.Write(testutil.MockMetrics()) + assert.NoError(t, err) + + validateFile(fh1.Name(), expExistFile, t) + validateFile(fh2.Name(), expExistFile, t) + validateFile(fh3.Name(), expExistFile, t) + + err = f.Close() + assert.NoError(t, err) +} + +func TestFileNewFiles(t *testing.T) { + s, _ := serializers.NewInfluxSerializer() + fh1 := tmpFile() + fh2 := tmpFile() + fh3 := tmpFile() + f := File{ + Files: []string{fh1, fh2, fh3}, + serializer: s, + } + + err := f.Connect() + assert.NoError(t, err) + + err = f.Write(testutil.MockMetrics()) + assert.NoError(t, err) + + validateFile(fh1, expNewFile, t) + validateFile(fh2, expNewFile, t) + validateFile(fh3, expNewFile, t) + + err = f.Close() + assert.NoError(t, err) +} + +func TestFileBoth(t *testing.T) { + fh1 := createFile() + fh2 := tmpFile() + + s, _ := serializers.NewInfluxSerializer() + f := File{ + Files: []string{fh1.Name(), fh2}, + serializer: s, + } + + err := f.Connect() + assert.NoError(t, err) + + err = f.Write(testutil.MockMetrics()) + assert.NoError(t, err) + + validateFile(fh1.Name(), expExistFile, t) + validateFile(fh2, expNewFile, t) + + err = f.Close() + assert.NoError(t, err) +} + +func TestFileStdout(t *testing.T) { + // keep backup of the real stdout + old := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + s, _ := serializers.NewInfluxSerializer() + f := File{ + Files: []string{"stdout"}, + serializer: s, + } + + err := f.Connect() + assert.NoError(t, err) + + err = f.Write(testutil.MockMetrics()) + assert.NoError(t, err) + + err = f.Close() + assert.NoError(t, err) + + outC := make(chan string) + // copy the output in a separate goroutine so printing can't block indefinitely + go func() { + var buf bytes.Buffer + io.Copy(&buf, r) + outC <- buf.String() + }() + + // back to normal state + w.Close() + // restoring the real stdout + os.Stdout = old + out := <-outC + + assert.Equal(t, expNewFile, out) +} + +func createFile() *os.File { + f, err := ioutil.TempFile("", "") + if err != nil { + panic(err) + } + f.WriteString("cpu,cpu=cpu0 value=100 1455312810012459582\n") + return f +} + +func tmpFile() string { + d, err := ioutil.TempDir("", "") + if err != nil { + panic(err) + } + return d + internal.RandomString(10) +} + +func validateFile(fname, expS string, t *testing.T) { + buf, err := ioutil.ReadFile(fname) + if err != nil { + panic(err) + } + assert.Equal(t, expS, string(buf)) +} From 0198296cedb6ce25bac547d5ebf7f6449c3f73a1 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 12 Feb 2016 15:09:34 -0700 Subject: [PATCH 009/287] Data format output documentation --- DATA_FORMATS_OUTPUT.md | 94 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 DATA_FORMATS_OUTPUT.md diff --git a/DATA_FORMATS_OUTPUT.md b/DATA_FORMATS_OUTPUT.md new file mode 100644 index 000000000..af129489b --- /dev/null +++ b/DATA_FORMATS_OUTPUT.md @@ -0,0 +1,94 @@ +# Telegraf Output Data Formats + +Telegraf metrics, like InfluxDB +[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/), +are a combination of four basic parts: + +1. Measurement Name +1. Tags +1. Fields +1. Timestamp + +In InfluxDB line protocol, these 4 parts are easily defined in textual form: +`measurement_name[,tag1=val1,...] field1=val1[,field2=val2,...] [timestamp]` + +For Telegraf outputs that write textual data (such as `kafka`, `mqtt`, and `file`), +InfluxDB line protocol was originally the only available output format. But now +we are normalizing telegraf metric "serializers" into a plugin-like format across +all output plugins that can support it. You will be able to identify a plugin +that supports different data formats by the presence of a `data_format` +config option, for example, in the file plugin: + +```toml +[[outputs.file]] + ### Files to write to, "stdout" is a specially handled file. + files = ["stdout", "/tmp/metrics.out"] + + ### Data format to output. This can be "influx" or "graphite" + ### Each data format has it's own unique set of configuration options, read + ### more about them here: + ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md + data_format = "influx" + + ### Additional configuration options go here +``` + +Each data_format has an additional set of configuration options available, which +I'll go over below. + +## Influx: + +There are no additional configuration options for InfluxDB line-protocol. The +metrics are parsed directly into Telegraf metrics. + +#### Influx Configuration: + +```toml +[[outputs.file]] + ### Files to write to, "stdout" is a specially handled file. + files = ["stdout", "/tmp/metrics.out"] + + ### Data format to output. This can be "influx" or "graphite" + ### Each data format has it's own unique set of configuration options, read + ### more about them here: + ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md + data_format = "influx" + + ### Additional configuration options go here +``` + +## Graphite: + +The Graphite data format translates Telegraf metrics into _dot_ buckets. +The format is: + +``` +[prefix].[host tag].[all tags (alphabetical)].[measurement name].[field name] value timestamp +``` + +Which means the following influx metric -> graphite conversion would happen: + +``` +cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.08869456589632,usage_user=0.886945658963148 1455320660004257758 +=> +tars.cpu-total.us-east-1.cpu.usage_user 0.5620784411691232 1455320690 +tars.cpu-total.us-east-1.cpu.usage_idle 98.5885585810642 1455320690 +``` + +`prefix` is a configuration option when using the graphite output data format. + +#### Graphite Configuration: + +```toml +[[outputs.file]] + ### Files to write to, "stdout" is a specially handled file. + files = ["stdout", "/tmp/metrics.out"] + + ### Data format to output. This can be "influx" or "graphite" + ### Each data format has it's own unique set of configuration options, read + ### more about them here: + ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md + data_format = "influx" + + prefix = "telegraf" +``` From 0fef147713a37cd2a6ee443d77d28b4128a9ebae Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 12 Feb 2016 16:52:33 -0700 Subject: [PATCH 010/287] data output readme update --- DATA_FORMATS_OUTPUT.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/DATA_FORMATS_OUTPUT.md b/DATA_FORMATS_OUTPUT.md index af129489b..7dca85a4c 100644 --- a/DATA_FORMATS_OUTPUT.md +++ b/DATA_FORMATS_OUTPUT.md @@ -39,7 +39,7 @@ I'll go over below. ## Influx: There are no additional configuration options for InfluxDB line-protocol. The -metrics are parsed directly into Telegraf metrics. +metrics are serialized directly into InfluxDB line-protocol. #### Influx Configuration: @@ -53,8 +53,6 @@ metrics are parsed directly into Telegraf metrics. ### more about them here: ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md data_format = "influx" - - ### Additional configuration options go here ``` ## Graphite: @@ -69,10 +67,10 @@ The format is: Which means the following influx metric -> graphite conversion would happen: ``` -cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.08869456589632,usage_user=0.886945658963148 1455320660004257758 +cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758 => -tars.cpu-total.us-east-1.cpu.usage_user 0.5620784411691232 1455320690 -tars.cpu-total.us-east-1.cpu.usage_idle 98.5885585810642 1455320690 +tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690 +tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690 ``` `prefix` is a configuration option when using the graphite output data format. From 8236534e3c51e127b865cdacc3552e18b0d80545 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 12 Feb 2016 16:55:27 -0700 Subject: [PATCH 011/287] changelog update --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 99083cc45..cf048d77c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,8 @@ and `mqtt_consumer` plugins) can now specify the incoming data format that they would like to parse. Currently supports: "json", "influx", and "graphite" +- Users of message broker and file output plugins can now choose what data format +they would like to output. Currently supports: "influx" and "graphite" - More info on parsing arbitrary data formats can be found [here](https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md) @@ -15,6 +17,8 @@ format that they would like to parse. Currently supports: "json", "influx", and - [#680](https://github.com/influxdata/telegraf/pull/680): NATS consumer input plugin. Thanks @netixen! - [#676](https://github.com/influxdata/telegraf/pull/676): MQTT consumer input plugin. - [#683](https://github.com/influxdata/telegraf/pull/683): PostGRES input plugin: add pg_stat_bgwriter. Thanks @menardorama! +- [#679](https://github.com/influxdata/telegraf/pull/679): File/stdout output plugin. +- [#679](https://github.com/influxdata/telegraf/pull/679): Support for arbitrary output data formats. ### Bugfixes - [#443](https://github.com/influxdata/telegraf/issues/443): Fix Ping command timeout parameter on Linux. From 6a601ceb975dead75481324254c5714f1e2ace4a Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 12 Feb 2016 17:00:11 -0700 Subject: [PATCH 012/287] Add support for specifying SSL config for influxdb output closes #191 --- plugins/outputs/influxdb/influxdb.go | 23 +++++++++++++++++++++++ plugins/outputs/mqtt/mqtt.go | 3 ++- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index c11484a48..52fd8039b 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -28,6 +28,15 @@ type InfluxDB struct { Timeout internal.Duration UDPPayload int `toml:"udp_payload"` + // Path to CA file + SSLCA string `toml:"ssl_ca"` + // Path to host cert file + SSLCert string `toml:"ssl_cert"` + // Path to cert key file + SSLKey string `toml:"ssl_key"` + // Use SSL but skip chain & host verification + InsecureSkipVerify bool + conns []client.Client } @@ -52,6 +61,13 @@ var sampleConfig = ` # user_agent = "telegraf" ### Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) # udp_payload = 512 + + ### Optional SSL Config + # ssl_ca = "/etc/telegraf/ca.pem" + # ssl_cert = "/etc/telegraf/cert.pem" + # ssl_key = "/etc/telegraf/key.pem" + ### Use SSL but skip chain & host verification + # insecure_skip_verify = false ` func (i *InfluxDB) Connect() error { @@ -66,6 +82,12 @@ func (i *InfluxDB) Connect() error { urls = append(urls, i.URL) } + tlsCfg, err := internal.GetTLSConfig( + i.SSLCert, i.SSLKey, i.SSLCA, i.InsecureSkipVerify) + if err != nil { + return err + } + var conns []client.Client for _, u := range urls { switch { @@ -94,6 +116,7 @@ func (i *InfluxDB) Connect() error { Password: i.Password, UserAgent: i.UserAgent, Timeout: i.Timeout.Duration, + TLSConfig: tlsCfg, }) if err != nil { return err diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index efa20944b..d28a04d72 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -5,11 +5,12 @@ import ( "strings" "sync" - paho "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" + + paho "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" ) var sampleConfig = ` From dd086c7830aa4b2a80a5588fdba3eca0eeeeeb5c Mon Sep 17 00:00:00 2001 From: Vladislav Shub Date: Sun, 14 Feb 2016 12:27:13 +0200 Subject: [PATCH 013/287] Added full support for raindrops and tests --- README.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/raindrops/README.md | 15 ++ plugins/inputs/raindrops/raindrops.go | 185 +++++++++++++++++++++ plugins/inputs/raindrops/raindrops_test.go | 108 ++++++++++++ 5 files changed, 310 insertions(+) create mode 100644 plugins/inputs/raindrops/README.md create mode 100644 plugins/inputs/raindrops/raindrops.go create mode 100644 plugins/inputs/raindrops/raindrops_test.go diff --git a/README.md b/README.md index 21a3445ea..c38890350 100644 --- a/README.md +++ b/README.md @@ -182,6 +182,7 @@ Currently implemented sources: * prometheus * puppetagent * rabbitmq +* raindrops * redis * rethinkdb * sql server (microsoft) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 335d41a32..639afbe09 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -35,6 +35,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/prometheus" _ "github.com/influxdata/telegraf/plugins/inputs/puppetagent" _ "github.com/influxdata/telegraf/plugins/inputs/rabbitmq" + _ "github.com/influxdata/telegraf/plugins/inputs/raindrops" _ "github.com/influxdata/telegraf/plugins/inputs/redis" _ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb" _ "github.com/influxdata/telegraf/plugins/inputs/sensors" diff --git a/plugins/inputs/raindrops/README.md b/plugins/inputs/raindrops/README.md new file mode 100644 index 000000000..8dc4e51a0 --- /dev/null +++ b/plugins/inputs/raindrops/README.md @@ -0,0 +1,15 @@ +# Raindrops Input Plugin + +The [raindrops](http://raindrops.bogomips.org/) plugin reads from +specified raindops middleware URI and adds stats to InfluxDB. +### Configuration: + +```toml +# Read raindrops stats +[[inputs.raindrops]] + urls = ["http://localhost/_raindrops"] +``` + +### Tags: + +- Multiple listeners are tagged with IP:Port/Socket, ie `0.0.0.0:8080` or `/tmp/unicorn` diff --git a/plugins/inputs/raindrops/raindrops.go b/plugins/inputs/raindrops/raindrops.go new file mode 100644 index 000000000..00c711cb2 --- /dev/null +++ b/plugins/inputs/raindrops/raindrops.go @@ -0,0 +1,185 @@ +package raindrops + +import ( + "bufio" + "fmt" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type Raindrops struct { + Urls []string + http_client *http.Client +} + +var sampleConfig = ` + ### An array of raindrops middleware URI to gather stats. + urls = ["http://localhost/_raindrops"] +` + +func (r *Raindrops) SampleConfig() string { + return sampleConfig +} + +func (r *Raindrops) Description() string { + return "Read raindrops stats (raindrops - real-time stats for preforking Rack servers)" +} + +func (r *Raindrops) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + var outerr error + + for _, u := range r.Urls { + addr, err := url.Parse(u) + if err != nil { + return fmt.Errorf("Unable to parse address '%s': %s", u, err) + } + + wg.Add(1) + go func(addr *url.URL) { + defer wg.Done() + outerr = r.gatherUrl(addr, acc) + }(addr) + } + + wg.Wait() + + return outerr +} + +func (r *Raindrops) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { + resp, err := r.http_client.Get(addr.String()) + if err != nil { + return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status) + } + buf := bufio.NewReader(resp.Body) + + // Calling + _, err = buf.ReadString(':') + if err != nil { + return err + } + line, err := buf.ReadString('\n') + if err != nil { + return err + } + calling, err := strconv.ParseUint(strings.TrimSpace(line), 10, 64) + if err != nil { + return err + } + + // Writing + _, err = buf.ReadString(':') + if err != nil { + return err + } + line, err = buf.ReadString('\n') + if err != nil { + return err + } + writing, err := strconv.ParseUint(strings.TrimSpace(line), 10, 64) + if err != nil { + return err + } + tags := r.getTags(addr) + fields := map[string]interface{}{ + "calling": calling, + "writing": writing, + } + acc.AddFields("raindrops", fields, tags) + + iterate := true + var queued_line_str string + var active_line_str string + var active_err error + var queued_err error + + for iterate { + // Listen + var tags map[string]string + + lis := map[string]interface{}{ + "active": 0, + "queued": 0, + } + active_line_str, active_err = buf.ReadString('\n') + if active_err != nil { + iterate = false + break + } + if strings.Compare(active_line_str, "\n") == 0{ + break + } + queued_line_str, queued_err = buf.ReadString('\n') + if queued_err != nil { + iterate = false + } + active_line := strings.Split(active_line_str, " ") + listen_name := active_line[0] + + active, err := strconv.ParseUint(strings.TrimSpace(active_line[2]), 10, 64) + if err != nil { + active = 0 + } + lis["active"] = active + + queued_line := strings.Split(queued_line_str, " ") + queued, err := strconv.ParseUint(strings.TrimSpace(queued_line[2]), 10, 64) + if err != nil { + queued = 0 + } + lis["queued"] = queued + if strings.Contains(listen_name, ":") { + listener := strings.Split(listen_name, ":") + tags = map[string]string{ + "ip": listener[0], + "port": listener[1], + } + + } else { + tags = map[string]string{ + "socket": listen_name, + } + } + fmt.Println("raindropssock", lis, tags) + acc.AddFields("raindropssock", lis, tags) + } + return nil +} + +// Get tag(s) for the raindrops calling/writing plugin +func (r *Raindrops) getTags(addr *url.URL) map[string]string { + h := addr.Host + host, port, err := net.SplitHostPort(h) + if err != nil { + host = addr.Host + if addr.Scheme == "http" { + port = "80" + } else if addr.Scheme == "https" { + port = "443" + } else { + port = "" + } + } + return map[string]string{"server": host, "port": port} +} + +func init() { + inputs.Add("raindrops", func() telegraf.Input { + return &Raindrops{http_client: &http.Client{Transport: &http.Transport{ + ResponseHeaderTimeout: time.Duration(3 * time.Second), + }}} + }) +} diff --git a/plugins/inputs/raindrops/raindrops_test.go b/plugins/inputs/raindrops/raindrops_test.go new file mode 100644 index 000000000..d4767b88a --- /dev/null +++ b/plugins/inputs/raindrops/raindrops_test.go @@ -0,0 +1,108 @@ +package raindrops + +import ( + "fmt" + "net" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "time" +) + +const sampleResponse = ` +calling: 100 +writing: 200 +0.0.0.0:8080 active: 1 +0.0.0.0:8080 queued: 2 +0.0.0.0:8081 active: 3 +0.0.0.0:8081 queued: 4 +127.0.0.1:8082 active: 5 +127.0.0.1:8082 queued: 6 +0.0.0.0:8083 active: 7 +0.0.0.0:8083 queued: 8 +0.0.0.0:8084 active: 9 +0.0.0.0:8084 queued: 10 +0.0.0.0:3000 active: 11 +0.0.0.0:3000 queued: 12 +/tmp/listen.me active: 13 +/tmp/listen.me queued: 14 +` + +// Verify that raindrops tags are properly parsed based on the server +func TestRaindropsTags(t *testing.T) { + urls := []string{"http://localhost/_raindrops", "http://localhost:80/_raindrops"} + var addr *url.URL + r := &Raindrops{} + for _, url1 := range urls { + addr, _ = url.Parse(url1) + tagMap := r.getTags(addr) + assert.Contains(t, tagMap["server"], "localhost") + } +} + +func TestRaindropsGeneratesMetrics(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var rsp string + + if r.URL.Path == "/_raindrops" { + rsp = sampleResponse + } else { + panic("Cannot handle request") + } + + fmt.Fprintln(w, rsp) + })) + defer ts.Close() + + n := &Raindrops{ + Urls: []string{fmt.Sprintf("%s/_raindrops", ts.URL)}, + http_client: &http.Client{Transport: &http.Transport{ + ResponseHeaderTimeout: time.Duration(3 * time.Second), + }}, + } + + var acc testutil.Accumulator + + err := n.Gather(&acc) + require.NoError(t, err) + + fields := map[string]interface{}{ + "calling": uint64(100), + "writing": uint64(200), + } + addr, err := url.Parse(ts.URL) + if err != nil { + panic(err) + } + + host, port, err := net.SplitHostPort(addr.Host) + if err != nil { + host = addr.Host + if addr.Scheme == "http" { + port = "80" + } else if addr.Scheme == "https" { + port = "443" + } else { + port = "" + } + } + + tags := map[string]string{"server": host, "port": port} + acc.AssertContainsTaggedFields(t, "raindrops", fields, tags) + + tags = map[string]string{ + "port": "8081", + "ip": "0.0.0.0", + } + fields = map[string]interface {} { + "active": uint64(3), + "queued": uint64(4), + } + fmt.Println("raindropssock_test", fields, tags) + acc.AssertContainsTaggedFields(t, "raindropssock", fields, tags) +} From 0b72612cd22deeebe95baff914837232b2018e5c Mon Sep 17 00:00:00 2001 From: Andrei Burd Date: Sun, 14 Feb 2016 21:12:21 +0200 Subject: [PATCH 014/287] Code formatted, Readme updated based on example closes #695 --- CHANGELOG.md | 1 + CONTRIBUTING.md | 2 +- plugins/inputs/raindrops/README.md | 40 ++++++++++++++++++++-- plugins/inputs/raindrops/raindrops.go | 7 ++-- plugins/inputs/raindrops/raindrops_test.go | 11 +++--- 5 files changed, 47 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cf048d77c..f1c802f3a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ they would like to output. Currently supports: "influx" and "graphite" - [#683](https://github.com/influxdata/telegraf/pull/683): PostGRES input plugin: add pg_stat_bgwriter. Thanks @menardorama! - [#679](https://github.com/influxdata/telegraf/pull/679): File/stdout output plugin. - [#679](https://github.com/influxdata/telegraf/pull/679): Support for arbitrary output data formats. +- [#695](https://github.com/influxdata/telegraf/pull/695): raindrops input plugin. Thanks @burdandrei! ### Bugfixes - [#443](https://github.com/influxdata/telegraf/issues/443): Fix Ping command timeout parameter on Linux. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6876cfa7b..16749fcbc 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -326,7 +326,7 @@ which would take some time to replicate. To overcome this situation we've decided to use docker containers to provide a fast and reproducible environment to test those services which require it. For other situations -(i.e: https://github.com/influxdata/telegraf/blob/master/plugins/redis/redis_test.go) +(i.e: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/redis/redis_test.go) a simple mock will suffice. To execute Telegraf tests follow these simple steps: diff --git a/plugins/inputs/raindrops/README.md b/plugins/inputs/raindrops/README.md index 8dc4e51a0..6a73a085b 100644 --- a/plugins/inputs/raindrops/README.md +++ b/plugins/inputs/raindrops/README.md @@ -1,15 +1,49 @@ # Raindrops Input Plugin The [raindrops](http://raindrops.bogomips.org/) plugin reads from -specified raindops middleware URI and adds stats to InfluxDB. +specified raindops [middleware](http://raindrops.bogomips.org/Raindrops/Middleware.html) URI and adds stats to InfluxDB. + ### Configuration: ```toml # Read raindrops stats [[inputs.raindrops]] - urls = ["http://localhost/_raindrops"] + urls = ["http://localhost:8080/_raindrops"] ``` +### Measurements & Fields: + +- raindrops + - calling (integer, count) + - writing (integer, count) +- raindrops_listen + - active (integer, bytes) + - queued (integer, bytes) + ### Tags: -- Multiple listeners are tagged with IP:Port/Socket, ie `0.0.0.0:8080` or `/tmp/unicorn` +- Raindops calling/writing of all the workers: + - server + - port + +- raindrops_listen (ip:port): + - ip + - port + +- raindrops_listen (Unix Socket): + - socket + +### Example Output: + +``` +$ ./telegraf -config telegraf.conf -input-filter raindrops -test +* Plugin: raindrops, Collection 1 +> raindrops,port=8080,server=localhost calling=0i,writing=0i 1455479896806238204 +> raindrops_listen,ip=0.0.0.0,port=8080 active=0i,queued=0i 1455479896806561938 +> raindrops_listen,ip=0.0.0.0,port=8081 active=1i,queued=0i 1455479896806605749 +> raindrops_listen,ip=127.0.0.1,port=8082 active=0i,queued=0i 1455479896806646315 +> raindrops_listen,ip=0.0.0.0,port=8083 active=0i,queued=0i 1455479896806683252 +> raindrops_listen,ip=0.0.0.0,port=8084 active=0i,queued=0i 1455479896806712025 +> raindrops_listen,ip=0.0.0.0,port=3000 active=0i,queued=0i 1455479896806779197 +> raindrops_listen,socket=/tmp/listen.me active=0i,queued=0i 1455479896806813907 +``` diff --git a/plugins/inputs/raindrops/raindrops.go b/plugins/inputs/raindrops/raindrops.go index 00c711cb2..572422f59 100644 --- a/plugins/inputs/raindrops/raindrops.go +++ b/plugins/inputs/raindrops/raindrops.go @@ -22,7 +22,7 @@ type Raindrops struct { var sampleConfig = ` ### An array of raindrops middleware URI to gather stats. - urls = ["http://localhost/_raindrops"] + urls = ["http://localhost:8080/_raindrops"] ` func (r *Raindrops) SampleConfig() string { @@ -119,7 +119,7 @@ func (r *Raindrops) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { iterate = false break } - if strings.Compare(active_line_str, "\n") == 0{ + if strings.Compare(active_line_str, "\n") == 0 { break } queued_line_str, queued_err = buf.ReadString('\n') @@ -153,8 +153,7 @@ func (r *Raindrops) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { "socket": listen_name, } } - fmt.Println("raindropssock", lis, tags) - acc.AddFields("raindropssock", lis, tags) + acc.AddFields("raindrops_listen", lis, tags) } return nil } diff --git a/plugins/inputs/raindrops/raindrops_test.go b/plugins/inputs/raindrops/raindrops_test.go index d4767b88a..0dee9b1cc 100644 --- a/plugins/inputs/raindrops/raindrops_test.go +++ b/plugins/inputs/raindrops/raindrops_test.go @@ -72,8 +72,8 @@ func TestRaindropsGeneratesMetrics(t *testing.T) { require.NoError(t, err) fields := map[string]interface{}{ - "calling": uint64(100), - "writing": uint64(200), + "calling": uint64(100), + "writing": uint64(200), } addr, err := url.Parse(ts.URL) if err != nil { @@ -97,12 +97,11 @@ func TestRaindropsGeneratesMetrics(t *testing.T) { tags = map[string]string{ "port": "8081", - "ip": "0.0.0.0", + "ip": "0.0.0.0", } - fields = map[string]interface {} { + fields = map[string]interface{}{ "active": uint64(3), "queued": uint64(4), } - fmt.Println("raindropssock_test", fields, tags) - acc.AssertContainsTaggedFields(t, "raindropssock", fields, tags) + acc.AssertContainsTaggedFields(t, "raindrops_listen", fields, tags) } From 85594cc92e8c1b867851bec24b0219745d54ad14 Mon Sep 17 00:00:00 2001 From: Anton Bykov Date: Sun, 14 Feb 2016 14:05:31 +0200 Subject: [PATCH 015/287] Readme: specify compression format for unpacking closes #693 --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index c38890350..3362ad2d6 100644 --- a/README.md +++ b/README.md @@ -66,7 +66,7 @@ Latest: To install the full directory structure with config file, run: ``` -sudo tar -C / -xvf ./telegraf-0.10.2-1_linux_amd64.tar.gz +sudo tar -C / -zxvf ./telegraf-0.10.2-1_linux_amd64.tar.gz ``` To extract only the binary, run: From ccb6b3c64b42568cfe2cc926c6602661e0e7e0f4 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Sat, 13 Feb 2016 11:50:43 -0700 Subject: [PATCH 016/287] Small readme formattings --- CONTRIBUTING.md | 119 ++++++------------ DATA_FORMATS_OUTPUT.md | 17 ++- .../prometheus_client_test.go | 2 + 3 files changed, 50 insertions(+), 88 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 16749fcbc..7eb08a2d5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -12,6 +12,13 @@ but any information you can provide on how the data will look is appreciated. See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb) for a good example. +## GoDoc + +Public interfaces for inputs, outputs, metrics, and the accumulator can be found +on the GoDoc + +[![GoDoc](https://godoc.org/github.com/influxdata/telegraf?status.svg)](https://godoc.org/github.com/influxdata/telegraf) + ## Sign the CLA Before we can merge a pull request, you will need to sign the CLA, @@ -29,7 +36,7 @@ Assuming you can already build the project, run these in the telegraf directory: This section is for developers who want to create new collection inputs. Telegraf is entirely plugin driven. This interface allows for operators to -pick and chose what is gathered as well as makes it easy for developers +pick and chose what is gathered and makes it easy for developers to create new ways of generating metrics. Plugin authorship is kept as simple as possible to promote people to develop @@ -46,49 +53,8 @@ See below for a quick example. plugin can be configured. This is include in `telegraf -sample-config`. * The `Description` function should say in one line what this plugin does. -### Input interface - -```go -type Input interface { - SampleConfig() string - Description() string - Gather(Accumulator) error -} - -type Accumulator interface { - Add(measurement string, - value interface{}, - tags map[string]string, - timestamp ...time.Time) - AddFields(measurement string, - fields map[string]interface{}, - tags map[string]string, - timestamp ...time.Time) -} -``` - -### Accumulator - -The way that a plugin emits metrics is by interacting with the Accumulator. - -The `Add` function takes 3 arguments: -* **measurement**: A string description of the metric. For instance `bytes_read` or ` -faults`. -* **value**: A value for the metric. This accepts 5 different types of value: - * **int**: The most common type. All int types are accepted but favor using `int64` - Useful for counters, etc. - * **float**: Favor `float64`, useful for gauges, percentages, etc. - * **bool**: `true` or `false`, useful to indicate the presence of a state. `light_on`, - etc. - * **string**: Typically used to indicate a message, or some kind of freeform - information. - * **time.Time**: Useful for indicating when a state last occurred, for instance ` - light_on_since`. -* **tags**: This is a map of strings to strings to describe the where or who -about the metric. For instance, the `net` plugin adds a tag named `"interface"` -set to the name of the network interface, like `"eth0"`. - -Let's say you've written a plugin that emits metrics about processes on the current host. +Let's say you've written a plugin that emits metrics about processes on the +current host. ### Input Plugin Example @@ -194,18 +160,6 @@ and `Stop()` methods. * Same as the `Plugin` guidelines, except that they must conform to the `inputs.ServiceInput` interface. -### Service Plugin interface - -```go -type ServicePlugin interface { - SampleConfig() string - Description() string - Gather(Accumulator) error - Start() error - Stop() -} -``` - ## Output Plugins This section is for developers who want to create a new output sink. Outputs @@ -223,18 +177,6 @@ See below for a quick example. output can be configured. This is include in `telegraf -sample-config`. * The `Description` function should say in one line what this output does. -### Output interface - -```go -type Output interface { - Connect() error - Close() error - Description() string - SampleConfig() string - Write(metrics []telegraf.Metric) error -} -``` - ### Output Example ```go @@ -282,6 +224,33 @@ func init() { ``` +## Output Plugins Writing Arbitrary Data Formats + +Some output plugins (such as +[file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file)) +can write arbitrary output data formats. An overview of these data formats can +be found +[here](https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md). + +In order to enable this, you must specify a +`SetSerializer(serializer serializers.Serializer)` +function on the plugin object (see the file plugin for an example), as well as +defining `serializer` as a field of the object. + +You can then utilize the serializer internally in your plugin, serializing data +before it's written. Telegraf's configuration layer will take care of +instantiating and creating the `Serializer` object. + +You should also add the following to your SampleConfig() return: + +```toml + ### Data format to output. This can be "influx" or "graphite" + ### Each data format has it's own unique set of configuration options, read + ### more about them here: + ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md + data_format = "influx" +``` + ## Service Output Plugins This section is for developers who want to create new "service" output. A @@ -297,20 +266,6 @@ and `Stop()` methods. * Same as the `Output` guidelines, except that they must conform to the `output.ServiceOutput` interface. -### Service Output interface - -```go -type ServiceOutput interface { - Connect() error - Close() error - Description() string - SampleConfig() string - Write(metrics []telegraf.Metric) error - Start() error - Stop() -} -``` - ## Unit Tests ### Execute short tests diff --git a/DATA_FORMATS_OUTPUT.md b/DATA_FORMATS_OUTPUT.md index 7dca85a4c..0ad019b10 100644 --- a/DATA_FORMATS_OUTPUT.md +++ b/DATA_FORMATS_OUTPUT.md @@ -10,19 +10,24 @@ are a combination of four basic parts: 1. Timestamp In InfluxDB line protocol, these 4 parts are easily defined in textual form: -`measurement_name[,tag1=val1,...] field1=val1[,field2=val2,...] [timestamp]` + +``` +measurement_name[,tag1=val1,...] field1=val1[,field2=val2,...] [timestamp] +``` For Telegraf outputs that write textual data (such as `kafka`, `mqtt`, and `file`), InfluxDB line protocol was originally the only available output format. But now -we are normalizing telegraf metric "serializers" into a plugin-like format across -all output plugins that can support it. You will be able to identify a plugin -that supports different data formats by the presence of a `data_format` -config option, for example, in the file plugin: +we are normalizing telegraf metric "serializers" into a +[plugin-like interface](https://github.com/influxdata/telegraf/tree/master/plugins/serializers) +across all output plugins that can support it. +You will be able to identify a plugin that supports different data formats +by the presence of a `data_format` +config option, for example, in the `file` output plugin: ```toml [[outputs.file]] ### Files to write to, "stdout" is a specially handled file. - files = ["stdout", "/tmp/metrics.out"] + files = ["stdout"] ### Data format to output. This can be "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read diff --git a/plugins/outputs/prometheus_client/prometheus_client_test.go b/plugins/outputs/prometheus_client/prometheus_client_test.go index adcdf9c5f..16414a8e4 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_test.go @@ -2,6 +2,7 @@ package prometheus_client import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -18,6 +19,7 @@ func TestPrometheusWritePointEmptyTag(t *testing.T) { } pTesting = &PrometheusClient{Listen: "localhost:9127"} err := pTesting.Start() + time.Sleep(time.Millisecond * 200) require.NoError(t, err) defer pTesting.Stop() From e495ae90302007d3fba5a1032b59226104073763 Mon Sep 17 00:00:00 2001 From: Thibault Cohen Date: Tue, 26 Jan 2016 19:12:54 -0500 Subject: [PATCH 017/287] Add tcp/udp check connection input plugin closes #650 --- CHANGELOG.md | 1 + README.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/net_response/README.md | 66 ++++++ plugins/inputs/net_response/net_response.go | 196 +++++++++++++++++ .../inputs/net_response/net_response_test.go | 198 ++++++++++++++++++ 6 files changed, 463 insertions(+) create mode 100644 plugins/inputs/net_response/README.md create mode 100644 plugins/inputs/net_response/net_response.go create mode 100644 plugins/inputs/net_response/net_response_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index f1c802f3a..d9917fc57 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ they would like to output. Currently supports: "influx" and "graphite" - [#679](https://github.com/influxdata/telegraf/pull/679): File/stdout output plugin. - [#679](https://github.com/influxdata/telegraf/pull/679): Support for arbitrary output data formats. - [#695](https://github.com/influxdata/telegraf/pull/695): raindrops input plugin. Thanks @burdandrei! +- [#650](https://github.com/influxdata/telegraf/pull/650): net_response input plugin. Thanks @titilambert! ### Bugfixes - [#443](https://github.com/influxdata/telegraf/issues/443): Fix Ping command timeout parameter on Linux. diff --git a/README.md b/README.md index 3362ad2d6..407107602 100644 --- a/README.md +++ b/README.md @@ -171,6 +171,7 @@ Currently implemented sources: * memcached * mongodb * mysql +* net_response * nginx * nsq * phpfpm diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 639afbe09..9f2122e21 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -24,6 +24,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/mysql" _ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer" + _ "github.com/influxdata/telegraf/plugins/inputs/net_response" _ "github.com/influxdata/telegraf/plugins/inputs/nginx" _ "github.com/influxdata/telegraf/plugins/inputs/nsq" _ "github.com/influxdata/telegraf/plugins/inputs/passenger" diff --git a/plugins/inputs/net_response/README.md b/plugins/inputs/net_response/README.md new file mode 100644 index 000000000..69e72a379 --- /dev/null +++ b/plugins/inputs/net_response/README.md @@ -0,0 +1,66 @@ +# Example Input Plugin + +The input plugin test UDP/TCP connections response time. +It can also check response text. + +### Configuration: + +``` +# List of UDP/TCP connections you want to check +[[inputs.net_response]] + protocol = "tcp" + # Server address (default IP localhost) + address = "github.com:80" + # Set timeout (default 1.0) + timeout = 1.0 + # Set read timeout (default 1.0) + read_timeout = 1.0 + # String sent to the server + send = "ssh" + # Expected string in answer + expect = "ssh" + +[[inputs.net_response]] + protocol = "tcp" + address = ":80" + +[[inputs.net_response]] + protocol = "udp" + # Server address (default IP localhost) + address = "github.com:80" + # Set timeout (default 1.0) + timeout = 1.0 + # Set read timeout (default 1.0) + read_timeout = 1.0 + # String sent to the server + send = "ssh" + # Expected string in answer + expect = "ssh" + +[[inputs.net_response]] + protocol = "udp" + address = "localhost:161" + timeout = 2.0 +``` + +### Measurements & Fields: + +- net_response + - response_time (float, seconds) + - string_found (bool) # Only if "expected: option is set + +### Tags: + +- All measurements have the following tags: + - host + - port + - protocol + +### Example Output: + +``` +$ ./telegraf -config telegraf.conf -input-filter net_response -test +net_response,host=127.0.0.1,port=22,protocol=tcp response_time=0.18070360500000002,string_found=true 1454785464182527094 +net_response,host=127.0.0.1,port=2222,protocol=tcp response_time=1.090124776,string_found=false 1454784433658942325 + +``` diff --git a/plugins/inputs/net_response/net_response.go b/plugins/inputs/net_response/net_response.go new file mode 100644 index 000000000..60468c157 --- /dev/null +++ b/plugins/inputs/net_response/net_response.go @@ -0,0 +1,196 @@ +package net_response + +import ( + "bufio" + "errors" + "net" + "net/textproto" + "regexp" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// NetResponses struct +type NetResponse struct { + Address string + Timeout float64 + ReadTimeout float64 + Send string + Expect string + Protocol string +} + +func (_ *NetResponse) Description() string { + return "TCP or UDP 'ping' given url and collect response time in seconds" +} + +var sampleConfig = ` + ### Protocol, must be "tcp" or "udp" + protocol = "tcp" + ### Server address (default localhost) + address = "github.com:80" + ### Set timeout (default 1.0 seconds) + timeout = 1.0 + ### Set read timeout (default 1.0 seconds) + read_timeout = 1.0 + ### Optional string sent to the server + # send = "ssh" + ### Optional expected string in answer + # expect = "ssh" +` + +func (_ *NetResponse) SampleConfig() string { + return sampleConfig +} + +func (t *NetResponse) TcpGather() (map[string]interface{}, error) { + // Prepare fields + fields := make(map[string]interface{}) + // Start Timer + start := time.Now() + // Resolving + tcpAddr, err := net.ResolveTCPAddr("tcp", t.Address) + // Connecting + conn, err := net.DialTCP("tcp", nil, tcpAddr) + // Stop timer + responseTime := time.Since(start).Seconds() + // Handle error + if err != nil { + return nil, err + } + defer conn.Close() + // Send string if needed + if t.Send != "" { + msg := []byte(t.Send) + conn.Write(msg) + conn.CloseWrite() + // Stop timer + responseTime = time.Since(start).Seconds() + } + // Read string if needed + if t.Expect != "" { + // Set read timeout + conn.SetReadDeadline(time.Now().Add(time.Duration(t.ReadTimeout) * time.Second)) + // Prepare reader + reader := bufio.NewReader(conn) + tp := textproto.NewReader(reader) + // Read + data, err := tp.ReadLine() + // Stop timer + responseTime = time.Since(start).Seconds() + // Handle error + if err != nil { + fields["string_found"] = false + } else { + // Looking for string in answer + RegEx := regexp.MustCompile(`.*` + t.Expect + `.*`) + find := RegEx.FindString(string(data)) + if find != "" { + fields["string_found"] = true + } else { + fields["string_found"] = false + } + } + + } + fields["response_time"] = responseTime + return fields, nil +} + +func (u *NetResponse) UdpGather() (map[string]interface{}, error) { + // Prepare fields + fields := make(map[string]interface{}) + // Start Timer + start := time.Now() + // Resolving + udpAddr, err := net.ResolveUDPAddr("udp", u.Address) + LocalAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0") + // Connecting + conn, err := net.DialUDP("udp", LocalAddr, udpAddr) + defer conn.Close() + // Handle error + if err != nil { + return nil, err + } + // Send string + msg := []byte(u.Send) + conn.Write(msg) + // Read string + // Set read timeout + conn.SetReadDeadline(time.Now().Add(time.Duration(u.ReadTimeout) * time.Second)) + // Read + buf := make([]byte, 1024) + _, _, err = conn.ReadFromUDP(buf) + // Stop timer + responseTime := time.Since(start).Seconds() + // Handle error + if err != nil { + return nil, err + } else { + // Looking for string in answer + RegEx := regexp.MustCompile(`.*` + u.Expect + `.*`) + find := RegEx.FindString(string(buf)) + if find != "" { + fields["string_found"] = true + } else { + fields["string_found"] = false + } + } + fields["response_time"] = responseTime + return fields, nil +} + +func (c *NetResponse) Gather(acc telegraf.Accumulator) error { + // Set default values + if c.Timeout == 0 { + c.Timeout = 1.0 + } + if c.ReadTimeout == 0 { + c.ReadTimeout = 1.0 + } + // Check send and expected string + if c.Protocol == "udp" && c.Send == "" { + return errors.New("Send string cannot be empty") + } + if c.Protocol == "udp" && c.Expect == "" { + return errors.New("Expected string cannot be empty") + } + // Prepare host and port + host, port, err := net.SplitHostPort(c.Address) + if err != nil { + return err + } + if host == "" { + c.Address = "localhost:" + port + } + if port == "" { + return errors.New("Bad port") + } + // Prepare data + tags := map[string]string{"host": host, "port": port} + var fields map[string]interface{} + // Gather data + if c.Protocol == "tcp" { + fields, err = c.TcpGather() + tags["protocol"] = "tcp" + } else if c.Protocol == "udp" { + fields, err = c.UdpGather() + tags["protocol"] = "udp" + } else { + return errors.New("Bad protocol") + } + if err != nil { + return err + } + // Add metrics + acc.AddFields("net_response", fields, tags) + return nil +} + +func init() { + inputs.Add("net_response", func() telegraf.Input { + return &NetResponse{} + }) +} diff --git a/plugins/inputs/net_response/net_response_test.go b/plugins/inputs/net_response/net_response_test.go new file mode 100644 index 000000000..538d059c0 --- /dev/null +++ b/plugins/inputs/net_response/net_response_test.go @@ -0,0 +1,198 @@ +package net_response + +import ( + "net" + "regexp" + "sync" + "testing" + + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBadProtocol(t *testing.T) { + var acc testutil.Accumulator + // Init plugin + c := NetResponse{ + Protocol: "unknownprotocol", + Address: ":9999", + } + // Error + err1 := c.Gather(&acc) + require.Error(t, err1) + assert.Equal(t, "Bad protocol", err1.Error()) +} + +func TestTCPError(t *testing.T) { + var acc testutil.Accumulator + // Init plugin + c := NetResponse{ + Protocol: "tcp", + Address: ":9999", + } + // Error + err1 := c.Gather(&acc) + require.Error(t, err1) + assert.Equal(t, "dial tcp 127.0.0.1:9999: getsockopt: connection refused", err1.Error()) +} + +func TestTCPOK1(t *testing.T) { + var wg sync.WaitGroup + var acc testutil.Accumulator + // Init plugin + c := NetResponse{ + Address: "127.0.0.1:2004", + Send: "test", + Expect: "test", + ReadTimeout: 3.0, + Timeout: 1.0, + Protocol: "tcp", + } + // Start TCP server + wg.Add(1) + go TCPServer(t, &wg) + wg.Wait() + // Connect + wg.Add(1) + err1 := c.Gather(&acc) + wg.Wait() + // Override response time + for _, p := range acc.Metrics { + p.Fields["response_time"] = 1.0 + } + require.NoError(t, err1) + acc.AssertContainsTaggedFields(t, + "net_response", + map[string]interface{}{ + "string_found": true, + "response_time": 1.0, + }, + map[string]string{"host": "127.0.0.1", + "port": "2004", + "protocol": "tcp", + }, + ) + // Waiting TCPserver + wg.Wait() +} + +func TestTCPOK2(t *testing.T) { + var wg sync.WaitGroup + var acc testutil.Accumulator + // Init plugin + c := NetResponse{ + Address: "127.0.0.1:2004", + Send: "test", + Expect: "test2", + ReadTimeout: 3.0, + Timeout: 1.0, + Protocol: "tcp", + } + // Start TCP server + wg.Add(1) + go TCPServer(t, &wg) + wg.Wait() + // Connect + wg.Add(1) + err1 := c.Gather(&acc) + wg.Wait() + // Override response time + for _, p := range acc.Metrics { + p.Fields["response_time"] = 1.0 + } + require.NoError(t, err1) + acc.AssertContainsTaggedFields(t, + "net_response", + map[string]interface{}{ + "string_found": false, + "response_time": 1.0, + }, + map[string]string{"host": "127.0.0.1", + "port": "2004", + "protocol": "tcp", + }, + ) + // Waiting TCPserver + wg.Wait() +} + +func TestUDPrror(t *testing.T) { + var acc testutil.Accumulator + // Init plugin + c := NetResponse{ + Address: ":9999", + Send: "test", + Expect: "test", + Protocol: "udp", + } + // Error + err1 := c.Gather(&acc) + require.Error(t, err1) + assert.Regexp(t, regexp.MustCompile(`read udp 127.0.0.1:[0-9]*->127.0.0.1:9999: recvfrom: connection refused`), err1.Error()) +} + +func TestUDPOK1(t *testing.T) { + var wg sync.WaitGroup + var acc testutil.Accumulator + // Init plugin + c := NetResponse{ + Address: "127.0.0.1:2004", + Send: "test", + Expect: "test", + ReadTimeout: 3.0, + Timeout: 1.0, + Protocol: "udp", + } + // Start UDP server + wg.Add(1) + go UDPServer(t, &wg) + wg.Wait() + // Connect + wg.Add(1) + err1 := c.Gather(&acc) + wg.Wait() + // Override response time + for _, p := range acc.Metrics { + p.Fields["response_time"] = 1.0 + } + require.NoError(t, err1) + acc.AssertContainsTaggedFields(t, + "net_response", + map[string]interface{}{ + "string_found": true, + "response_time": 1.0, + }, + map[string]string{"host": "127.0.0.1", + "port": "2004", + "protocol": "udp", + }, + ) + // Waiting TCPserver + wg.Wait() +} + +func UDPServer(t *testing.T, wg *sync.WaitGroup) { + udpAddr, _ := net.ResolveUDPAddr("udp", "127.0.0.1:2004") + conn, _ := net.ListenUDP("udp", udpAddr) + wg.Done() + buf := make([]byte, 1024) + _, remoteaddr, _ := conn.ReadFromUDP(buf) + conn.WriteToUDP(buf, remoteaddr) + conn.Close() + wg.Done() +} + +func TCPServer(t *testing.T, wg *sync.WaitGroup) { + tcpAddr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2004") + tcpServer, _ := net.ListenTCP("tcp", tcpAddr) + wg.Done() + conn, _ := tcpServer.AcceptTCP() + buf := make([]byte, 1024) + conn.Read(buf) + conn.Write(buf) + conn.CloseWrite() + tcpServer.Close() + wg.Done() +} From 7f539c951a1e31d720b775c2328573c262e4d9fc Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 15 Feb 2016 16:08:45 -0700 Subject: [PATCH 018/287] changelog update --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d9917fc57..272073dc4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,8 +7,10 @@ format that they would like to parse. Currently supports: "json", "influx", and "graphite" - Users of message broker and file output plugins can now choose what data format they would like to output. Currently supports: "influx" and "graphite" -- More info on parsing arbitrary data formats can be found +- More info on parsing _incoming_ data formats can be found [here](https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md) +- More info on serializing _outgoing_ data formats can be found +[here](https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md) ### Features - [#652](https://github.com/influxdata/telegraf/pull/652): CouchDB Input Plugin. Thanks @codehate! From ee468be6962165b0a9ec0b672dc0b1eb95fd2ffb Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 15 Feb 2016 17:21:38 -0700 Subject: [PATCH 019/287] Flush based on buffer size rather than time this includes: - Add Accumulator to the Start() function of service inputs - For message consumer plugins, use the Accumulator to constantly add metrics and make Gather a dummy function - rework unit tests to match this new behavior. - make "flush_buffer_when_full" a config option that defaults to true closes #666 --- agent/agent.go | 20 +-- etc/telegraf.conf | 34 ++++-- input.go | 2 +- internal/config/config.go | 12 +- internal/models/running_output.go | 115 +++++++++++++----- .../inputs/github_webhooks/github_webhooks.go | 2 +- .../inputs/kafka_consumer/kafka_consumer.go | 43 ++----- .../kafka_consumer_integration_test.go | 19 +-- .../kafka_consumer/kafka_consumer_test.go | 58 ++++----- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 55 +++------ .../mqtt_consumer/mqtt_consumer_test.go | 68 ++++------- plugins/inputs/nats_consumer/nats_consumer.go | 38 ++---- .../nats_consumer/nats_consumer_test.go | 74 ++++------- plugins/inputs/statsd/statsd.go | 2 +- testutil/accumulator.go | 14 +++ 15 files changed, 271 insertions(+), 285 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index bd52e7875..5a70097fc 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -58,7 +58,8 @@ func (a *Agent) Connect() error { } err := o.Output.Connect() if err != nil { - log.Printf("Failed to connect to output %s, retrying in 15s, error was '%s' \n", o.Name, err) + log.Printf("Failed to connect to output %s, retrying in 15s, "+ + "error was '%s' \n", o.Name, err) time.Sleep(15 * time.Second) err = o.Output.Connect() if err != nil { @@ -241,7 +242,7 @@ func (a *Agent) Test() error { return nil } -// flush writes a list of points to all configured outputs +// flush writes a list of metrics to all configured outputs func (a *Agent) flush() { var wg sync.WaitGroup @@ -260,7 +261,7 @@ func (a *Agent) flush() { wg.Wait() } -// flusher monitors the points input channel and flushes on the minimum interval +// flusher monitors the metrics input channel and flushes on the minimum interval func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) error { // Inelegant, but this sleep is to allow the Gather threads to run, so that // the flusher will flush after metrics are collected. @@ -271,14 +272,14 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er for { select { case <-shutdown: - log.Println("Hang on, flushing any cached points before shutdown") + log.Println("Hang on, flushing any cached metrics before shutdown") a.flush() return nil case <-ticker.C: a.flush() case m := <-metricC: for _, o := range a.Config.Outputs { - o.AddPoint(m) + o.AddMetric(m) } } } @@ -318,8 +319,8 @@ func (a *Agent) Run(shutdown chan struct{}) error { a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, a.Config.Agent.Quiet, a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration) - // channel shared between all input threads for accumulating points - metricC := make(chan telegraf.Metric, 1000) + // channel shared between all input threads for accumulating metrics + metricC := make(chan telegraf.Metric, 10000) // Round collection to nearest interval by sleeping if a.Config.Agent.RoundInterval { @@ -342,7 +343,10 @@ func (a *Agent) Run(shutdown chan struct{}) error { // Start service of any ServicePlugins switch p := input.Input.(type) { case telegraf.ServiceInput: - if err := p.Start(); err != nil { + acc := NewAccumulator(input.Config, metricC) + acc.SetDebug(a.Config.Agent.Debug) + acc.setDefaultTags(a.Config.Tags) + if err := p.Start(acc); err != nil { log.Printf("Service for input %s failed to start, exiting\n%s\n", input.Name, err.Error()) return err diff --git a/etc/telegraf.conf b/etc/telegraf.conf index b62e50263..5095f3bdf 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -16,23 +16,37 @@ # Configuration for telegraf agent [agent] - # Default data collection interval for all plugins + ### Default data collection interval for all inputs interval = "10s" - # Rounds collection interval to 'interval' - # ie, if interval="10s" then always collect on :00, :10, :20, etc. + ### Rounds collection interval to 'interval' + ### ie, if interval="10s" then always collect on :00, :10, :20, etc. round_interval = true - # Default data flushing interval for all outputs. You should not set this below - # interval. Maximum flush_interval will be flush_interval + flush_jitter + ### Telegraf will cache metric_buffer_limit metrics for each output, and will + ### flush this buffer on a successful write. + metric_buffer_limit = 10000 + ### Flush the buffer whenever full, regardless of flush_interval. + flush_buffer_when_full = true + + ### Collection jitter is used to jitter the collection by a random amount. + ### Each plugin will sleep for a random time within jitter before collecting. + ### This can be used to avoid many plugins querying things like sysfs at the + ### same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ### Default flushing interval for all outputs. You shouldn't set this below + ### interval. Maximum flush_interval will be flush_interval + flush_jitter flush_interval = "10s" - # Jitter the flush interval by a random amount. This is primarily to avoid - # large write spikes for users running a large number of telegraf instances. - # ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + ### Jitter the flush interval by a random amount. This is primarily to avoid + ### large write spikes for users running a large number of telegraf instances. + ### ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s flush_jitter = "0s" - # Run telegraf in debug mode + ### Run telegraf in debug mode debug = false - # Override default hostname, if empty use os.Hostname() + ### Run telegraf in quiet mode + quiet = false + ### Override default hostname, if empty use os.Hostname() hostname = "" diff --git a/input.go b/input.go index 6992c1b43..f7e1493e2 100644 --- a/input.go +++ b/input.go @@ -24,7 +24,7 @@ type ServiceInput interface { Gather(Accumulator) error // Start starts the ServiceInput's service, whatever that may be - Start() error + Start(Accumulator) error // Stop stops the services and closes any necessary channels and connections Stop() diff --git a/internal/config/config.go b/internal/config/config.go index ffd4f632a..82246f2a4 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -68,7 +68,7 @@ type AgentConfig struct { // same time, which can have a measurable effect on the system. CollectionJitter internal.Duration - // Interval at which to flush data + // FlushInterval is the Interval at which to flush data FlushInterval internal.Duration // FlushJitter Jitters the flush interval by a random amount. @@ -82,6 +82,11 @@ type AgentConfig struct { // full, the oldest metrics will be overwritten. MetricBufferLimit int + // FlushBufferWhenFull tells Telegraf to flush the metric buffer whenever + // it fills up, regardless of FlushInterval. Setting this option to true + // does _not_ deactivate FlushInterval. + FlushBufferWhenFull bool + // TODO(cam): Remove UTC and Precision parameters, they are no longer // valid for the agent config. Leaving them here for now for backwards- // compatability @@ -157,6 +162,8 @@ var header = `################################################################## ### Telegraf will cache metric_buffer_limit metrics for each output, and will ### flush this buffer on a successful write. metric_buffer_limit = 10000 + ### Flush the buffer whenever full, regardless of flush_interval. + flush_buffer_when_full = true ### Collection jitter is used to jitter the collection by a random amount. ### Each plugin will sleep for a random time within jitter before collecting. @@ -421,8 +428,9 @@ func (c *Config) addOutput(name string, table *ast.Table) error { ro := internal_models.NewRunningOutput(name, output, outputConfig) if c.Agent.MetricBufferLimit > 0 { - ro.PointBufferLimit = c.Agent.MetricBufferLimit + ro.MetricBufferLimit = c.Agent.MetricBufferLimit } + ro.FlushBufferWhenFull = c.Agent.FlushBufferWhenFull ro.Quiet = c.Agent.Quiet c.Outputs = append(c.Outputs, ro) return nil diff --git a/internal/models/running_output.go b/internal/models/running_output.go index 49a01f8ee..1b27f66de 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -2,22 +2,34 @@ package internal_models import ( "log" + "sync" "time" "github.com/influxdata/telegraf" ) -const DEFAULT_POINT_BUFFER_LIMIT = 10000 +const ( + // Default number of metrics kept between flushes. + DEFAULT_METRIC_BUFFER_LIMIT = 10000 + + // Limit how many full metric buffers are kept due to failed writes. + FULL_METRIC_BUFFERS_LIMIT = 100 +) type RunningOutput struct { - Name string - Output telegraf.Output - Config *OutputConfig - Quiet bool - PointBufferLimit int + Name string + Output telegraf.Output + Config *OutputConfig + Quiet bool + MetricBufferLimit int + FlushBufferWhenFull bool - metrics []telegraf.Metric - overwriteCounter int + metrics []telegraf.Metric + tmpmetrics map[int][]telegraf.Metric + overwriteI int + mapI int + + sync.Mutex } func NewRunningOutput( @@ -26,47 +38,94 @@ func NewRunningOutput( conf *OutputConfig, ) *RunningOutput { ro := &RunningOutput{ - Name: name, - metrics: make([]telegraf.Metric, 0), - Output: output, - Config: conf, - PointBufferLimit: DEFAULT_POINT_BUFFER_LIMIT, + Name: name, + metrics: make([]telegraf.Metric, 0), + tmpmetrics: make(map[int][]telegraf.Metric), + Output: output, + Config: conf, + MetricBufferLimit: DEFAULT_METRIC_BUFFER_LIMIT, } return ro } -func (ro *RunningOutput) AddPoint(point telegraf.Metric) { +// AddMetric adds a metric to the output. This function can also write cached +// points if FlushBufferWhenFull is true. +func (ro *RunningOutput) AddMetric(metric telegraf.Metric) { if ro.Config.Filter.IsActive { - if !ro.Config.Filter.ShouldMetricPass(point) { + if !ro.Config.Filter.ShouldMetricPass(metric) { return } } + ro.Lock() + defer ro.Unlock() - if len(ro.metrics) < ro.PointBufferLimit { - ro.metrics = append(ro.metrics, point) + if len(ro.metrics) < ro.MetricBufferLimit { + ro.metrics = append(ro.metrics, metric) } else { - log.Printf("WARNING: overwriting cached metrics, you may want to " + - "increase the metric_buffer_limit setting in your [agent] config " + - "if you do not wish to overwrite metrics.\n") - if ro.overwriteCounter == len(ro.metrics) { - ro.overwriteCounter = 0 + if ro.FlushBufferWhenFull { + tmpmetrics := make([]telegraf.Metric, len(ro.metrics)) + copy(tmpmetrics, ro.metrics) + ro.metrics = make([]telegraf.Metric, 0) + err := ro.write(tmpmetrics) + if err != nil { + log.Printf("ERROR writing full metric buffer to output %s, %s", + ro.Name, err) + if len(ro.tmpmetrics) == FULL_METRIC_BUFFERS_LIMIT { + ro.mapI = 0 + // overwrite one + ro.tmpmetrics[ro.mapI] = tmpmetrics + ro.mapI++ + } else { + ro.tmpmetrics[ro.mapI] = tmpmetrics + ro.mapI++ + } + } + } else { + log.Printf("WARNING: overwriting cached metrics, you may want to " + + "increase the metric_buffer_limit setting in your [agent] " + + "config if you do not wish to overwrite metrics.\n") + if ro.overwriteI == len(ro.metrics) { + ro.overwriteI = 0 + } + ro.metrics[ro.overwriteI] = metric + ro.overwriteI++ } - ro.metrics[ro.overwriteCounter] = point - ro.overwriteCounter++ } } +// Write writes all cached points to this output. func (ro *RunningOutput) Write() error { + ro.Lock() + defer ro.Unlock() + err := ro.write(ro.metrics) + if err != nil { + return err + } else { + ro.metrics = make([]telegraf.Metric, 0) + ro.overwriteI = 0 + } + + // Write any cached metric buffers that failed previously + for i, tmpmetrics := range ro.tmpmetrics { + if err := ro.write(tmpmetrics); err != nil { + return err + } else { + delete(ro.tmpmetrics, i) + } + } + + return nil +} + +func (ro *RunningOutput) write(metrics []telegraf.Metric) error { start := time.Now() - err := ro.Output.Write(ro.metrics) + err := ro.Output.Write(metrics) elapsed := time.Since(start) if err == nil { if !ro.Quiet { log.Printf("Wrote %d metrics to output %s in %s\n", - len(ro.metrics), ro.Name, elapsed) + len(metrics), ro.Name, elapsed) } - ro.metrics = make([]telegraf.Metric, 0) - ro.overwriteCounter = 0 } return err } diff --git a/plugins/inputs/github_webhooks/github_webhooks.go b/plugins/inputs/github_webhooks/github_webhooks.go index a66563add..6dc97f5a3 100644 --- a/plugins/inputs/github_webhooks/github_webhooks.go +++ b/plugins/inputs/github_webhooks/github_webhooks.go @@ -61,7 +61,7 @@ func (gh *GithubWebhooks) Listen() { } } -func (gh *GithubWebhooks) Start() error { +func (gh *GithubWebhooks) Start(_ telegraf.Accumulator) error { go gh.Listen() log.Printf("Started the github_webhooks service on %s\n", gh.ServiceAddress) return nil diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 9fa47dee9..66fce3fcf 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -1,7 +1,6 @@ package kafka_consumer import ( - "fmt" "log" "strings" "sync" @@ -19,11 +18,13 @@ type Kafka struct { Topics []string ZookeeperPeers []string Consumer *consumergroup.ConsumerGroup - MetricBuffer int + + // Legacy metric buffer support + MetricBuffer int // TODO remove PointBuffer, legacy support PointBuffer int - Offset string + Offset string parser parsers.Parser sync.Mutex @@ -32,9 +33,10 @@ type Kafka struct { in <-chan *sarama.ConsumerMessage // channel for all kafka consumer errors errs <-chan *sarama.ConsumerError - // channel for all incoming parsed kafka metrics - metricC chan telegraf.Metric - done chan struct{} + done chan struct{} + + // keep the accumulator internally: + acc telegraf.Accumulator // doNotCommitMsgs tells the parser not to call CommitUpTo on the consumer // this is mostly for test purposes, but there may be a use-case for it later. @@ -48,8 +50,6 @@ var sampleConfig = ` zookeeper_peers = ["localhost:2181"] ### the name of the consumer group consumer_group = "telegraf_metrics_consumers" - ### Maximum number of metrics to buffer between collection intervals - metric_buffer = 100000 ### Offset (must be either "oldest" or "newest") offset = "oldest" @@ -72,11 +72,13 @@ func (k *Kafka) SetParser(parser parsers.Parser) { k.parser = parser } -func (k *Kafka) Start() error { +func (k *Kafka) Start(acc telegraf.Accumulator) error { k.Lock() defer k.Unlock() var consumerErr error + k.acc = acc + config := consumergroup.NewConfig() switch strings.ToLower(k.Offset) { case "oldest", "": @@ -106,13 +108,6 @@ func (k *Kafka) Start() error { } k.done = make(chan struct{}) - if k.PointBuffer == 0 && k.MetricBuffer == 0 { - k.MetricBuffer = 100000 - } else if k.PointBuffer > 0 { - // Legacy support of PointBuffer field TODO remove - k.MetricBuffer = k.PointBuffer - } - k.metricC = make(chan telegraf.Metric, k.MetricBuffer) // Start the kafka message reader go k.receiver() @@ -138,14 +133,7 @@ func (k *Kafka) receiver() { } for _, metric := range metrics { - fmt.Println(string(metric.Name())) - select { - case k.metricC <- metric: - continue - default: - log.Printf("Kafka Consumer buffer is full, dropping a metric." + - " You may want to increase the metric_buffer setting") - } + k.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time()) } if !k.doNotCommitMsgs { @@ -169,13 +157,6 @@ func (k *Kafka) Stop() { } func (k *Kafka) Gather(acc telegraf.Accumulator) error { - k.Lock() - defer k.Unlock() - nmetrics := len(k.metricC) - for i := 0; i < nmetrics; i++ { - metric := <-k.metricC - acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time()) - } return nil } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go index 458d43d35..e823f49a5 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go @@ -44,18 +44,19 @@ func TestReadsMetricsFromKafka(t *testing.T) { } p, _ := parsers.NewInfluxParser() k.SetParser(p) - if err := k.Start(); err != nil { + + // Verify that we can now gather the sent message + var acc testutil.Accumulator + + // Sanity check + assert.Equal(t, 0, len(acc.Metrics), "There should not be any points") + if err := k.Start(&acc); err != nil { t.Fatal(err.Error()) } else { defer k.Stop() } - waitForPoint(k, t) - - // Verify that we can now gather the sent message - var acc testutil.Accumulator - // Sanity check - assert.Equal(t, 0, len(acc.Metrics), "There should not be any points") + waitForPoint(&acc, t) // Gather points err = k.Gather(&acc) @@ -77,7 +78,7 @@ func TestReadsMetricsFromKafka(t *testing.T) { // Waits for the metric that was sent to the kafka broker to arrive at the kafka // consumer -func waitForPoint(k *Kafka, t *testing.T) { +func waitForPoint(acc *testutil.Accumulator, t *testing.T) { // Give the kafka container up to 2 seconds to get the point to the consumer ticker := time.NewTicker(5 * time.Millisecond) counter := 0 @@ -87,7 +88,7 @@ func waitForPoint(k *Kafka, t *testing.T) { counter++ if counter > 1000 { t.Fatal("Waited for 5s, point never arrived to consumer") - } else if len(k.metricC) == 1 { + } else if acc.NFields() == 1 { return } } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index ec69cb926..e631f6708 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -4,7 +4,6 @@ import ( "testing" "time" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" @@ -17,29 +16,28 @@ const ( testMsgGraphite = "cpu.load.short.graphite 23422 1454780029" testMsgJSON = "{\"a\": 5, \"b\": {\"c\": 6}}\n" invalidMsg = "cpu_load_short,host=server01 1422568543702900257" - pointBuffer = 5 ) -func NewTestKafka() (*Kafka, chan *sarama.ConsumerMessage) { - in := make(chan *sarama.ConsumerMessage, pointBuffer) +func newTestKafka() (*Kafka, chan *sarama.ConsumerMessage) { + in := make(chan *sarama.ConsumerMessage, 1000) k := Kafka{ ConsumerGroup: "test", Topics: []string{"telegraf"}, ZookeeperPeers: []string{"localhost:2181"}, - PointBuffer: pointBuffer, Offset: "oldest", in: in, doNotCommitMsgs: true, - errs: make(chan *sarama.ConsumerError, pointBuffer), + errs: make(chan *sarama.ConsumerError, 1000), done: make(chan struct{}), - metricC: make(chan telegraf.Metric, pointBuffer), } return &k, in } // Test that the parser parses kafka messages into points func TestRunParser(t *testing.T) { - k, in := NewTestKafka() + k, in := newTestKafka() + acc := testutil.Accumulator{} + k.acc = &acc defer close(k.done) k.parser, _ = parsers.NewInfluxParser() @@ -47,12 +45,14 @@ func TestRunParser(t *testing.T) { in <- saramaMsg(testMsg) time.Sleep(time.Millisecond) - assert.Equal(t, len(k.metricC), 1) + assert.Equal(t, acc.NFields(), 1) } // Test that the parser ignores invalid messages func TestRunParserInvalidMsg(t *testing.T) { - k, in := NewTestKafka() + k, in := newTestKafka() + acc := testutil.Accumulator{} + k.acc = &acc defer close(k.done) k.parser, _ = parsers.NewInfluxParser() @@ -60,27 +60,14 @@ func TestRunParserInvalidMsg(t *testing.T) { in <- saramaMsg(invalidMsg) time.Sleep(time.Millisecond) - assert.Equal(t, len(k.metricC), 0) -} - -// Test that points are dropped when we hit the buffer limit -func TestRunParserRespectsBuffer(t *testing.T) { - k, in := NewTestKafka() - defer close(k.done) - - k.parser, _ = parsers.NewInfluxParser() - go k.receiver() - for i := 0; i < pointBuffer+1; i++ { - in <- saramaMsg(testMsg) - } - time.Sleep(time.Millisecond) - - assert.Equal(t, len(k.metricC), 5) + assert.Equal(t, acc.NFields(), 0) } // Test that the parser parses kafka messages into points func TestRunParserAndGather(t *testing.T) { - k, in := NewTestKafka() + k, in := newTestKafka() + acc := testutil.Accumulator{} + k.acc = &acc defer close(k.done) k.parser, _ = parsers.NewInfluxParser() @@ -88,17 +75,18 @@ func TestRunParserAndGather(t *testing.T) { in <- saramaMsg(testMsg) time.Sleep(time.Millisecond) - acc := testutil.Accumulator{} k.Gather(&acc) - assert.Equal(t, len(acc.Metrics), 1) + assert.Equal(t, acc.NFields(), 1) acc.AssertContainsFields(t, "cpu_load_short", map[string]interface{}{"value": float64(23422)}) } // Test that the parser parses kafka messages into points func TestRunParserAndGatherGraphite(t *testing.T) { - k, in := NewTestKafka() + k, in := newTestKafka() + acc := testutil.Accumulator{} + k.acc = &acc defer close(k.done) k.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) @@ -106,17 +94,18 @@ func TestRunParserAndGatherGraphite(t *testing.T) { in <- saramaMsg(testMsgGraphite) time.Sleep(time.Millisecond) - acc := testutil.Accumulator{} k.Gather(&acc) - assert.Equal(t, len(acc.Metrics), 1) + assert.Equal(t, acc.NFields(), 1) acc.AssertContainsFields(t, "cpu_load_short_graphite", map[string]interface{}{"value": float64(23422)}) } // Test that the parser parses kafka messages into points func TestRunParserAndGatherJSON(t *testing.T) { - k, in := NewTestKafka() + k, in := newTestKafka() + acc := testutil.Accumulator{} + k.acc = &acc defer close(k.done) k.parser, _ = parsers.NewJSONParser("kafka_json_test", []string{}, nil) @@ -124,10 +113,9 @@ func TestRunParserAndGatherJSON(t *testing.T) { in <- saramaMsg(testMsgJSON) time.Sleep(time.Millisecond) - acc := testutil.Accumulator{} k.Gather(&acc) - assert.Equal(t, len(acc.Metrics), 1) + assert.Equal(t, acc.NFields(), 2) acc.AssertContainsFields(t, "kafka_json_test", map[string]interface{}{ "a": float64(5), diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 8ca0d44b1..ac4b738d7 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -15,15 +15,17 @@ import ( ) type MQTTConsumer struct { - Servers []string - Topics []string - Username string - Password string - MetricBuffer int - QoS int `toml:"qos"` + Servers []string + Topics []string + Username string + Password string + QoS int `toml:"qos"` parser parsers.Parser + // Legacy metric buffer support + MetricBuffer int + // Path to CA file SSLCA string `toml:"ssl_ca"` // Path to host cert file @@ -35,13 +37,12 @@ type MQTTConsumer struct { sync.Mutex client *mqtt.Client - // channel for all incoming parsed mqtt metrics - metricC chan telegraf.Metric - // channel for the topics of all incoming metrics (for tagging metrics) - topicC chan string // channel of all incoming raw mqtt messages in chan mqtt.Message done chan struct{} + + // keep the accumulator internally: + acc telegraf.Accumulator } var sampleConfig = ` @@ -56,9 +57,6 @@ var sampleConfig = ` "sensors/#", ] - ### Maximum number of metrics to buffer between collection intervals - metric_buffer = 100000 - ### username and password to connect MQTT server. # username = "telegraf" # password = "metricsmetricsmetricsmetrics" @@ -89,9 +87,11 @@ func (m *MQTTConsumer) SetParser(parser parsers.Parser) { m.parser = parser } -func (m *MQTTConsumer) Start() error { +func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { m.Lock() defer m.Unlock() + + m.acc = acc if m.QoS > 2 || m.QoS < 0 { return fmt.Errorf("MQTT Consumer, invalid QoS value: %d", m.QoS) } @@ -106,13 +106,8 @@ func (m *MQTTConsumer) Start() error { return token.Error() } - m.in = make(chan mqtt.Message, m.MetricBuffer) + m.in = make(chan mqtt.Message, 1000) m.done = make(chan struct{}) - if m.MetricBuffer == 0 { - m.MetricBuffer = 100000 - } - m.metricC = make(chan telegraf.Metric, m.MetricBuffer) - m.topicC = make(chan string, m.MetricBuffer) topics := make(map[string]byte) for _, topic := range m.Topics { @@ -145,13 +140,9 @@ func (m *MQTTConsumer) receiver() { } for _, metric := range metrics { - select { - case m.metricC <- metric: - m.topicC <- topic - default: - log.Printf("MQTT Consumer buffer is full, dropping a metric." + - " You may want to increase the metric_buffer setting") - } + tags := metric.Tags() + tags["topic"] = topic + m.acc.AddFields(metric.Name(), metric.Fields(), tags, metric.Time()) } } } @@ -169,16 +160,6 @@ func (m *MQTTConsumer) Stop() { } func (m *MQTTConsumer) Gather(acc telegraf.Accumulator) error { - m.Lock() - defer m.Unlock() - nmetrics := len(m.metricC) - for i := 0; i < nmetrics; i++ { - metric := <-m.metricC - topic := <-m.topicC - tags := metric.Tags() - tags["topic"] = topic - acc.AddFields(metric.Name(), metric.Fields(), tags, metric.Time()) - } return nil } diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index be216dfbb..b1dd59bcf 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -4,7 +4,6 @@ import ( "testing" "time" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" @@ -16,19 +15,15 @@ const ( testMsgGraphite = "cpu.load.short.graphite 23422 1454780029" testMsgJSON = "{\"a\": 5, \"b\": {\"c\": 6}}\n" invalidMsg = "cpu_load_short,host=server01 1422568543702900257" - metricBuffer = 5 ) func newTestMQTTConsumer() (*MQTTConsumer, chan mqtt.Message) { - in := make(chan mqtt.Message, metricBuffer) + in := make(chan mqtt.Message, 100) n := &MQTTConsumer{ - Topics: []string{"telegraf"}, - Servers: []string{"localhost:1883"}, - MetricBuffer: metricBuffer, - in: in, - done: make(chan struct{}), - metricC: make(chan telegraf.Metric, metricBuffer), - topicC: make(chan string, metricBuffer), + Topics: []string{"telegraf"}, + Servers: []string{"localhost:1883"}, + in: in, + done: make(chan struct{}), } return n, in } @@ -36,14 +31,16 @@ func newTestMQTTConsumer() (*MQTTConsumer, chan mqtt.Message) { // Test that the parser parses NATS messages into metrics func TestRunParser(t *testing.T) { n, in := newTestMQTTConsumer() + acc := testutil.Accumulator{} + n.acc = &acc defer close(n.done) n.parser, _ = parsers.NewInfluxParser() go n.receiver() in <- mqttMsg(testMsg) - time.Sleep(time.Millisecond) + time.Sleep(time.Millisecond * 25) - if a := len(n.metricC); a != 1 { + if a := acc.NFields(); a != 1 { t.Errorf("got %v, expected %v", a, 1) } } @@ -51,51 +48,34 @@ func TestRunParser(t *testing.T) { // Test that the parser ignores invalid messages func TestRunParserInvalidMsg(t *testing.T) { n, in := newTestMQTTConsumer() + acc := testutil.Accumulator{} + n.acc = &acc defer close(n.done) n.parser, _ = parsers.NewInfluxParser() go n.receiver() in <- mqttMsg(invalidMsg) - time.Sleep(time.Millisecond) + time.Sleep(time.Millisecond * 25) - if a := len(n.metricC); a != 0 { + if a := acc.NFields(); a != 0 { t.Errorf("got %v, expected %v", a, 0) } } -// Test that metrics are dropped when we hit the buffer limit -func TestRunParserRespectsBuffer(t *testing.T) { - n, in := newTestMQTTConsumer() - defer close(n.done) - - n.parser, _ = parsers.NewInfluxParser() - go n.receiver() - for i := 0; i < metricBuffer+1; i++ { - in <- mqttMsg(testMsg) - } - time.Sleep(time.Millisecond) - - if a := len(n.metricC); a != metricBuffer { - t.Errorf("got %v, expected %v", a, metricBuffer) - } -} - // Test that the parser parses line format messages into metrics func TestRunParserAndGather(t *testing.T) { n, in := newTestMQTTConsumer() + acc := testutil.Accumulator{} + n.acc = &acc defer close(n.done) n.parser, _ = parsers.NewInfluxParser() go n.receiver() in <- mqttMsg(testMsg) - time.Sleep(time.Millisecond) + time.Sleep(time.Millisecond * 25) - acc := testutil.Accumulator{} n.Gather(&acc) - if a := len(acc.Metrics); a != 1 { - t.Errorf("got %v, expected %v", a, 1) - } acc.AssertContainsFields(t, "cpu_load_short", map[string]interface{}{"value": float64(23422)}) } @@ -103,19 +83,17 @@ func TestRunParserAndGather(t *testing.T) { // Test that the parser parses graphite format messages into metrics func TestRunParserAndGatherGraphite(t *testing.T) { n, in := newTestMQTTConsumer() + acc := testutil.Accumulator{} + n.acc = &acc defer close(n.done) n.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) go n.receiver() in <- mqttMsg(testMsgGraphite) - time.Sleep(time.Millisecond) + time.Sleep(time.Millisecond * 25) - acc := testutil.Accumulator{} n.Gather(&acc) - if a := len(acc.Metrics); a != 1 { - t.Errorf("got %v, expected %v", a, 1) - } acc.AssertContainsFields(t, "cpu_load_short_graphite", map[string]interface{}{"value": float64(23422)}) } @@ -123,19 +101,17 @@ func TestRunParserAndGatherGraphite(t *testing.T) { // Test that the parser parses json format messages into metrics func TestRunParserAndGatherJSON(t *testing.T) { n, in := newTestMQTTConsumer() + acc := testutil.Accumulator{} + n.acc = &acc defer close(n.done) n.parser, _ = parsers.NewJSONParser("nats_json_test", []string{}, nil) go n.receiver() in <- mqttMsg(testMsgJSON) - time.Sleep(time.Millisecond) + time.Sleep(time.Millisecond * 25) - acc := testutil.Accumulator{} n.Gather(&acc) - if a := len(acc.Metrics); a != 1 { - t.Errorf("got %v, expected %v", a, 1) - } acc.AssertContainsFields(t, "nats_json_test", map[string]interface{}{ "a": float64(5), diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index 56d56990f..7dad47b46 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -28,8 +28,10 @@ type natsConsumer struct { Servers []string Secure bool + // Legacy metric buffer support MetricBuffer int - parser parsers.Parser + + parser parsers.Parser sync.Mutex Conn *nats.Conn @@ -39,9 +41,8 @@ type natsConsumer struct { in chan *nats.Msg // channel for all NATS read errors errs chan error - // channel for all incoming parsed metrics - metricC chan telegraf.Metric - done chan struct{} + done chan struct{} + acc telegraf.Accumulator } var sampleConfig = ` @@ -53,9 +54,7 @@ var sampleConfig = ` subjects = ["telegraf"] ### name a queue group queue_group = "telegraf_consumers" - ### Maximum number of metrics to buffer between collection intervals - metric_buffer = 100000 - + ### Data format to consume. This can be "json", "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: @@ -84,10 +83,12 @@ func (n *natsConsumer) natsErrHandler(c *nats.Conn, s *nats.Subscription, e erro } // Start the nats consumer. Caller must call *natsConsumer.Stop() to clean up. -func (n *natsConsumer) Start() error { +func (n *natsConsumer) Start(acc telegraf.Accumulator) error { n.Lock() defer n.Unlock() + n.acc = acc + var connectErr error opts := nats.DefaultOptions @@ -115,11 +116,6 @@ func (n *natsConsumer) Start() error { } n.done = make(chan struct{}) - if n.MetricBuffer == 0 { - n.MetricBuffer = 100000 - } - - n.metricC = make(chan telegraf.Metric, n.MetricBuffer) // Start the message reader go n.receiver() @@ -146,13 +142,7 @@ func (n *natsConsumer) receiver() { } for _, metric := range metrics { - select { - case n.metricC <- metric: - continue - default: - log.Printf("NATS Consumer buffer is full, dropping a metric." + - " You may want to increase the metric_buffer setting") - } + n.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time()) } } @@ -163,7 +153,6 @@ func (n *natsConsumer) clean() { n.Lock() defer n.Unlock() close(n.in) - close(n.metricC) close(n.errs) for _, sub := range n.Subs { @@ -185,13 +174,6 @@ func (n *natsConsumer) Stop() { } func (n *natsConsumer) Gather(acc telegraf.Accumulator) error { - n.Lock() - defer n.Unlock() - nmetrics := len(n.metricC) - for i := 0; i < nmetrics; i++ { - metric := <-n.metricC - acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time()) - } return nil } diff --git a/plugins/inputs/nats_consumer/nats_consumer_test.go b/plugins/inputs/nats_consumer/nats_consumer_test.go index 214695d91..75fde66a6 100644 --- a/plugins/inputs/nats_consumer/nats_consumer_test.go +++ b/plugins/inputs/nats_consumer/nats_consumer_test.go @@ -4,7 +4,6 @@ import ( "testing" "time" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" "github.com/nats-io/nats" @@ -21,15 +20,13 @@ const ( func newTestNatsConsumer() (*natsConsumer, chan *nats.Msg) { in := make(chan *nats.Msg, metricBuffer) n := &natsConsumer{ - QueueGroup: "test", - Subjects: []string{"telegraf"}, - Servers: []string{"nats://localhost:4222"}, - Secure: false, - MetricBuffer: metricBuffer, - in: in, - errs: make(chan error, metricBuffer), - done: make(chan struct{}), - metricC: make(chan telegraf.Metric, metricBuffer), + QueueGroup: "test", + Subjects: []string{"telegraf"}, + Servers: []string{"nats://localhost:4222"}, + Secure: false, + in: in, + errs: make(chan error, metricBuffer), + done: make(chan struct{}), } return n, in } @@ -37,66 +34,51 @@ func newTestNatsConsumer() (*natsConsumer, chan *nats.Msg) { // Test that the parser parses NATS messages into metrics func TestRunParser(t *testing.T) { n, in := newTestNatsConsumer() + acc := testutil.Accumulator{} + n.acc = &acc defer close(n.done) n.parser, _ = parsers.NewInfluxParser() go n.receiver() in <- natsMsg(testMsg) - time.Sleep(time.Millisecond) + time.Sleep(time.Millisecond * 25) - if a := len(n.metricC); a != 1 { - t.Errorf("got %v, expected %v", a, 1) + if acc.NFields() != 1 { + t.Errorf("got %v, expected %v", acc.NFields(), 1) } } // Test that the parser ignores invalid messages func TestRunParserInvalidMsg(t *testing.T) { n, in := newTestNatsConsumer() + acc := testutil.Accumulator{} + n.acc = &acc defer close(n.done) n.parser, _ = parsers.NewInfluxParser() go n.receiver() in <- natsMsg(invalidMsg) - time.Sleep(time.Millisecond) + time.Sleep(time.Millisecond * 25) - if a := len(n.metricC); a != 0 { - t.Errorf("got %v, expected %v", a, 0) - } -} - -// Test that metrics are dropped when we hit the buffer limit -func TestRunParserRespectsBuffer(t *testing.T) { - n, in := newTestNatsConsumer() - defer close(n.done) - - n.parser, _ = parsers.NewInfluxParser() - go n.receiver() - for i := 0; i < metricBuffer+1; i++ { - in <- natsMsg(testMsg) - } - time.Sleep(time.Millisecond) - - if a := len(n.metricC); a != metricBuffer { - t.Errorf("got %v, expected %v", a, metricBuffer) + if acc.NFields() != 0 { + t.Errorf("got %v, expected %v", acc.NFields(), 0) } } // Test that the parser parses line format messages into metrics func TestRunParserAndGather(t *testing.T) { n, in := newTestNatsConsumer() + acc := testutil.Accumulator{} + n.acc = &acc defer close(n.done) n.parser, _ = parsers.NewInfluxParser() go n.receiver() in <- natsMsg(testMsg) - time.Sleep(time.Millisecond) + time.Sleep(time.Millisecond * 25) - acc := testutil.Accumulator{} n.Gather(&acc) - if a := len(acc.Metrics); a != 1 { - t.Errorf("got %v, expected %v", a, 1) - } acc.AssertContainsFields(t, "cpu_load_short", map[string]interface{}{"value": float64(23422)}) } @@ -104,19 +86,17 @@ func TestRunParserAndGather(t *testing.T) { // Test that the parser parses graphite format messages into metrics func TestRunParserAndGatherGraphite(t *testing.T) { n, in := newTestNatsConsumer() + acc := testutil.Accumulator{} + n.acc = &acc defer close(n.done) n.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) go n.receiver() in <- natsMsg(testMsgGraphite) - time.Sleep(time.Millisecond) + time.Sleep(time.Millisecond * 25) - acc := testutil.Accumulator{} n.Gather(&acc) - if a := len(acc.Metrics); a != 1 { - t.Errorf("got %v, expected %v", a, 1) - } acc.AssertContainsFields(t, "cpu_load_short_graphite", map[string]interface{}{"value": float64(23422)}) } @@ -124,19 +104,17 @@ func TestRunParserAndGatherGraphite(t *testing.T) { // Test that the parser parses json format messages into metrics func TestRunParserAndGatherJSON(t *testing.T) { n, in := newTestNatsConsumer() + acc := testutil.Accumulator{} + n.acc = &acc defer close(n.done) n.parser, _ = parsers.NewJSONParser("nats_json_test", []string{}, nil) go n.receiver() in <- natsMsg(testMsgJSON) - time.Sleep(time.Millisecond) + time.Sleep(time.Millisecond * 25) - acc := testutil.Accumulator{} n.Gather(&acc) - if a := len(acc.Metrics); a != 1 { - t.Errorf("got %v, expected %v", a, 1) - } acc.AssertContainsFields(t, "nats_json_test", map[string]interface{}{ "a": float64(5), diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index fb8de402e..470e31884 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -213,7 +213,7 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error { return nil } -func (s *Statsd) Start() error { +func (s *Statsd) Start(_ telegraf.Accumulator) error { // Make data structures s.done = make(chan struct{}) s.in = make(chan []byte, s.AllowedPendingMessages) diff --git a/testutil/accumulator.go b/testutil/accumulator.go index 7101db091..cb56d8d28 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -108,6 +108,8 @@ func (a *Accumulator) Get(measurement string) (*Metric, bool) { // NFields returns the total number of fields in the accumulator, across all // measurements func (a *Accumulator) NFields() int { + a.Lock() + defer a.Unlock() counter := 0 for _, pt := range a.Metrics { for _, _ = range pt.Fields { @@ -123,6 +125,8 @@ func (a *Accumulator) AssertContainsTaggedFields( fields map[string]interface{}, tags map[string]string, ) { + a.Lock() + defer a.Unlock() for _, p := range a.Metrics { if !reflect.DeepEqual(tags, p.Tags) { continue @@ -148,6 +152,8 @@ func (a *Accumulator) AssertContainsFields( measurement string, fields map[string]interface{}, ) { + a.Lock() + defer a.Unlock() for _, p := range a.Metrics { if p.Measurement == measurement { if !reflect.DeepEqual(fields, p.Fields) { @@ -166,6 +172,8 @@ func (a *Accumulator) AssertContainsFields( // HasIntValue returns true if the measurement has an Int value func (a *Accumulator) HasIntField(measurement string, field string) bool { + a.Lock() + defer a.Unlock() for _, p := range a.Metrics { if p.Measurement == measurement { for fieldname, value := range p.Fields { @@ -182,6 +190,8 @@ func (a *Accumulator) HasIntField(measurement string, field string) bool { // HasUIntValue returns true if the measurement has a UInt value func (a *Accumulator) HasUIntField(measurement string, field string) bool { + a.Lock() + defer a.Unlock() for _, p := range a.Metrics { if p.Measurement == measurement { for fieldname, value := range p.Fields { @@ -198,6 +208,8 @@ func (a *Accumulator) HasUIntField(measurement string, field string) bool { // HasFloatValue returns true if the given measurement has a float value func (a *Accumulator) HasFloatField(measurement string, field string) bool { + a.Lock() + defer a.Unlock() for _, p := range a.Metrics { if p.Measurement == measurement { for fieldname, value := range p.Fields { @@ -215,6 +227,8 @@ func (a *Accumulator) HasFloatField(measurement string, field string) bool { // HasMeasurement returns true if the accumulator has a measurement with the // given name func (a *Accumulator) HasMeasurement(measurement string) bool { + a.Lock() + defer a.Unlock() for _, p := range a.Metrics { if p.Measurement == measurement { return true From 4860dc148cce6d36c2516119ebb7395f66d0e0e8 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 17 Feb 2016 09:53:41 -0700 Subject: [PATCH 020/287] changelog update --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 272073dc4..2cdc09bac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,10 @@ they would like to output. Currently supports: "influx" and "graphite" [here](https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md) - More info on serializing _outgoing_ data formats can be found [here](https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md) +- Telegraf now has an option `flush_buffer_when_full` that will flush the +metric buffer whenever it fills up for each output, rather than dropping +points and only flushing on a set time interval. This will default to `true` +and is in the `[agent]` config section. ### Features - [#652](https://github.com/influxdata/telegraf/pull/652): CouchDB Input Plugin. Thanks @codehate! @@ -23,6 +27,7 @@ they would like to output. Currently supports: "influx" and "graphite" - [#679](https://github.com/influxdata/telegraf/pull/679): Support for arbitrary output data formats. - [#695](https://github.com/influxdata/telegraf/pull/695): raindrops input plugin. Thanks @burdandrei! - [#650](https://github.com/influxdata/telegraf/pull/650): net_response input plugin. Thanks @titilambert! +- [#699](https://github.com/influxdata/telegraf/pull/699): Flush based on buffer size rather than time. ### Bugfixes - [#443](https://github.com/influxdata/telegraf/issues/443): Fix Ping command timeout parameter on Linux. From d0734b105bd6ecc9686fe665a10494b8529b3339 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 17 Feb 2016 14:50:19 -0700 Subject: [PATCH 021/287] Start service plugins immediately, fix off-by-one bug --- agent/agent.go | 31 ++++++++++++++++--------------- internal/models/running_output.go | 3 ++- plugins/outputs/mqtt/mqtt.go | 2 +- 3 files changed, 19 insertions(+), 17 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 5a70097fc..42ade45f2 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -322,6 +322,22 @@ func (a *Agent) Run(shutdown chan struct{}) error { // channel shared between all input threads for accumulating metrics metricC := make(chan telegraf.Metric, 10000) + for _, input := range a.Config.Inputs { + // Start service of any ServicePlugins + switch p := input.Input.(type) { + case telegraf.ServiceInput: + acc := NewAccumulator(input.Config, metricC) + acc.SetDebug(a.Config.Agent.Debug) + acc.setDefaultTags(a.Config.Tags) + if err := p.Start(acc); err != nil { + log.Printf("Service for input %s failed to start, exiting\n%s\n", + input.Name, err.Error()) + return err + } + defer p.Stop() + } + } + // Round collection to nearest interval by sleeping if a.Config.Agent.RoundInterval { i := int64(a.Config.Agent.Interval.Duration) @@ -339,21 +355,6 @@ func (a *Agent) Run(shutdown chan struct{}) error { }() for _, input := range a.Config.Inputs { - - // Start service of any ServicePlugins - switch p := input.Input.(type) { - case telegraf.ServiceInput: - acc := NewAccumulator(input.Config, metricC) - acc.SetDebug(a.Config.Agent.Debug) - acc.setDefaultTags(a.Config.Tags) - if err := p.Start(acc); err != nil { - log.Printf("Service for input %s failed to start, exiting\n%s\n", - input.Name, err.Error()) - return err - } - defer p.Stop() - } - // Special handling for inputs that have their own collection interval // configured. Default intervals are handled below with gatherParallel if input.Config.Interval != 0 { diff --git a/internal/models/running_output.go b/internal/models/running_output.go index 1b27f66de..de7c8ab21 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -59,10 +59,11 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) { ro.Lock() defer ro.Unlock() - if len(ro.metrics) < ro.MetricBufferLimit { + if len(ro.metrics) < ro.MetricBufferLimit-1 { ro.metrics = append(ro.metrics, metric) } else { if ro.FlushBufferWhenFull { + ro.metrics = append(ro.metrics, metric) tmpmetrics := make([]telegraf.Metric, len(ro.metrics)) copy(tmpmetrics, ro.metrics) ro.metrics = make([]telegraf.Metric, 0) diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index d28a04d72..48046878b 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -18,7 +18,7 @@ var sampleConfig = ` ### MQTT outputs send metrics to this topic format ### "///" - ### ex: prefix/host/web01.example.com/mem + ### ex: prefix/web01.example.com/mem topic_prefix = "telegraf" ### username and password to connect MQTT server. From 88c83277c61f58ee376bbce3b8b429bd031b5bff Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 17 Feb 2016 16:46:53 -0700 Subject: [PATCH 022/287] Write unit tests for RunningOutput --- internal/models/running_output.go | 2 +- internal/models/running_output_test.go | 265 +++++++++++++++++++++++++ 2 files changed, 266 insertions(+), 1 deletion(-) create mode 100644 internal/models/running_output_test.go diff --git a/internal/models/running_output.go b/internal/models/running_output.go index de7c8ab21..37b479dfb 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -59,7 +59,7 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) { ro.Lock() defer ro.Unlock() - if len(ro.metrics) < ro.MetricBufferLimit-1 { + if len(ro.metrics) < ro.MetricBufferLimit { ro.metrics = append(ro.metrics, metric) } else { if ro.FlushBufferWhenFull { diff --git a/internal/models/running_output_test.go b/internal/models/running_output_test.go new file mode 100644 index 000000000..6eee3bd11 --- /dev/null +++ b/internal/models/running_output_test.go @@ -0,0 +1,265 @@ +package internal_models + +import ( + "fmt" + "sort" + "sync" + "testing" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var first5 = []telegraf.Metric{ + testutil.TestMetric(101, "metric1"), + testutil.TestMetric(101, "metric2"), + testutil.TestMetric(101, "metric3"), + testutil.TestMetric(101, "metric4"), + testutil.TestMetric(101, "metric5"), +} + +var next5 = []telegraf.Metric{ + testutil.TestMetric(101, "metric6"), + testutil.TestMetric(101, "metric7"), + testutil.TestMetric(101, "metric8"), + testutil.TestMetric(101, "metric9"), + testutil.TestMetric(101, "metric10"), +} + +// Test that we can write metrics with simple default setup. +func TestRunningOutputDefault(t *testing.T) { + conf := &OutputConfig{ + Filter: Filter{ + IsActive: false, + }, + } + + m := &mockOutput{} + ro := NewRunningOutput("test", m, conf) + + for _, metric := range first5 { + ro.AddMetric(metric) + } + for _, metric := range next5 { + ro.AddMetric(metric) + } + assert.Len(t, m.Metrics(), 0) + + err := ro.Write() + assert.NoError(t, err) + assert.Len(t, m.Metrics(), 10) +} + +// Test that the first metric gets overwritten if there is a buffer overflow. +func TestRunningOutputOverwrite(t *testing.T) { + conf := &OutputConfig{ + Filter: Filter{ + IsActive: false, + }, + } + + m := &mockOutput{} + ro := NewRunningOutput("test", m, conf) + ro.MetricBufferLimit = 4 + + for _, metric := range first5 { + ro.AddMetric(metric) + } + require.Len(t, m.Metrics(), 0) + + err := ro.Write() + require.NoError(t, err) + require.Len(t, m.Metrics(), 4) + + var expected, actual []string + for i, exp := range first5[1:] { + expected = append(expected, exp.String()) + actual = append(actual, m.Metrics()[i].String()) + } + + sort.Strings(expected) + sort.Strings(actual) + + assert.Equal(t, expected, actual) +} + +// Test that multiple buffer overflows are handled properly. +func TestRunningOutputMultiOverwrite(t *testing.T) { + conf := &OutputConfig{ + Filter: Filter{ + IsActive: false, + }, + } + + m := &mockOutput{} + ro := NewRunningOutput("test", m, conf) + ro.MetricBufferLimit = 3 + + for _, metric := range first5 { + ro.AddMetric(metric) + } + for _, metric := range next5 { + ro.AddMetric(metric) + } + require.Len(t, m.Metrics(), 0) + + err := ro.Write() + require.NoError(t, err) + require.Len(t, m.Metrics(), 3) + + var expected, actual []string + for i, exp := range next5[2:] { + expected = append(expected, exp.String()) + actual = append(actual, m.Metrics()[i].String()) + } + + sort.Strings(expected) + sort.Strings(actual) + + assert.Equal(t, expected, actual) +} + +// Test that running output doesn't flush until it's full when +// FlushBufferWhenFull is set. +func TestRunningOutputFlushWhenFull(t *testing.T) { + conf := &OutputConfig{ + Filter: Filter{ + IsActive: false, + }, + } + + m := &mockOutput{} + ro := NewRunningOutput("test", m, conf) + ro.FlushBufferWhenFull = true + ro.MetricBufferLimit = 5 + + // Fill buffer to limit + for _, metric := range first5 { + ro.AddMetric(metric) + } + // no flush yet + assert.Len(t, m.Metrics(), 0) + + // add one more metric + ro.AddMetric(next5[0]) + // now it flushed + assert.Len(t, m.Metrics(), 6) + + // add one more metric and write it manually + ro.AddMetric(next5[1]) + err := ro.Write() + assert.NoError(t, err) + assert.Len(t, m.Metrics(), 7) +} + +// Test that running output doesn't flush until it's full when +// FlushBufferWhenFull is set, twice. +func TestRunningOutputMultiFlushWhenFull(t *testing.T) { + conf := &OutputConfig{ + Filter: Filter{ + IsActive: false, + }, + } + + m := &mockOutput{} + ro := NewRunningOutput("test", m, conf) + ro.FlushBufferWhenFull = true + ro.MetricBufferLimit = 4 + + // Fill buffer past limit twive + for _, metric := range first5 { + ro.AddMetric(metric) + } + for _, metric := range next5 { + ro.AddMetric(metric) + } + // flushed twice + assert.Len(t, m.Metrics(), 10) +} + +func TestRunningOutputWriteFail(t *testing.T) { + conf := &OutputConfig{ + Filter: Filter{ + IsActive: false, + }, + } + + m := &mockOutput{} + m.failWrite = true + ro := NewRunningOutput("test", m, conf) + ro.FlushBufferWhenFull = true + ro.MetricBufferLimit = 4 + + // Fill buffer past limit twice + for _, metric := range first5 { + ro.AddMetric(metric) + } + for _, metric := range next5 { + ro.AddMetric(metric) + } + // no successful flush yet + assert.Len(t, m.Metrics(), 0) + + // manual write fails + err := ro.Write() + require.Error(t, err) + // no successful flush yet + assert.Len(t, m.Metrics(), 0) + + m.failWrite = false + err = ro.Write() + require.NoError(t, err) + + assert.Len(t, m.Metrics(), 10) +} + +type mockOutput struct { + sync.Mutex + + metrics []telegraf.Metric + + // if true, mock a write failure + failWrite bool +} + +func (m *mockOutput) Connect() error { + return nil +} + +func (m *mockOutput) Close() error { + return nil +} + +func (m *mockOutput) Description() string { + return "" +} + +func (m *mockOutput) SampleConfig() string { + return "" +} + +func (m *mockOutput) Write(metrics []telegraf.Metric) error { + m.Lock() + defer m.Unlock() + if m.failWrite { + return fmt.Errorf("Failed Write!") + } + + if m.metrics == nil { + m.metrics = []telegraf.Metric{} + } + + for _, metric := range metrics { + m.metrics = append(m.metrics, metric) + } + return nil +} + +func (m *mockOutput) Metrics() []telegraf.Metric { + m.Lock() + defer m.Unlock() + return m.metrics +} From 05170d78be7e95b1cc7706103327c6390849cda5 Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Sun, 31 Jan 2016 22:45:44 +0100 Subject: [PATCH 023/287] plugin(mesos): Initial commit The plugin is able to query a Mesos master and push the metrics, a blacklist can be configured and a timeout, it's still not used. Added unit test, might be a good idea to have system test using docker. --- plugins/inputs/all/all.go | 1 + plugins/inputs/mesos/mesos.go | 260 +++++++++++++++++++++++++++++ plugins/inputs/mesos/mesos_test.go | 119 +++++++++++++ 3 files changed, 380 insertions(+) create mode 100644 plugins/inputs/mesos/mesos.go create mode 100644 plugins/inputs/mesos/mesos_test.go diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 9f2122e21..74331e54b 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -20,6 +20,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/lustre2" _ "github.com/influxdata/telegraf/plugins/inputs/mailchimp" _ "github.com/influxdata/telegraf/plugins/inputs/memcached" + _ "github.com/influxdata/telegraf/plugins/inputs/mesos" _ "github.com/influxdata/telegraf/plugins/inputs/mongodb" _ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/mysql" diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go new file mode 100644 index 000000000..835c14b78 --- /dev/null +++ b/plugins/inputs/mesos/mesos.go @@ -0,0 +1,260 @@ +package mesos + +import ( + "encoding/json" + "errors" + "io/ioutil" + "net" + "net/http" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type Mesos struct { + Timeout string + Servers []string + Blacklist []string +} + +func masterBlocks(g string) ([]string, error) { + var m map[string][]string + + m = make(map[string][]string) + + m["resources"] = []string{ + "master/cpus_percent", + "master/cpus_used", + "master/cpus_total", + "master/cpus_revocable_percent", + "master/cpus_revocable_total", + "master/cpus_revocable_used", + "master/disk_percent", + "master/disk_used", + "master/disk_total", + "master/disk_revocable_percent", + "master/disk_revocable_total", + "master/disk_revocable_used", + "master/mem_percent", + "master/mem_used", + "master/mem_total", + "master/mem_revocable_percent", + "master/mem_revocable_total", + "master/mem_revocable_used", + } + + m["master"] = []string{ + "master/elected", + "master/uptime_secs", + } + + m["system"] = []string{ + "system/cpus_total", + "system/load_15min", + "system/load_5min", + "system/load_1min", + "system/mem_free_bytes", + "system/mem_total_bytes", + } + + m["slaves"] = []string{ + "master/slave_registrations", + "master/slave_removals", + "master/slave_reregistrations", + "master/slave_shutdowns_scheduled", + "master/slave_shutdowns_canceled", + "master/slave_shutdowns_completed", + "master/slaves_active", + "master/slaves_connected", + "master/slaves_disconnected", + "master/slaves_inactive", + } + + m["frameworks"] = []string{ + "master/frameworks_active", + "master/frameworks_connected", + "master/frameworks_disconnected", + "master/frameworks_inactive", + "master/outstanding_offers", + } + + m["tasks"] = []string{ + "master/tasks_error", + "master/tasks_failed", + "master/tasks_finished", + "master/tasks_killed", + "master/tasks_lost", + "master/tasks_running", + "master/tasks_staging", + "master/tasks_starting", + } + + m["messages"] = []string{ + "master/invalid_executor_to_framework_messages", + "master/invalid_framework_to_executor_messages", + "master/invalid_status_update_acknowledgements", + "master/invalid_status_updates", + "master/dropped_messages", + "master/messages_authenticate", + "master/messages_deactivate_framework", + "master/messages_decline_offers", + "master/messages_executor_to_framework", + "master/messages_exited_executor", + "master/messages_framework_to_executor", + "master/messages_kill_task", + "master/messages_launch_tasks", + "master/messages_reconcile_tasks", + "master/messages_register_framework", + "master/messages_register_slave", + "master/messages_reregister_framework", + "master/messages_reregister_slave", + "master/messages_resource_request", + "master/messages_revive_offers", + "master/messages_status_update", + "master/messages_status_update_acknowledgement", + "master/messages_unregister_framework", + "master/messages_unregister_slave", + "master/messages_update_slave", + "master/recovery_slave_removals", + "master/slave_removals/reason_registered", + "master/slave_removals/reason_unhealthy", + "master/slave_removals/reason_unregistered", + "master/valid_framework_to_executor_messages", + "master/valid_status_update_acknowledgements", + "master/valid_status_updates", + "master/task_lost/source_master/reason_invalid_offers", + "master/task_lost/source_master/reason_slave_removed", + "master/task_lost/source_slave/reason_executor_terminated", + "master/valid_executor_to_framework_messages", + } + + m["evqueue"] = []string{ + "master/event_queue_dispatches", + "master/event_queue_http_requests", + "master/event_queue_messages", + } + + m["registrar"] = []string{ + "registrar/state_fetch_ms", + "registrar/state_store_ms", + "registrar/state_store_ms/max", + "registrar/state_store_ms/min", + "registrar/state_store_ms/p50", + "registrar/state_store_ms/p90", + "registrar/state_store_ms/p95", + "registrar/state_store_ms/p99", + "registrar/state_store_ms/p999", + "registrar/state_store_ms/p9999", + } + + ret, ok := m[g] + + if !ok { + return nil, errors.New("Unknown group:" + g) + } + + return ret, nil +} + +type masterMestrics struct { + resources []string +} + +var sampleConfig = ` + # Timeout, in ms. + timeout = 2000 + # A list of Mesos masters. e.g. master1:5050, master2:5080, etc. + # The port can be skipped if using the default (5050) + # Default value is localhost:5050. + servers = ["localhost:5050"] + blacklist = ["system"] +` + +// removeGroup(), remove blacklisted groups +func (m *Mesos) removeGroup(j *map[string]interface{}) error { + for _, v := range m.Blacklist { + ms, err := masterBlocks(v) + if err != nil { + return err + } + for _, sv := range ms { + delete((*j), sv) + } + } + return nil +} + +// SampleConfig returns a sample configuration block +func (m *Mesos) SampleConfig() string { + return sampleConfig +} + +// Description just returns a short description of the Mesos plugin +func (m *Mesos) Description() string { + return "Telegraf plugin for gathering metrics from N Mesos masters" +} + +func (m *Mesos) Gather(acc telegraf.Accumulator) error { + if len(m.Servers) == 0 { + return m.gatherMetrics("localhost:5050", acc) + } + + for _, v := range m.Servers { + if err := m.gatherMetrics(v, acc); err != nil { + return err + } + } + return nil +} + +func (m *Mesos) gatherMetrics(a string, acc telegraf.Accumulator) error { + var jsonOut map[string]interface{} + + if _, _, err := net.SplitHostPort(a); err != nil { + a = a + ":5050" + } + + tags := map[string]string{ + "server": a, + } + + // TODO: Use Timeout + resp, err := http.Get("http://" + a + "/metrics/snapshot") + + if err != nil { + return err + } + + data, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return err + } + + if err = json.Unmarshal([]byte(data), &jsonOut); err != nil { + return errors.New("Error decoding JSON response") + } + + if len(m.Blacklist) > 0 { + m.removeGroup(&jsonOut) + } + + jf := internal.JSONFlattener{} + + err = jf.FlattenJSON("", jsonOut) + + if err != nil { + return err + } + + acc.AddFields("mesos", jf.Fields, tags) + + return nil +} + +func init() { + inputs.Add("mesos", func() telegraf.Input { + return &Mesos{} + }) +} diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go new file mode 100644 index 000000000..0bd9d02cb --- /dev/null +++ b/plugins/inputs/mesos/mesos_test.go @@ -0,0 +1,119 @@ +package mesos + +import ( + "encoding/json" + "math/rand" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/influxdata/telegraf/testutil" +) + +var mesosMetrics map[string]interface{} +var ts *httptest.Server + +func generateMetrics() { + mesosMetrics = make(map[string]interface{}) + + metricNames := []string{"master/cpus_percent", "master/cpus_used", "master/cpus_total", + "master/cpus_revocable_percent", "master/cpus_revocable_total", "master/cpus_revocable_used", + "master/disk_percent", "master/disk_used", "master/disk_total", "master/disk_revocable_percent", + "master/disk_revocable_total", "master/disk_revocable_used", "master/mem_percent", + "master/mem_used", "master/mem_total", "master/mem_revocable_percent", "master/mem_revocable_total", + "master/mem_revocable_used", "master/elected", "master/uptime_secs", "system/cpus_total", + "system/load_15min", "system/load_5min", "system/load_1min", "system/mem_free_bytes", + "system/mem_total_bytes", "master/slave_registrations", "master/slave_removals", + "master/slave_reregistrations", "master/slave_shutdowns_scheduled", "master/slave_shutdowns_canceled", + "master/slave_shutdowns_completed", "master/slaves_active", "master/slaves_connected", + "master/slaves_disconnected", "master/slaves_inactive", "master/frameworks_active", + "master/frameworks_connected", "master/frameworks_disconnected", "master/frameworks_inactive", + "master/outstanding_offers", "master/tasks_error", "master/tasks_failed", "master/tasks_finished", + "master/tasks_killed", "master/tasks_lost", "master/tasks_running", "master/tasks_staging", + "master/tasks_starting", "master/invalid_executor_to_framework_messages", "master/invalid_framework_to_executor_messages", + "master/invalid_status_update_acknowledgements", "master/invalid_status_updates", + "master/dropped_messages", "master/messages_authenticate", "master/messages_deactivate_framework", + "master/messages_decline_offers", "master/messages_executor_to_framework", "master/messages_exited_executor", + "master/messages_framework_to_executor", "master/messages_kill_task", "master/messages_launch_tasks", + "master/messages_reconcile_tasks", "master/messages_register_framework", "master/messages_register_slave", + "master/messages_reregister_framework", "master/messages_reregister_slave", "master/messages_resource_request", + "master/messages_revive_offers", "master/messages_status_update", "master/messages_status_update_acknowledgement", + "master/messages_unregister_framework", "master/messages_unregister_slave", "master/messages_update_slave", + "master/recovery_slave_removals", "master/slave_removals/reason_registered", "master/slave_removals/reason_unhealthy", + "master/slave_removals/reason_unregistered", "master/valid_framework_to_executor_messages", "master/valid_status_update_acknowledgements", + "master/valid_status_updates", "master/task_lost/source_master/reason_invalid_offers", + "master/task_lost/source_master/reason_slave_removed", "master/task_lost/source_slave/reason_executor_terminated", + "master/valid_executor_to_framework_messages", "master/event_queue_dispatches", + "master/event_queue_http_requests", "master/event_queue_messages", "registrar/state_fetch_ms", + "registrar/state_store_ms", "registrar/state_store_ms/max", "registrar/state_store_ms/min", + "registrar/state_store_ms/p50", "registrar/state_store_ms/p90", "registrar/state_store_ms/p95", + "registrar/state_store_ms/p99", "registrar/state_store_ms/p999", "registrar/state_store_ms/p9999"} + + for _, k := range metricNames { + mesosMetrics[k] = rand.Float64() + } +} + +func TestMain(m *testing.M) { + generateMetrics() + r := http.NewServeMux() + r.HandleFunc("/metrics/snapshot", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(mesosMetrics) + }) + ts = httptest.NewServer(r) + rc := m.Run() + ts.Close() + os.Exit(rc) +} + +func TestMesosMaster(t *testing.T) { + var acc testutil.Accumulator + + m := Mesos{ + Servers: []string{ts.Listener.Addr().String()}, + } + + err := m.Gather(&acc) + + if err != nil { + t.Errorf(err.Error()) + } + + acc.AssertContainsFields(t, "mesos", mesosMetrics) +} + +func TestRemoveGroup(t *testing.T) { + j := []string{ + "resources", "master", + "system", "slaves", "frameworks", + "tasks", "messages", "evqueue", + "messages", "registrar", + } + + generateMetrics() + + for _, v := range j { + m := Mesos{ + Blacklist: []string{v}, + } + err := m.removeGroup(&mesosMetrics) + if err != nil { + t.Errorf("Error removing non-exiting key: %s.", v) + } + } + + if len(mesosMetrics) > 0 { + t.Error("Keys were left at slice sample") + } + + m := Mesos{ + Blacklist: []string{"fail"}, + } + + if err := m.removeGroup(&mesosMetrics); err == nil { + t.Errorf("Key %s should have returned error.", m.Blacklist[0]) + } +} From 59e0e49822aa01eaa1cbf7ccefe630daafbfb221 Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Sun, 31 Jan 2016 23:04:14 +0100 Subject: [PATCH 024/287] Indentation for sample config string --- plugins/inputs/mesos/mesos.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 835c14b78..aede459fc 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -168,7 +168,7 @@ var sampleConfig = ` # The port can be skipped if using the default (5050) # Default value is localhost:5050. servers = ["localhost:5050"] - blacklist = ["system"] + blacklist = ["system"] ` // removeGroup(), remove blacklisted groups From 07502c9804929b748cc9efa61c10858b9b4f1e6b Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Sun, 31 Jan 2016 23:14:10 +0100 Subject: [PATCH 025/287] Don't add port to tags just the host --- plugins/inputs/mesos/mesos.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index aede459fc..1607b1b42 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -211,12 +211,14 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error { func (m *Mesos) gatherMetrics(a string, acc telegraf.Accumulator) error { var jsonOut map[string]interface{} - if _, _, err := net.SplitHostPort(a); err != nil { + host, _, err := net.SplitHostPort(a) + if err != nil { + host = a a = a + ":5050" } tags := map[string]string{ - "server": a, + "server": host, } // TODO: Use Timeout From 1d50d62a79f1f3c602008d5ee9e6a7ee852e515a Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Tue, 2 Feb 2016 02:17:38 +0100 Subject: [PATCH 026/287] plugin(mesos): Added goroutines. The plugin will iterate over the Servers slice and create a goroutine for each of them. --- plugins/inputs/mesos/mesos.go | 30 +++++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 1607b1b42..ef1b8269d 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -6,6 +6,8 @@ import ( "io/ioutil" "net" "net/http" + "strings" + "sync" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" @@ -196,15 +198,37 @@ func (m *Mesos) Description() string { } func (m *Mesos) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + var errorChannel chan error + if len(m.Servers) == 0 { - return m.gatherMetrics("localhost:5050", acc) + m.Servers = []string{"localhost:5050"} } + errorChannel = make(chan error, len(m.Servers)*2) + for _, v := range m.Servers { - if err := m.gatherMetrics(v, acc); err != nil { - return err + wg.Add(1) + go func() { + errorChannel <- m.gatherMetrics(v, acc) + wg.Done() + return + }() + } + + wg.Wait() + close(errorChannel) + errorStrings := []string{} + + for err := range errorChannel { + if err != nil { + errorStrings = append(errorStrings, err.Error()) } } + + if len(errorStrings) > 0 { + return errors.New(strings.Join(errorStrings, "\n")) + } return nil } From 52b329be4e1ee033a81b059e8a6cc1ef2761138b Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Wed, 3 Feb 2016 03:31:39 +0100 Subject: [PATCH 027/287] plugin(mesos): Reversed removeGroup() * Now the user selects what to push instead of what not * Required to check and improve tests * Missing checks in the code when MetricsCol is empty --- plugins/inputs/mesos/mesos.go | 135 +++++++++++++++-------------- plugins/inputs/mesos/mesos_test.go | 22 +++-- 2 files changed, 80 insertions(+), 77 deletions(-) diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index ef1b8269d..88be7c027 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -4,6 +4,7 @@ import ( "encoding/json" "errors" "io/ioutil" + "log" "net" "net/http" "strings" @@ -15,12 +16,57 @@ import ( ) type Mesos struct { - Timeout string - Servers []string - Blacklist []string + Timeout string + Servers []string + MetricsCol []string `toml:"metrics_collection"` } -func masterBlocks(g string) ([]string, error) { +// SampleConfig returns a sample configuration block +func (m *Mesos) SampleConfig() string { + return sampleConfig +} + +// Description just returns a short description of the Mesos plugin +func (m *Mesos) Description() string { + return "Telegraf plugin for gathering metrics from N Mesos masters" +} + +func (m *Mesos) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + var errorChannel chan error + + if len(m.Servers) == 0 { + m.Servers = []string{"localhost:5050"} + } + + errorChannel = make(chan error, len(m.Servers)*2) + + for _, v := range m.Servers { + wg.Add(1) + go func() { + errorChannel <- m.gatherMetrics(v, acc) + wg.Done() + return + }() + } + + wg.Wait() + close(errorChannel) + errorStrings := []string{} + + for err := range errorChannel { + if err != nil { + errorStrings = append(errorStrings, err.Error()) + } + } + + if len(errorStrings) > 0 { + return errors.New(strings.Join(errorStrings, "\n")) + } + return nil +} + +func masterBlocks(g string) []string { var m map[string][]string m = make(map[string][]string) @@ -153,14 +199,11 @@ func masterBlocks(g string) ([]string, error) { ret, ok := m[g] if !ok { - return nil, errors.New("Unknown group:" + g) + log.Println("Unkown metrics group: ", g) + return []string{} } - return ret, nil -} - -type masterMestrics struct { - resources []string + return ret } var sampleConfig = ` @@ -170,68 +213,30 @@ var sampleConfig = ` # The port can be skipped if using the default (5050) # Default value is localhost:5050. servers = ["localhost:5050"] - blacklist = ["system"] + # Metrics groups to be collected. + # Default, all enabled. + metrics_collection = ["resources","master","system","slaves","frameworks","messages","evqueues","registrar"] ` // removeGroup(), remove blacklisted groups -func (m *Mesos) removeGroup(j *map[string]interface{}) error { - for _, v := range m.Blacklist { - ms, err := masterBlocks(v) - if err != nil { - return err - } - for _, sv := range ms { - delete((*j), sv) - } - } - return nil -} +func (m *Mesos) removeGroup(j *map[string]interface{}) { + var ok bool + u := map[string]bool{} -// SampleConfig returns a sample configuration block -func (m *Mesos) SampleConfig() string { - return sampleConfig -} - -// Description just returns a short description of the Mesos plugin -func (m *Mesos) Description() string { - return "Telegraf plugin for gathering metrics from N Mesos masters" -} - -func (m *Mesos) Gather(acc telegraf.Accumulator) error { - var wg sync.WaitGroup - var errorChannel chan error - - if len(m.Servers) == 0 { - m.Servers = []string{"localhost:5050"} - } - - errorChannel = make(chan error, len(m.Servers)*2) - - for _, v := range m.Servers { - wg.Add(1) - go func() { - errorChannel <- m.gatherMetrics(v, acc) - wg.Done() - return - }() - } - - wg.Wait() - close(errorChannel) - errorStrings := []string{} - - for err := range errorChannel { - if err != nil { - errorStrings = append(errorStrings, err.Error()) + for _, v := range m.MetricsCol { + for _, k := range masterBlocks(v) { + u[k] = true } } - if len(errorStrings) > 0 { - return errors.New(strings.Join(errorStrings, "\n")) + for k, _ := range u { + if _, ok = (*j)[k]; ok { + delete((*j), k) + } } - return nil } +// This should not belong to the object func (m *Mesos) gatherMetrics(a string, acc telegraf.Accumulator) error { var jsonOut map[string]interface{} @@ -262,9 +267,9 @@ func (m *Mesos) gatherMetrics(a string, acc telegraf.Accumulator) error { return errors.New("Error decoding JSON response") } - if len(m.Blacklist) > 0 { - m.removeGroup(&jsonOut) - } + //if len(m.Blacklist) > 0 { + // m.removeGroup(&jsonOut) + //} jf := internal.JSONFlattener{} diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go index 0bd9d02cb..1f69e4ebb 100644 --- a/plugins/inputs/mesos/mesos_test.go +++ b/plugins/inputs/mesos/mesos_test.go @@ -86,6 +86,9 @@ func TestMesosMaster(t *testing.T) { } func TestRemoveGroup(t *testing.T) { + //t.Skip("needs refactoring") + // FIXME: removeGroup() behavior is the opposite as it was, + // this test has to be refactored j := []string{ "resources", "master", "system", "slaves", "frameworks", @@ -97,23 +100,18 @@ func TestRemoveGroup(t *testing.T) { for _, v := range j { m := Mesos{ - Blacklist: []string{v}, + MetricsCol: []string{v}, } - err := m.removeGroup(&mesosMetrics) - if err != nil { - t.Errorf("Error removing non-exiting key: %s.", v) + m.removeGroup(&mesosMetrics) + for _, x := range masterBlocks(v) { + if _, ok := mesosMetrics[x]; ok { + t.Errorf("Found key %s, it should be gone.", x) + } } } if len(mesosMetrics) > 0 { t.Error("Keys were left at slice sample") } - - m := Mesos{ - Blacklist: []string{"fail"}, - } - - if err := m.removeGroup(&mesosMetrics); err == nil { - t.Errorf("Key %s should have returned error.", m.Blacklist[0]) - } + //Test for wrong keys } From 4c1e817b3867d37afd574b0fb99dde999ba0e33f Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Thu, 4 Feb 2016 00:27:23 +0100 Subject: [PATCH 028/287] fix(indent): For configuration sample --- plugins/inputs/mesos/mesos.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 88be7c027..81e85ed66 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -213,8 +213,8 @@ var sampleConfig = ` # The port can be skipped if using the default (5050) # Default value is localhost:5050. servers = ["localhost:5050"] - # Metrics groups to be collected. - # Default, all enabled. + # Metrics groups to be collected. + # Default, all enabled. metrics_collection = ["resources","master","system","slaves","frameworks","messages","evqueues","registrar"] ` From 9770802901b8760c97f34d3bdbcc084d31e3e149 Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Thu, 4 Feb 2016 02:46:20 +0100 Subject: [PATCH 029/287] feat(whitelist): Converted black to whitelist * Defined global var for holding default metric groups * Refactor removeGroup() to work with the whitelist * Refactor TestRemoveGroup() --- plugins/inputs/mesos/mesos.go | 43 +++++++++++++++++++++++------- plugins/inputs/mesos/mesos_test.go | 30 +++++++++------------ 2 files changed, 46 insertions(+), 27 deletions(-) diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 81e85ed66..8f59e4bef 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -21,6 +21,11 @@ type Mesos struct { MetricsCol []string `toml:"metrics_collection"` } +var defaultMetrics = []string{ + "resources", "master", "system", "slaves", "frameworks", + "tasks", "messages", "evqueue", "messages", "registrar", +} + // SampleConfig returns a sample configuration block func (m *Mesos) SampleConfig() string { return sampleConfig @@ -66,6 +71,27 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error { return nil } +func metricsDiff(w []string) []string { + b := []string{} + s := make(map[string]bool) + + if len(w) == 0 { + return b + } + + for _, v := range w { + s[v] = true + } + + for _, d := range defaultMetrics { + if _, ok := s[d]; !ok { + b = append(b, d) + } + } + + return b +} + func masterBlocks(g string) []string { var m map[string][]string @@ -215,23 +241,20 @@ var sampleConfig = ` servers = ["localhost:5050"] # Metrics groups to be collected. # Default, all enabled. - metrics_collection = ["resources","master","system","slaves","frameworks","messages","evqueues","registrar"] + metrics_collection = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"] ` // removeGroup(), remove blacklisted groups func (m *Mesos) removeGroup(j *map[string]interface{}) { var ok bool - u := map[string]bool{} - for _, v := range m.MetricsCol { - for _, k := range masterBlocks(v) { - u[k] = true - } - } + b := metricsDiff(m.MetricsCol) - for k, _ := range u { - if _, ok = (*j)[k]; ok { - delete((*j), k) + for _, k := range b { + for _, v := range masterBlocks(k) { + if _, ok = (*j)[v]; ok { + delete((*j), v) + } } } } diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go index 1f69e4ebb..f85f94f0f 100644 --- a/plugins/inputs/mesos/mesos_test.go +++ b/plugins/inputs/mesos/mesos_test.go @@ -89,29 +89,25 @@ func TestRemoveGroup(t *testing.T) { //t.Skip("needs refactoring") // FIXME: removeGroup() behavior is the opposite as it was, // this test has to be refactored - j := []string{ - "resources", "master", - "system", "slaves", "frameworks", - "tasks", "messages", "evqueue", - "messages", "registrar", - } - generateMetrics() - for _, v := range j { - m := Mesos{ - MetricsCol: []string{v}, - } - m.removeGroup(&mesosMetrics) + m := Mesos{ + MetricsCol: []string{ + "resources", "master", "registrar", + }, + } + b := []string{ + "system", "slaves", "frameworks", + "messages", "evqueue", + } + + m.removeGroup(&mesosMetrics) + + for _, v := range b { for _, x := range masterBlocks(v) { if _, ok := mesosMetrics[x]; ok { t.Errorf("Found key %s, it should be gone.", x) } } } - - if len(mesosMetrics) > 0 { - t.Error("Keys were left at slice sample") - } - //Test for wrong keys } From babecb6d4925215f6143b2ffcec79543e2eea080 Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Tue, 9 Feb 2016 23:49:30 +0100 Subject: [PATCH 030/287] feat(timeout): Use timeout setting * Use timeout as parameter in the http request * A bit of cleanup * More tests --- plugins/inputs/mesos/mesos.go | 23 +++++++++------- plugins/inputs/mesos/mesos_test.go | 43 +++++++++++++++++++++++++++--- 2 files changed, 54 insertions(+), 12 deletions(-) diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 8f59e4bef..29c424815 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -7,6 +7,7 @@ import ( "log" "net" "net/http" + "strconv" "strings" "sync" @@ -16,7 +17,7 @@ import ( ) type Mesos struct { - Timeout string + Timeout int Servers []string MetricsCol []string `toml:"metrics_collection"` } @@ -225,7 +226,7 @@ func masterBlocks(g string) []string { ret, ok := m[g] if !ok { - log.Println("Unkown metrics group: ", g) + log.Println("[mesos] Unkown metrics group: ", g) return []string{} } @@ -234,7 +235,7 @@ func masterBlocks(g string) []string { var sampleConfig = ` # Timeout, in ms. - timeout = 2000 + timeout = 100 # A list of Mesos masters. e.g. master1:5050, master2:5080, etc. # The port can be skipped if using the default (5050) # Default value is localhost:5050. @@ -244,7 +245,7 @@ var sampleConfig = ` metrics_collection = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"] ` -// removeGroup(), remove blacklisted groups +// removeGroup(), remove unwanted groups func (m *Mesos) removeGroup(j *map[string]interface{}) { var ok bool @@ -273,8 +274,14 @@ func (m *Mesos) gatherMetrics(a string, acc telegraf.Accumulator) error { "server": host, } - // TODO: Use Timeout - resp, err := http.Get("http://" + a + "/metrics/snapshot") + if m.Timeout == 0 { + log.Println("[mesos] Missing timeout value, setting default value (100ms)") + m.Timeout = 100 + } + + ts := strconv.Itoa(m.Timeout) + "ms" + + resp, err := http.Get("http://" + a + "/metrics/snapshot?timeout=" + ts) if err != nil { return err @@ -290,9 +297,7 @@ func (m *Mesos) gatherMetrics(a string, acc telegraf.Accumulator) error { return errors.New("Error decoding JSON response") } - //if len(m.Blacklist) > 0 { - // m.removeGroup(&jsonOut) - //} + m.removeGroup(&jsonOut) jf := internal.JSONFlattener{} diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go index f85f94f0f..6b0a1beae 100644 --- a/plugins/inputs/mesos/mesos_test.go +++ b/plugins/inputs/mesos/mesos_test.go @@ -6,6 +6,7 @@ import ( "net/http" "net/http/httptest" "os" + "reflect" "testing" "github.com/influxdata/telegraf/testutil" @@ -86,9 +87,6 @@ func TestMesosMaster(t *testing.T) { } func TestRemoveGroup(t *testing.T) { - //t.Skip("needs refactoring") - // FIXME: removeGroup() behavior is the opposite as it was, - // this test has to be refactored generateMetrics() m := Mesos{ @@ -111,3 +109,42 @@ func TestRemoveGroup(t *testing.T) { } } } + +func TestMasterBlocks(t *testing.T) { + a := "wrong_key" + expect := []string{} + got := masterBlocks(a) + + if !reflect.DeepEqual(got, expect) { + t.Errorf("Expected empty string slice, got: %v", got) + } +} + +func TestSampleConfig(t *testing.T) { + expect := ` + # Timeout, in ms. + timeout = 100 + # A list of Mesos masters. e.g. master1:5050, master2:5080, etc. + # The port can be skipped if using the default (5050) + # Default value is localhost:5050. + servers = ["localhost:5050"] + # Metrics groups to be collected. + # Default, all enabled. + metrics_collection = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"] +` + + got := new(Mesos).SampleConfig() + + if expect != got { + t.Errorf("Got %s", got) + } +} + +func TestDescription(t *testing.T) { + expect := "Telegraf plugin for gathering metrics from N Mesos masters" + got := new(Mesos).Description() + + if expect != got { + t.Errorf("Got %s", got) + } +} From 7170280401fd777d28bbcbe6c57eac7143f2011c Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Tue, 9 Feb 2016 23:57:48 +0100 Subject: [PATCH 031/287] fix(import): Json parser lives outside internal * Fixed import for JSONFlattener{} it's now in parsers, broke after rebasing. --- plugins/inputs/mesos/mesos.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 29c424815..800843236 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -12,8 +12,8 @@ import ( "sync" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" + jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" ) type Mesos struct { @@ -299,7 +299,7 @@ func (m *Mesos) gatherMetrics(a string, acc telegraf.Accumulator) error { m.removeGroup(&jsonOut) - jf := internal.JSONFlattener{} + jf := jsonparser.JSONFlattener{} err = jf.FlattenJSON("", jsonOut) From 8c6a6604cef9819e243b20338163bc97cb093099 Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Wed, 10 Feb 2016 00:05:58 +0100 Subject: [PATCH 032/287] Comments and cleanup --- plugins/inputs/mesos/mesos.go | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 800843236..c53fc65c9 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -27,6 +27,18 @@ var defaultMetrics = []string{ "tasks", "messages", "evqueue", "messages", "registrar", } +var sampleConfig = ` + # Timeout, in ms. + timeout = 100 + # A list of Mesos masters. e.g. master1:5050, master2:5080, etc. + # The port can be skipped if using the default (5050) + # Default value is localhost:5050. + servers = ["localhost:5050"] + # Metrics groups to be collected. + # Default, all enabled. + metrics_collection = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"] +` + // SampleConfig returns a sample configuration block func (m *Mesos) SampleConfig() string { return sampleConfig @@ -37,6 +49,7 @@ func (m *Mesos) Description() string { return "Telegraf plugin for gathering metrics from N Mesos masters" } +// Gather() metrics from given list of Mesos Masters func (m *Mesos) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup var errorChannel chan error @@ -60,6 +73,7 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error { close(errorChannel) errorStrings := []string{} + // Gather all errors for returning them at once for err := range errorChannel { if err != nil { errorStrings = append(errorStrings, err.Error()) @@ -72,6 +86,7 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error { return nil } +// metricsDiff() returns set names for removal func metricsDiff(w []string) []string { b := []string{} s := make(map[string]bool) @@ -93,6 +108,7 @@ func metricsDiff(w []string) []string { return b } +// masterBlocks serves as kind of metrics registry groupping them in sets func masterBlocks(g string) []string { var m map[string][]string @@ -233,19 +249,7 @@ func masterBlocks(g string) []string { return ret } -var sampleConfig = ` - # Timeout, in ms. - timeout = 100 - # A list of Mesos masters. e.g. master1:5050, master2:5080, etc. - # The port can be skipped if using the default (5050) - # Default value is localhost:5050. - servers = ["localhost:5050"] - # Metrics groups to be collected. - # Default, all enabled. - metrics_collection = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"] -` - -// removeGroup(), remove unwanted groups +// removeGroup(), remove unwanted sets func (m *Mesos) removeGroup(j *map[string]interface{}) { var ok bool From 7d10986f10251bb77f7e52f5e54604b32a621cf3 Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Thu, 11 Feb 2016 00:57:36 +0100 Subject: [PATCH 033/287] test(unit): Test for whitelisted metrics --- plugins/inputs/mesos/mesos_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go index 6b0a1beae..297e0d2b8 100644 --- a/plugins/inputs/mesos/mesos_test.go +++ b/plugins/inputs/mesos/mesos_test.go @@ -75,6 +75,7 @@ func TestMesosMaster(t *testing.T) { m := Mesos{ Servers: []string{ts.Listener.Addr().String()}, + Timeout: 10, } err := m.Gather(&acc) @@ -108,6 +109,13 @@ func TestRemoveGroup(t *testing.T) { } } } + for _, v := range m.MetricsCol { + for _, x := range masterBlocks(v) { + if _, ok := mesosMetrics[x]; !ok { + t.Errorf("Didn't find key %s, it should present.", x) + } + } + } } func TestMasterBlocks(t *testing.T) { From 3cc2cda026feb71a7ffc774978de929ce3ab310d Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Thu, 11 Feb 2016 01:06:51 +0100 Subject: [PATCH 034/287] refactor(naming): For master specific settings * This should help backwards compatibility when adding more features or supported Mesos components --- plugins/inputs/mesos/mesos.go | 18 +++++++++--------- plugins/inputs/mesos/mesos_test.go | 10 +++++----- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index c53fc65c9..843ebb0ef 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -18,8 +18,8 @@ import ( type Mesos struct { Timeout int - Servers []string - MetricsCol []string `toml:"metrics_collection"` + Masters []string + MasterCols []string `toml:"metrics_collection"` } var defaultMetrics = []string{ @@ -33,10 +33,10 @@ var sampleConfig = ` # A list of Mesos masters. e.g. master1:5050, master2:5080, etc. # The port can be skipped if using the default (5050) # Default value is localhost:5050. - servers = ["localhost:5050"] + masters = ["localhost:5050"] # Metrics groups to be collected. # Default, all enabled. - metrics_collection = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"] + master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"] ` // SampleConfig returns a sample configuration block @@ -54,13 +54,13 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup var errorChannel chan error - if len(m.Servers) == 0 { - m.Servers = []string{"localhost:5050"} + if len(m.Masters) == 0 { + m.Masters = []string{"localhost:5050"} } - errorChannel = make(chan error, len(m.Servers)*2) + errorChannel = make(chan error, len(m.Masters)*2) - for _, v := range m.Servers { + for _, v := range m.Masters { wg.Add(1) go func() { errorChannel <- m.gatherMetrics(v, acc) @@ -253,7 +253,7 @@ func masterBlocks(g string) []string { func (m *Mesos) removeGroup(j *map[string]interface{}) { var ok bool - b := metricsDiff(m.MetricsCol) + b := metricsDiff(m.MasterCols) for _, k := range b { for _, v := range masterBlocks(k) { diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go index 297e0d2b8..3c9d0ca6d 100644 --- a/plugins/inputs/mesos/mesos_test.go +++ b/plugins/inputs/mesos/mesos_test.go @@ -74,7 +74,7 @@ func TestMesosMaster(t *testing.T) { var acc testutil.Accumulator m := Mesos{ - Servers: []string{ts.Listener.Addr().String()}, + Masters: []string{ts.Listener.Addr().String()}, Timeout: 10, } @@ -91,7 +91,7 @@ func TestRemoveGroup(t *testing.T) { generateMetrics() m := Mesos{ - MetricsCol: []string{ + MasterCols: []string{ "resources", "master", "registrar", }, } @@ -109,7 +109,7 @@ func TestRemoveGroup(t *testing.T) { } } } - for _, v := range m.MetricsCol { + for _, v := range m.MasterCols { for _, x := range masterBlocks(v) { if _, ok := mesosMetrics[x]; !ok { t.Errorf("Didn't find key %s, it should present.", x) @@ -135,10 +135,10 @@ func TestSampleConfig(t *testing.T) { # A list of Mesos masters. e.g. master1:5050, master2:5080, etc. # The port can be skipped if using the default (5050) # Default value is localhost:5050. - servers = ["localhost:5050"] + masters = ["localhost:5050"] # Metrics groups to be collected. # Default, all enabled. - metrics_collection = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"] + master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"] ` got := new(Mesos).SampleConfig() From 3573d93855b06e586bde2176bd15f44d36f14c0c Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Thu, 11 Feb 2016 01:18:53 +0100 Subject: [PATCH 035/287] fix(vet): Range var used by goroutine * Use it as a paramater for the closure --- plugins/inputs/mesos/mesos.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 843ebb0ef..0f9bd5573 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -62,11 +62,11 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error { for _, v := range m.Masters { wg.Add(1) - go func() { - errorChannel <- m.gatherMetrics(v, acc) + go func(c string) { + errorChannel <- m.gatherMetrics(c, acc) wg.Done() return - }() + }(v) } wg.Wait() From 38ac9d2ecf763e565c63b56a9357f8fa2d445847 Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Thu, 11 Feb 2016 01:37:24 +0100 Subject: [PATCH 036/287] List mesos in main README And on the test configuration file --- README.md | 1 + internal/config/testdata/telegraf-agent.toml | 12 ++++++++++++ 2 files changed, 13 insertions(+) diff --git a/README.md b/README.md index 407107602..a2b7c39c9 100644 --- a/README.md +++ b/README.md @@ -169,6 +169,7 @@ Currently implemented sources: * lustre2 * mailchimp * memcached +* mesos * mongodb * mysql * net_response diff --git a/internal/config/testdata/telegraf-agent.toml b/internal/config/testdata/telegraf-agent.toml index b2ffa0cf0..e1430b954 100644 --- a/internal/config/testdata/telegraf-agent.toml +++ b/internal/config/testdata/telegraf-agent.toml @@ -184,6 +184,18 @@ # If no servers are specified, then localhost is used as the host. servers = ["localhost"] +# Telegraf plugin for gathering metrics from N Mesos masters +[[inputs.mesos]] + # Timeout, in ms. + timeout = 100 + # A list of Mesos masters. e.g. master1:5050, master2:5080, etc. + # The port can be skipped if using the default (5050) + # Default value is localhost:5050. + masters = ["localhost:5050"] + # Metrics groups to be collected. + # Default, all enabled. + master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"] + # Read metrics from one or many MongoDB servers [[inputs.mongodb]] # An array of URI to gather stats about. Specify an ip or hostname From 29c671ce462f36cfa752cdaf1a8fe6f072939dce Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Thu, 11 Feb 2016 01:54:05 +0100 Subject: [PATCH 037/287] fix(mesos): TOML annotation * It was still using the previous config name --- plugins/inputs/mesos/mesos.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 0f9bd5573..b70f3c7dc 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -19,7 +19,7 @@ import ( type Mesos struct { Timeout int Masters []string - MasterCols []string `toml:"metrics_collection"` + MasterCols []string `toml:"master_collections"` } var defaultMetrics = []string{ From c8365b3b7ebc1e5adf514983dfae8d9c759f5be6 Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Thu, 11 Feb 2016 18:55:00 +0100 Subject: [PATCH 038/287] test(unit): Removed useless tests --- plugins/inputs/mesos/mesos_test.go | 40 ------------------------------ 1 file changed, 40 deletions(-) diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go index 3c9d0ca6d..c56580649 100644 --- a/plugins/inputs/mesos/mesos_test.go +++ b/plugins/inputs/mesos/mesos_test.go @@ -6,7 +6,6 @@ import ( "net/http" "net/http/httptest" "os" - "reflect" "testing" "github.com/influxdata/telegraf/testutil" @@ -117,42 +116,3 @@ func TestRemoveGroup(t *testing.T) { } } } - -func TestMasterBlocks(t *testing.T) { - a := "wrong_key" - expect := []string{} - got := masterBlocks(a) - - if !reflect.DeepEqual(got, expect) { - t.Errorf("Expected empty string slice, got: %v", got) - } -} - -func TestSampleConfig(t *testing.T) { - expect := ` - # Timeout, in ms. - timeout = 100 - # A list of Mesos masters. e.g. master1:5050, master2:5080, etc. - # The port can be skipped if using the default (5050) - # Default value is localhost:5050. - masters = ["localhost:5050"] - # Metrics groups to be collected. - # Default, all enabled. - master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"] -` - - got := new(Mesos).SampleConfig() - - if expect != got { - t.Errorf("Got %s", got) - } -} - -func TestDescription(t *testing.T) { - expect := "Telegraf plugin for gathering metrics from N Mesos masters" - got := new(Mesos).Description() - - if expect != got { - t.Errorf("Got %s", got) - } -} From e5f3acd139aec38c91d8f0ec1aaeb3f3ca4f7c6d Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Tue, 16 Feb 2016 09:18:59 +0100 Subject: [PATCH 039/287] doc(readme): Added README.md. --- plugins/inputs/mesos/README.md | 168 +++++++++++++++++++++++++++++++++ 1 file changed, 168 insertions(+) create mode 100644 plugins/inputs/mesos/README.md diff --git a/plugins/inputs/mesos/README.md b/plugins/inputs/mesos/README.md new file mode 100644 index 000000000..603d3c7f5 --- /dev/null +++ b/plugins/inputs/mesos/README.md @@ -0,0 +1,168 @@ +# Mesos Input Plugin + +This input plugin gathers metrics from Mesos (*currently only Mesos masters*). +For more information, please check the [Mesos Observability Metrics](http://mesos.apache.org/documentation/latest/monitoring/) page. + +### Configuration: + +```toml +# Telegraf plugin for gathering metrics from N Mesos masters +[[inputs.mesos]] + # Timeout, in ms. + timeout = 100 + # A list of Mesos masters. e.g. master1:5050, master2:5080, etc. + # The port can be skipped if using the default (5050) + # Default value is localhost:5050. + masters = ["localhost:5050"] + # Metrics groups to be collected. + # Default, all enabled. + master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"] +``` + +### Measurements & Fields: + +Mesos master metric groups + +- resources + - master/cpus_percent + - master/cpus_used + - master/cpus_total + - master/cpus_revocable_percent + - master/cpus_revocable_total + - master/cpus_revocable_used + - master/disk_percent + - master/disk_used + - master/disk_total + - master/disk_revocable_percent + - master/disk_revocable_total + - master/disk_revocable_used + - master/mem_percent + - master/mem_used + - master/mem_total + - master/mem_revocable_percent + - master/mem_revocable_total + - master/mem_revocable_used + +- master + - master/elected + - master/uptime_secs + +- system + - system/cpus_total + - system/load_15min + - system/load_5min + - system/load_1min + - system/mem_free_bytes + - system/mem_total_bytes + +- slaves + - master/slave_registrations + - master/slave_removals + - master/slave_reregistrations + - master/slave_shutdowns_scheduled + - master/slave_shutdowns_canceled + - master/slave_shutdowns_completed + - master/slaves_active + - master/slaves_connected + - master/slaves_disconnected + - master/slaves_inactive + +- frameworks + - master/frameworks_active + - master/frameworks_connected + - master/frameworks_disconnected + - master/frameworks_inactive + - master/outstanding_offers + +- tasks + - master/tasks_error + - master/tasks_failed + - master/tasks_finished + - master/tasks_killed + - master/tasks_lost + - master/tasks_running + - master/tasks_staging + - master/tasks_starting + +- messages + - master/invalid_executor_to_framework_messages + - master/invalid_framework_to_executor_messages + - master/invalid_status_update_acknowledgements + - master/invalid_status_updates + - master/dropped_messages + - master/messages_authenticate + - master/messages_deactivate_framework + - master/messages_decline_offers + - master/messages_executor_to_framework + - master/messages_exited_executor + - master/messages_framework_to_executor + - master/messages_kill_task + - master/messages_launch_tasks + - master/messages_reconcile_tasks + - master/messages_register_framework + - master/messages_register_slave + - master/messages_reregister_framework + - master/messages_reregister_slave + - master/messages_resource_request + - master/messages_revive_offers + - master/messages_status_update + - master/messages_status_update_acknowledgement + - master/messages_unregister_framework + - master/messages_unregister_slave + - master/messages_update_slave + - master/recovery_slave_removals + - master/slave_removals/reason_registered + - master/slave_removals/reason_unhealthy + - master/slave_removals/reason_unregistered + - master/valid_framework_to_executor_messages + - master/valid_status_update_acknowledgements + - master/valid_status_updates + - master/task_lost/source_master/reason_invalid_offers + - master/task_lost/source_master/reason_slave_removed + - master/task_lost/source_slave/reason_executor_terminated + - master/valid_executor_to_framework_messages + +- evqueue + - master/event_queue_dispatches + - master/event_queue_http_requests + - master/event_queue_messages + +- registrar + - registrar/state_fetch_ms + - registrar/state_store_ms + - registrar/state_store_ms/max + - registrar/state_store_ms/min + - registrar/state_store_ms/p50 + - registrar/state_store_ms/p90 + - registrar/state_store_ms/p95 + - registrar/state_store_ms/p99 + - registrar/state_store_ms/p999 + - registrar/state_store_ms/p9999 + +### Tags: + +- All measurements have the following tags: + - server + +### Example Output: + +``` +$ telegraf -config ~/mesos.conf -input-filter mesos -test +* Plugin: mesos, Collection 1 +mesos,server=172.17.8.101 allocator/event_queue_dispatches=0,master/cpus_percent=0, +master/cpus_revocable_percent=0,master/cpus_revocable_total=0, +master/cpus_revocable_used=0,master/cpus_total=2, +master/cpus_used=0,master/disk_percent=0,master/disk_revocable_percent=0, +master/disk_revocable_total=0,master/disk_revocable_used=0,master/disk_total=10823, +master/disk_used=0,master/dropped_messages=2,master/elected=1, +master/event_queue_dispatches=10,master/event_queue_http_requests=0, +master/event_queue_messages=0,master/frameworks_active=2,master/frameworks_connected=2, +master/frameworks_disconnected=0,master/frameworks_inactive=0, +master/invalid_executor_to_framework_messages=0, +master/invalid_framework_to_executor_messages=0, +master/invalid_status_update_acknowledgements=0,master/invalid_status_updates=0,master/mem_percent=0, +master/mem_revocable_percent=0,master/mem_revocable_total=0, +master/mem_revocable_used=0,master/mem_total=1002, +master/mem_used=0,master/messages_authenticate=0, +master/messages_deactivate_framework=0 ... +``` From 1f77b75e14b0316afe298fd9c452028ed8c6da15 Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Wed, 17 Feb 2016 02:19:26 +0100 Subject: [PATCH 040/287] fix(sample): Made TOML parser happy again --- plugins/inputs/mesos/mesos.go | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index b70f3c7dc..5bcda7970 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -30,12 +30,9 @@ var defaultMetrics = []string{ var sampleConfig = ` # Timeout, in ms. timeout = 100 - # A list of Mesos masters. e.g. master1:5050, master2:5080, etc. - # The port can be skipped if using the default (5050) - # Default value is localhost:5050. + # A list of Mesos masters, default value is localhost:5050. masters = ["localhost:5050"] - # Metrics groups to be collected. - # Default, all enabled. + # Metrics groups to be collected, by default, all enabled. master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"] ` From 963c51f4739d307e684a64a76e1044ebc14bcac9 Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Thu, 18 Feb 2016 09:01:50 +0100 Subject: [PATCH 041/287] fix(config): Made sample config consistent. closes #682 --- CHANGELOG.md | 1 + internal/config/testdata/telegraf-agent.toml | 7 ++----- plugins/inputs/mesos/README.md | 7 ++----- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2cdc09bac..d39cdf9c9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,7 @@ and is in the `[agent]` config section. - [#695](https://github.com/influxdata/telegraf/pull/695): raindrops input plugin. Thanks @burdandrei! - [#650](https://github.com/influxdata/telegraf/pull/650): net_response input plugin. Thanks @titilambert! - [#699](https://github.com/influxdata/telegraf/pull/699): Flush based on buffer size rather than time. +- [#682](https://github.com/influxdata/telegraf/pull/682): Mesos input plugin. Thanks @tripledes! ### Bugfixes - [#443](https://github.com/influxdata/telegraf/issues/443): Fix Ping command timeout parameter on Linux. diff --git a/internal/config/testdata/telegraf-agent.toml b/internal/config/testdata/telegraf-agent.toml index e1430b954..1e6a6a276 100644 --- a/internal/config/testdata/telegraf-agent.toml +++ b/internal/config/testdata/telegraf-agent.toml @@ -188,12 +188,9 @@ [[inputs.mesos]] # Timeout, in ms. timeout = 100 - # A list of Mesos masters. e.g. master1:5050, master2:5080, etc. - # The port can be skipped if using the default (5050) - # Default value is localhost:5050. + # A list of Mesos masters, default value is localhost:5050. masters = ["localhost:5050"] - # Metrics groups to be collected. - # Default, all enabled. + # Metrics groups to be collected, by default, all enabled. master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"] # Read metrics from one or many MongoDB servers diff --git a/plugins/inputs/mesos/README.md b/plugins/inputs/mesos/README.md index 603d3c7f5..20a6dd244 100644 --- a/plugins/inputs/mesos/README.md +++ b/plugins/inputs/mesos/README.md @@ -10,12 +10,9 @@ For more information, please check the [Mesos Observability Metrics](http://meso [[inputs.mesos]] # Timeout, in ms. timeout = 100 - # A list of Mesos masters. e.g. master1:5050, master2:5080, etc. - # The port can be skipped if using the default (5050) - # Default value is localhost:5050. + # A list of Mesos masters, default value is localhost:5050. masters = ["localhost:5050"] - # Metrics groups to be collected. - # Default, all enabled. + # Metrics groups to be collected, by default, all enabled. master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"] ``` From b14cfd6c643792027f0b669015c9eae68c3f43d1 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 18 Feb 2016 10:08:15 -0700 Subject: [PATCH 042/287] Add Configuration to statsd input readme closes #714 --- plugins/inputs/statsd/README.md | 51 +++++++++++++++++++++++++++++---- 1 file changed, 46 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md index 49b8ff842..1ed8c72a2 100644 --- a/plugins/inputs/statsd/README.md +++ b/plugins/inputs/statsd/README.md @@ -1,6 +1,47 @@ # Telegraf Service Plugin: statsd -#### Description +### Configuration + +```toml +# Statsd Server +[[inputs.statsd]] + ### Address and port to host UDP listener on + service_address = ":8125" + ### Delete gauges every interval (default=false) + delete_gauges = false + ### Delete counters every interval (default=false) + delete_counters = false + ### Delete sets every interval (default=false) + delete_sets = false + ### Delete timings & histograms every interval (default=true) + delete_timings = true + ### Percentiles to calculate for timing & histogram stats + percentiles = [90] + + ### convert measurement names, "." to "_" and "-" to "__" + convert_names = true + + ### Statsd data translation templates, more info can be read here: + ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md#graphite + # templates = [ + # "cpu.* measurement*" + # ] + + ### Number of UDP messages allowed to queue up, once filled, + ### the statsd server will start dropping packets + allowed_pending_messages = 10000 + + ### Number of timing/histogram values to track per-measurement in the + ### calculation of percentiles. Raising this limit increases the accuracy + ### of percentiles but also increases the memory usage and cpu time. + percentile_limit = 1000 + + ### UDP packet size for the server to listen for. This will depend on the size + ### of the packets that the client is sending, which is usually 1500 bytes. + udp_packet_size = 1500 +``` + +### Description The statsd plugin is a special type of plugin which runs a backgrounded statsd listener service while telegraf is running. @@ -42,7 +83,7 @@ The string `foo:1|c:200|ms` is internally split into two individual metrics `foo:1|c` and `foo:200|ms` which are added to the aggregator separately. -#### Influx Statsd +### Influx Statsd In order to take advantage of InfluxDB's tagging system, we have made a couple additions to the standard statsd protocol. First, you can specify @@ -59,7 +100,7 @@ COMING SOON: there will be a way to specify multiple fields. current.users,service=payroll,server=host01:west=10,east=10,central=2,south=10|g ``` --> -#### Measurements: +### Measurements: Meta: - tags: `metric_type=` @@ -99,7 +140,7 @@ metric type: period are below x. The most common value that people use for `P` is the `90`, this is a great number to try to optimize. -#### Plugin arguments +### Plugin arguments - **service_address** string: Address to listen for statsd UDP packets on - **delete_gauges** boolean: Delete gauges on every collection interval @@ -115,7 +156,7 @@ the accuracy of percentiles but also increases the memory usage and cpu time. - **templates** []string: Templates for transforming statsd buckets into influx measurements and tags. -#### Statsd bucket -> InfluxDB line-protocol Templates +### Statsd bucket -> InfluxDB line-protocol Templates The plugin supports specifying templates for transforming statsd buckets into InfluxDB measurement names and tags. The templates have a _measurement_ keyword, From 1837f8328237dcd7573ca7bca498b5a2d7fc8201 Mon Sep 17 00:00:00 2001 From: Gabriel Levine Date: Fri, 12 Feb 2016 21:37:51 -0500 Subject: [PATCH 043/287] cleaned up the httpjson POST function. closes #688 closes #394 --- CHANGELOG.md | 1 + plugins/inputs/httpjson/httpjson.go | 25 +++++++++++++++++++------ 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d39cdf9c9..68dfb8dcc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,7 @@ and is in the `[agent]` config section. - [#443](https://github.com/influxdata/telegraf/issues/443): Fix Ping command timeout parameter on Linux. - [#662](https://github.com/influxdata/telegraf/pull/667): Change `[tags]` to `[global_tags]` to fix multiple-plugin tags bug. - [#642](https://github.com/influxdata/telegraf/issues/642): Riemann output plugin issues. +- [#394](https://github.com/influxdata/telegraf/issues/394): Support HTTP POST. Thanks @gabelev! ## v0.10.2 [2016-02-04] diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index 4c3f4dee5..fa6ab70db 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -1,6 +1,7 @@ package httpjson import ( + "bytes" "errors" "fmt" "io/ioutil" @@ -58,7 +59,7 @@ var sampleConfig = ` "http://localhost:9998/stats/", ] - ### HTTP method to use (case-sensitive) + ### HTTP method to use: GET or POST (case-sensitive) method = "GET" ### List of tag names to extract from top-level of JSON server response @@ -166,7 +167,8 @@ func (h *HttpJson) gatherServer( return nil } -// Sends an HTTP request to the server using the HttpJson object's HTTPClient +// Sends an HTTP request to the server using the HttpJson object's HTTPClient. +// This request can be either a GET or a POST. // Parameters: // serverURL: endpoint to send request to // @@ -181,13 +183,24 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) { } params := url.Values{} - for k, v := range h.Parameters { - params.Add(k, v) + data := url.Values{} + + switch { + case h.Method == "GET": + requestURL.RawQuery = params.Encode() + for k, v := range h.Parameters { + params.Add(k, v) + } + + case h.Method == "POST": + requestURL.RawQuery = "" + for k, v := range h.Parameters { + data.Add(k, v) + } } - requestURL.RawQuery = params.Encode() // Create + send request - req, err := http.NewRequest(h.Method, requestURL.String(), nil) + req, err := http.NewRequest(h.Method, requestURL.String(), bytes.NewBufferString(data.Encode())) if err != nil { return "", -1, err } From a13d19c58255be6e296a7fb01b7bd836985808cb Mon Sep 17 00:00:00 2001 From: "Dragostin Yanev (netixen)" Date: Thu, 18 Feb 2016 21:21:20 +0200 Subject: [PATCH 044/287] pugins/outputs/influxdb: Prevent runtime panic. - Check and return error from NewBatchPoints to prevent runtime panic if user provides an unparsable precision time unit in config. - Provide correct sample config precision examples. - Update etc/telegraf.conf precision comment. closes #715 --- CHANGELOG.md | 1 + etc/telegraf.conf | 2 +- plugins/outputs/influxdb/influxdb.go | 9 ++++++--- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 68dfb8dcc..e8fd5d8f3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ and is in the `[agent]` config section. - [#662](https://github.com/influxdata/telegraf/pull/667): Change `[tags]` to `[global_tags]` to fix multiple-plugin tags bug. - [#642](https://github.com/influxdata/telegraf/issues/642): Riemann output plugin issues. - [#394](https://github.com/influxdata/telegraf/issues/394): Support HTTP POST. Thanks @gabelev! +- [#715](https://github.com/influxdata/telegraf/pull/715): Fix influxdb precision config panic. Thanks @netixen! ## v0.10.2 [2016-02-04] diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 5095f3bdf..037f730bf 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -63,7 +63,7 @@ urls = ["http://localhost:8086"] # required # The target database for metrics (telegraf will create it if not exists) database = "telegraf" # required - # Precision of writes, valid values are n, u, ms, s, m, and h + # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". # note: using second precision greatly helps InfluxDB compression precision = "s" diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 52fd8039b..cb235f903 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -48,7 +48,7 @@ var sampleConfig = ` urls = ["http://localhost:8086"] # required ### The target database for metrics (telegraf will create it if not exists) database = "telegraf" # required - ### Precision of writes, valid values are n, u, ms, s, m, and h + ### Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". ### note: using "s" precision greatly improves InfluxDB compression precision = "s" @@ -156,17 +156,20 @@ func (i *InfluxDB) Description() string { // Choose a random server in the cluster to write to until a successful write // occurs, logging each unsuccessful. If all servers fail, return error. func (i *InfluxDB) Write(metrics []telegraf.Metric) error { - bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ + bp, err := client.NewBatchPoints(client.BatchPointsConfig{ Database: i.Database, Precision: i.Precision, }) + if err != nil { + return err + } for _, metric := range metrics { bp.AddPoint(metric.Point()) } // This will get set to nil if a successful write occurs - err := errors.New("Could not write to any InfluxDB server in cluster") + err = errors.New("Could not write to any InfluxDB server in cluster") p := rand.Perm(len(i.conns)) for _, n := range p { From 7def6663bd0ebd573fe98c22ec5d9449d1e9b410 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 18 Feb 2016 13:37:36 -0700 Subject: [PATCH 045/287] Root directory cleanup --- CHANGELOG.md | 4 ++-- CONTRIBUTING.md | 8 ++++---- README.md | 2 +- CONFIGURATION.md => docs/CONFIGURATION.md | 0 DATA_FORMATS_INPUT.md => docs/DATA_FORMATS_INPUT.md | 8 ++++---- DATA_FORMATS_OUTPUT.md => docs/DATA_FORMATS_OUTPUT.md | 6 +++--- .../LICENSE_OF_DEPENDENCIES.md | 0 plugins/inputs/exec/exec.go | 2 +- plugins/inputs/kafka_consumer/README.md | 2 +- plugins/inputs/kafka_consumer/kafka_consumer.go | 2 +- plugins/inputs/mqtt_consumer/README.md | 4 ++-- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 2 +- plugins/inputs/nats_consumer/README.md | 4 ++-- plugins/inputs/nats_consumer/nats_consumer.go | 2 +- plugins/inputs/statsd/README.md | 2 +- plugins/inputs/statsd/statsd.go | 2 +- plugins/outputs/amqp/amqp.go | 2 +- plugins/outputs/file/file.go | 2 +- plugins/outputs/kafka/kafka.go | 2 +- plugins/outputs/mqtt/mqtt.go | 2 +- plugins/outputs/nsq/nsq.go | 2 +- build.py => scripts/build.py | 0 22 files changed, 30 insertions(+), 30 deletions(-) rename CONFIGURATION.md => docs/CONFIGURATION.md (100%) rename DATA_FORMATS_INPUT.md => docs/DATA_FORMATS_INPUT.md (95%) rename DATA_FORMATS_OUTPUT.md => docs/DATA_FORMATS_OUTPUT.md (91%) rename LICENSE_OF_DEPENDENCIES.md => docs/LICENSE_OF_DEPENDENCIES.md (100%) rename build.py => scripts/build.py (100%) diff --git a/CHANGELOG.md b/CHANGELOG.md index e8fd5d8f3..c264ce94b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,9 +8,9 @@ format that they would like to parse. Currently supports: "json", "influx", and - Users of message broker and file output plugins can now choose what data format they would like to output. Currently supports: "influx" and "graphite" - More info on parsing _incoming_ data formats can be found -[here](https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md) +[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md) - More info on serializing _outgoing_ data formats can be found -[here](https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md) +[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md) - Telegraf now has an option `flush_buffer_when_full` that will flush the metric buffer whenever it fills up for each output, rather than dropping points and only flushing on a set time interval. This will default to `true` diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7eb08a2d5..9ab185d8c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -101,7 +101,7 @@ Some input plugins (such as [exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec)) accept arbitrary input data formats. An overview of these data formats can be found -[here](https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md). +[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). In order to enable this, you must specify a `SetParser(parser parsers.Parser)` function on the plugin object (see the exec plugin for an example), as well as @@ -117,7 +117,7 @@ You should also add the following to your SampleConfig() return: ### Data format to consume. This can be "json", "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ``` @@ -230,7 +230,7 @@ Some output plugins (such as [file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file)) can write arbitrary output data formats. An overview of these data formats can be found -[here](https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md). +[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md). In order to enable this, you must specify a `SetSerializer(serializer serializers.Serializer)` @@ -247,7 +247,7 @@ You should also add the following to your SampleConfig() return: ### Data format to output. This can be "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ``` diff --git a/README.md b/README.md index a2b7c39c9..e15cb822b 100644 --- a/README.md +++ b/README.md @@ -141,7 +141,7 @@ Examples: ## Configuration -See the [configuration guide](CONFIGURATION.md) for a rundown of the more advanced +See the [configuration guide](docs/CONFIGURATION.md) for a rundown of the more advanced configuration options. ## Supported Input Plugins diff --git a/CONFIGURATION.md b/docs/CONFIGURATION.md similarity index 100% rename from CONFIGURATION.md rename to docs/CONFIGURATION.md diff --git a/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md similarity index 95% rename from DATA_FORMATS_INPUT.md rename to docs/DATA_FORMATS_INPUT.md index 16870adc7..3e230519b 100644 --- a/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -34,7 +34,7 @@ example, in the exec plugin: ### Data format to consume. This can be "json", "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "json" ### Additional configuration options go here @@ -61,7 +61,7 @@ metrics are parsed directly into Telegraf metrics. ### Data format to consume. This can be "json", "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ``` @@ -106,7 +106,7 @@ For example, if you had this configuration: ### Data format to consume. This can be "json", "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "json" ### List of tag names to extract from top-level of JSON server response @@ -250,7 +250,7 @@ There are many more options available, ### Data format to consume. This can be "json", "influx" or "graphite" (line-protocol) ### Each data format has it's own unique set of configuration options, read ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "graphite" ### This string will be used to join the matched values. diff --git a/DATA_FORMATS_OUTPUT.md b/docs/DATA_FORMATS_OUTPUT.md similarity index 91% rename from DATA_FORMATS_OUTPUT.md rename to docs/DATA_FORMATS_OUTPUT.md index 0ad019b10..30f6e63de 100644 --- a/DATA_FORMATS_OUTPUT.md +++ b/docs/DATA_FORMATS_OUTPUT.md @@ -32,7 +32,7 @@ config option, for example, in the `file` output plugin: ### Data format to output. This can be "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ### Additional configuration options go here @@ -56,7 +56,7 @@ metrics are serialized directly into InfluxDB line-protocol. ### Data format to output. This can be "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ``` @@ -90,7 +90,7 @@ tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690 ### Data format to output. This can be "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" prefix = "telegraf" diff --git a/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md similarity index 100% rename from LICENSE_OF_DEPENDENCIES.md rename to docs/LICENSE_OF_DEPENDENCIES.md diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index e297721ba..86309bf73 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -23,7 +23,7 @@ const sampleConfig = ` ### Data format to consume. This can be "json", "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ` diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index 4fdda0c3a..2e9d8cf3d 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -25,7 +25,7 @@ from the same topic in parallel. ### Data format to consume. This can be "json", "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ``` diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 66fce3fcf..98f2b2990 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -56,7 +56,7 @@ var sampleConfig = ` ### Data format to consume. This can be "json", "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ` diff --git a/plugins/inputs/mqtt_consumer/README.md b/plugins/inputs/mqtt_consumer/README.md index 6f7fa911c..07a64e901 100644 --- a/plugins/inputs/mqtt_consumer/README.md +++ b/plugins/inputs/mqtt_consumer/README.md @@ -3,7 +3,7 @@ The [MQTT](http://mqtt.org/) consumer plugin reads from specified MQTT topics and adds messages to InfluxDB. The plugin expects messages in the -[Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md). +[Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). ### Configuration: @@ -38,7 +38,7 @@ The plugin expects messages in the ### Data format to consume. This can be "json", "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ``` diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index ac4b738d7..e9a7ef8b1 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -71,7 +71,7 @@ var sampleConfig = ` ### Data format to consume. This can be "json", "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ` diff --git a/plugins/inputs/nats_consumer/README.md b/plugins/inputs/nats_consumer/README.md index 31d13297e..42993e813 100644 --- a/plugins/inputs/nats_consumer/README.md +++ b/plugins/inputs/nats_consumer/README.md @@ -2,7 +2,7 @@ The [NATS](http://www.nats.io/about/) consumer plugin reads from specified NATS subjects and adds messages to InfluxDB. The plugin expects messages -in the [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md). +in the [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). A [Queue Group](http://www.nats.io/documentation/concepts/nats-queueing/) is used when subscribing to subjects so multiple instances of telegraf can read from a NATS cluster in parallel. @@ -26,6 +26,6 @@ from a NATS cluster in parallel. ### Data format to consume. This can be "json", "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ``` diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index 7dad47b46..c0bf50849 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -58,7 +58,7 @@ var sampleConfig = ` ### Data format to consume. This can be "json", "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ` diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md index 1ed8c72a2..294c12b84 100644 --- a/plugins/inputs/statsd/README.md +++ b/plugins/inputs/statsd/README.md @@ -22,7 +22,7 @@ convert_names = true ### Statsd data translation templates, more info can be read here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md#graphite + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite # templates = [ # "cpu.* measurement*" # ] diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index 470e31884..bc792149a 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -140,7 +140,7 @@ const sampleConfig = ` convert_names = true ### Statsd data translation templates, more info can be read here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_INPUT.md#graphite + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite # templates = [ # "cpu.* measurement*" # ] diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index d826e6d52..ea80ad6a7 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -77,7 +77,7 @@ var sampleConfig = ` ### Data format to output. This can be "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ` diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go index deae8aaf8..3d431774c 100644 --- a/plugins/outputs/file/file.go +++ b/plugins/outputs/file/file.go @@ -26,7 +26,7 @@ var sampleConfig = ` ### Data format to output. This can be "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ` diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 71c2642dd..667212f62 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -63,7 +63,7 @@ var sampleConfig = ` ### Data format to output. This can be "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ` diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index 48046878b..ea638f3da 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -35,7 +35,7 @@ var sampleConfig = ` ### Data format to output. This can be "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ` diff --git a/plugins/outputs/nsq/nsq.go b/plugins/outputs/nsq/nsq.go index 7fe9b2068..ef23fab97 100644 --- a/plugins/outputs/nsq/nsq.go +++ b/plugins/outputs/nsq/nsq.go @@ -27,7 +27,7 @@ var sampleConfig = ` ### Data format to output. This can be "influx" or "graphite" ### Each data format has it's own unique set of configuration options, read ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/DATA_FORMATS_OUTPUT.md + ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ` diff --git a/build.py b/scripts/build.py similarity index 100% rename from build.py rename to scripts/build.py From 8d2e5f0bdad8d5b8f56d4c37776256429549d797 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 18 Feb 2016 14:26:51 -0700 Subject: [PATCH 046/287] Seems to be a toml parse bug around triple pounds --- CONTRIBUTING.md | 16 ++--- docs/DATA_FORMATS_INPUT.md | 72 +++++++++---------- docs/DATA_FORMATS_OUTPUT.md | 32 ++++----- etc/telegraf.conf | 36 +++++----- internal/config/config.go | 36 +++++----- plugins/inputs/aerospike/aerospike.go | 6 +- plugins/inputs/apache/apache.go | 2 +- plugins/inputs/bcache/bcache.go | 10 +-- plugins/inputs/couchdb/couchdb.go | 4 +- plugins/inputs/disque/disque.go | 8 +-- plugins/inputs/docker/docker.go | 8 +-- plugins/inputs/dovecot/dovecot.go | 12 ++-- plugins/inputs/elasticsearch/elasticsearch.go | 8 +-- plugins/inputs/exec/README.md | 44 ++++++------ plugins/inputs/exec/exec.go | 12 ++-- .../inputs/github_webhooks/github_webhooks.go | 2 +- plugins/inputs/haproxy/haproxy.go | 10 +-- plugins/inputs/httpjson/httpjson.go | 16 ++--- plugins/inputs/influxdb/influxdb.go | 8 +-- plugins/inputs/jolokia/jolokia.go | 12 ++-- plugins/inputs/kafka_consumer/README.md | 18 ++--- .../inputs/kafka_consumer/kafka_consumer.go | 16 ++--- plugins/inputs/leofs/leofs.go | 4 +- plugins/inputs/lustre2/lustre2.go | 6 +- plugins/inputs/mailchimp/mailchimp.go | 10 +-- plugins/inputs/memcached/memcached.go | 4 +- plugins/inputs/mongodb/mongodb.go | 10 +-- plugins/inputs/mqtt_consumer/README.md | 20 +++--- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 18 ++--- plugins/inputs/mysql/mysql.go | 16 ++--- plugins/inputs/nats_consumer/README.md | 18 ++--- plugins/inputs/nats_consumer/nats_consumer.go | 16 ++--- plugins/inputs/net_response/net_response.go | 12 ++-- plugins/inputs/nginx/nginx.go | 2 +- plugins/inputs/nsq/nsq.go | 2 +- plugins/inputs/passenger/passenger.go | 16 ++--- plugins/inputs/phpfpm/phpfpm.go | 38 +++++----- plugins/inputs/ping/ping.go | 14 ++-- plugins/inputs/postgresql/postgresql.go | 28 ++++---- plugins/inputs/powerdns/powerdns.go | 4 +- plugins/inputs/procstat/procstat.go | 10 +-- plugins/inputs/prometheus/prometheus.go | 2 +- plugins/inputs/puppetagent/puppetagent.go | 2 +- plugins/inputs/rabbitmq/rabbitmq.go | 4 +- plugins/inputs/raindrops/raindrops.go | 2 +- plugins/inputs/redis/redis.go | 16 ++--- plugins/inputs/rethinkdb/rethinkdb.go | 10 +-- plugins/inputs/sensors/sensors.go | 18 ++--- plugins/inputs/snmp/snmp.go | 10 +-- plugins/inputs/sqlserver/sqlserver.go | 12 ++-- plugins/inputs/statsd/README.md | 32 ++++----- plugins/inputs/statsd/statsd.go | 32 ++++----- plugins/inputs/system/cpu.go | 6 +- plugins/inputs/system/disk.go | 4 +- plugins/inputs/system/net.go | 8 +-- plugins/inputs/trig/trig.go | 2 +- plugins/inputs/twemproxy/twemproxy.go | 4 +- .../win_perf_counters/win_perf_counters.go | 12 ++-- plugins/inputs/zfs/zfs.go | 10 +-- plugins/inputs/zookeeper/zookeeper.go | 8 +-- plugins/outputs/amon/amon.go | 6 +- plugins/outputs/amqp/amqp.go | 26 +++---- plugins/outputs/cloudwatch/cloudwatch.go | 4 +- plugins/outputs/datadog/datadog.go | 4 +- plugins/outputs/file/file.go | 10 +-- plugins/outputs/graphite/graphite.go | 6 +- plugins/outputs/influxdb/influxdb.go | 24 +++---- plugins/outputs/kafka/kafka.go | 20 +++--- plugins/outputs/kinesis/kinesis.go | 12 ++-- plugins/outputs/librato/librato.go | 14 ++-- plugins/outputs/mqtt/mqtt.go | 20 +++--- plugins/outputs/nsq/nsq.go | 12 ++-- plugins/outputs/opentsdb/opentsdb.go | 8 +-- .../prometheus_client/prometheus_client.go | 2 +- plugins/outputs/riemann/riemann.go | 6 +- 75 files changed, 502 insertions(+), 502 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9ab185d8c..afbfbf088 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -114,10 +114,10 @@ creating the `Parser` object. You should also add the following to your SampleConfig() return: ```toml - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ``` @@ -244,10 +244,10 @@ instantiating and creating the `Serializer` object. You should also add the following to your SampleConfig() return: ```toml - ### Data format to output. This can be "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + ## Data format to output. This can be "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ``` diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index 3e230519b..79528a962 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -25,19 +25,19 @@ example, in the exec plugin: ```toml [[inputs.exec]] - ### Commands array + ## Commands array commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] - ### measurement name suffix (for separating different commands) + ## measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "json" - ### Additional configuration options go here + ## Additional configuration options go here ``` Each data_format has an additional set of configuration options available, which @@ -52,16 +52,16 @@ metrics are parsed directly into Telegraf metrics. ```toml [[inputs.exec]] - ### Commands array + ## Commands array commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] - ### measurement name suffix (for separating different commands) + ## measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ``` @@ -97,19 +97,19 @@ For example, if you had this configuration: ```toml [[inputs.exec]] - ### Commands array + ## Commands array commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] - ### measurement name suffix (for separating different commands) + ## measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "json" - ### List of tag names to extract from top-level of JSON server response + ## List of tag names to extract from top-level of JSON server response tag_keys = [ "my_tag_1", "my_tag_2" @@ -241,30 +241,30 @@ There are many more options available, ```toml [[inputs.exec]] - ### Commands array + ## Commands array commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] - ### measurement name suffix (for separating different commands) + ## measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ### Data format to consume. This can be "json", "influx" or "graphite" (line-protocol) - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. This can be "json", "influx" or "graphite" (line-protocol) + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "graphite" - ### This string will be used to join the matched values. + ## This string will be used to join the matched values. separator = "_" - ### Each template line requires a template pattern. It can have an optional - ### filter before the template and separated by spaces. It can also have optional extra - ### tags following the template. Multiple tags should be separated by commas and no spaces - ### similar to the line protocol format. There can be only one default template. - ### Templates support below format: - ### 1. filter + template - ### 2. filter + template + extra tag - ### 3. filter + template with field key - ### 4. default template + ## Each template line requires a template pattern. It can have an optional + ## filter before the template and separated by spaces. It can also have optional extra + ## tags following the template. Multiple tags should be separated by commas and no spaces + ## similar to the line protocol format. There can be only one default template. + ## Templates support below format: + ## 1. filter + template + ## 2. filter + template + extra tag + ## 3. filter + template with field key + ## 4. default template templates = [ "*.app env.service.resource.measurement", "stats.* .host.measurement* region=us-west,agent=sensu", diff --git a/docs/DATA_FORMATS_OUTPUT.md b/docs/DATA_FORMATS_OUTPUT.md index 30f6e63de..524ec6d66 100644 --- a/docs/DATA_FORMATS_OUTPUT.md +++ b/docs/DATA_FORMATS_OUTPUT.md @@ -26,16 +26,16 @@ config option, for example, in the `file` output plugin: ```toml [[outputs.file]] - ### Files to write to, "stdout" is a specially handled file. + ## Files to write to, "stdout" is a specially handled file. files = ["stdout"] - ### Data format to output. This can be "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + ## Data format to output. This can be "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" - ### Additional configuration options go here + ## Additional configuration options go here ``` Each data_format has an additional set of configuration options available, which @@ -50,13 +50,13 @@ metrics are serialized directly into InfluxDB line-protocol. ```toml [[outputs.file]] - ### Files to write to, "stdout" is a specially handled file. + ## Files to write to, "stdout" is a specially handled file. files = ["stdout", "/tmp/metrics.out"] - ### Data format to output. This can be "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + ## Data format to output. This can be "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ``` @@ -84,13 +84,13 @@ tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690 ```toml [[outputs.file]] - ### Files to write to, "stdout" is a specially handled file. + ## Files to write to, "stdout" is a specially handled file. files = ["stdout", "/tmp/metrics.out"] - ### Data format to output. This can be "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + ## Data format to output. This can be "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" prefix = "telegraf" diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 037f730bf..db87251d5 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -16,37 +16,37 @@ # Configuration for telegraf agent [agent] - ### Default data collection interval for all inputs + ## Default data collection interval for all inputs interval = "10s" - ### Rounds collection interval to 'interval' - ### ie, if interval="10s" then always collect on :00, :10, :20, etc. + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. round_interval = true - ### Telegraf will cache metric_buffer_limit metrics for each output, and will - ### flush this buffer on a successful write. + ## Telegraf will cache metric_buffer_limit metrics for each output, and will + ## flush this buffer on a successful write. metric_buffer_limit = 10000 - ### Flush the buffer whenever full, regardless of flush_interval. + ## Flush the buffer whenever full, regardless of flush_interval. flush_buffer_when_full = true - ### Collection jitter is used to jitter the collection by a random amount. - ### Each plugin will sleep for a random time within jitter before collecting. - ### This can be used to avoid many plugins querying things like sysfs at the - ### same time, which can have a measurable effect on the system. + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. collection_jitter = "0s" - ### Default flushing interval for all outputs. You shouldn't set this below - ### interval. Maximum flush_interval will be flush_interval + flush_jitter + ## Default flushing interval for all outputs. You shouldn't set this below + ## interval. Maximum flush_interval will be flush_interval + flush_jitter flush_interval = "10s" - ### Jitter the flush interval by a random amount. This is primarily to avoid - ### large write spikes for users running a large number of telegraf instances. - ### ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s flush_jitter = "0s" - ### Run telegraf in debug mode + ## Run telegraf in debug mode debug = false - ### Run telegraf in quiet mode + ## Run telegraf in quiet mode quiet = false - ### Override default hostname, if empty use os.Hostname() + ## Override default hostname, if empty use os.Hostname() hostname = "" diff --git a/internal/config/config.go b/internal/config/config.go index 82246f2a4..f47cf7ea7 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -153,37 +153,37 @@ var header = `################################################################## # Configuration for telegraf agent [agent] - ### Default data collection interval for all inputs + ## Default data collection interval for all inputs interval = "10s" - ### Rounds collection interval to 'interval' - ### ie, if interval="10s" then always collect on :00, :10, :20, etc. + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. round_interval = true - ### Telegraf will cache metric_buffer_limit metrics for each output, and will - ### flush this buffer on a successful write. + ## Telegraf will cache metric_buffer_limit metrics for each output, and will + ## flush this buffer on a successful write. metric_buffer_limit = 10000 - ### Flush the buffer whenever full, regardless of flush_interval. + ## Flush the buffer whenever full, regardless of flush_interval. flush_buffer_when_full = true - ### Collection jitter is used to jitter the collection by a random amount. - ### Each plugin will sleep for a random time within jitter before collecting. - ### This can be used to avoid many plugins querying things like sysfs at the - ### same time, which can have a measurable effect on the system. + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. collection_jitter = "0s" - ### Default flushing interval for all outputs. You shouldn't set this below - ### interval. Maximum flush_interval will be flush_interval + flush_jitter + ## Default flushing interval for all outputs. You shouldn't set this below + ## interval. Maximum flush_interval will be flush_interval + flush_jitter flush_interval = "10s" - ### Jitter the flush interval by a random amount. This is primarily to avoid - ### large write spikes for users running a large number of telegraf instances. - ### ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s flush_jitter = "0s" - ### Run telegraf in debug mode + ## Run telegraf in debug mode debug = false - ### Run telegraf in quiet mode + ## Run telegraf in quiet mode quiet = false - ### Override default hostname, if empty use os.Hostname() + ## Override default hostname, if empty use os.Hostname() hostname = "" diff --git a/plugins/inputs/aerospike/aerospike.go b/plugins/inputs/aerospike/aerospike.go index e46960101..cd2ebe25c 100644 --- a/plugins/inputs/aerospike/aerospike.go +++ b/plugins/inputs/aerospike/aerospike.go @@ -104,9 +104,9 @@ type Aerospike struct { } var sampleConfig = ` - ### Aerospike servers to connect to (with port) - ### This plugin will query all namespaces the aerospike - ### server has configured and get stats for them. + ## Aerospike servers to connect to (with port) + ## This plugin will query all namespaces the aerospike + ## server has configured and get stats for them. servers = ["localhost:3000"] ` diff --git a/plugins/inputs/apache/apache.go b/plugins/inputs/apache/apache.go index faedf7f7d..b6e3e50f1 100644 --- a/plugins/inputs/apache/apache.go +++ b/plugins/inputs/apache/apache.go @@ -20,7 +20,7 @@ type Apache struct { } var sampleConfig = ` - ### An array of Apache status URI to gather stats. + ## An array of Apache status URI to gather stats. urls = ["http://localhost/server-status?auto"] ` diff --git a/plugins/inputs/bcache/bcache.go b/plugins/inputs/bcache/bcache.go index 25005fce3..1171dbd92 100644 --- a/plugins/inputs/bcache/bcache.go +++ b/plugins/inputs/bcache/bcache.go @@ -18,13 +18,13 @@ type Bcache struct { } var sampleConfig = ` - ### Bcache sets path - ### If not specified, then default is: + ## Bcache sets path + ## If not specified, then default is: bcachePath = "/sys/fs/bcache" - ### By default, telegraf gather stats for all bcache devices - ### Setting devices will restrict the stats to the specified - ### bcache devices. + ## By default, telegraf gather stats for all bcache devices + ## Setting devices will restrict the stats to the specified + ## bcache devices. bcacheDevs = ["bcache0"] ` diff --git a/plugins/inputs/couchdb/couchdb.go b/plugins/inputs/couchdb/couchdb.go index 7cec65777..ba64e4a6d 100644 --- a/plugins/inputs/couchdb/couchdb.go +++ b/plugins/inputs/couchdb/couchdb.go @@ -75,8 +75,8 @@ func (*CouchDB) Description() string { func (*CouchDB) SampleConfig() string { return ` - ### Works with CouchDB stats endpoints out of the box - ### Multiple HOSTs from which to read CouchDB stats: + ## Works with CouchDB stats endpoints out of the box + ## Multiple HOSTs from which to read CouchDB stats: hosts = ["http://localhost:8086/_stats"] ` } diff --git a/plugins/inputs/disque/disque.go b/plugins/inputs/disque/disque.go index 51457bec1..a311b6739 100644 --- a/plugins/inputs/disque/disque.go +++ b/plugins/inputs/disque/disque.go @@ -22,11 +22,11 @@ type Disque struct { } var sampleConfig = ` - ### An array of URI to gather stats about. Specify an ip or hostname - ### with optional port and password. ie disque://localhost, disque://10.10.3.33:18832, - ### 10.0.0.1:10000, etc. + ## An array of URI to gather stats about. Specify an ip or hostname + ## with optional port and password. ie disque://localhost, disque://10.10.3.33:18832, + ## 10.0.0.1:10000, etc. - ### If no servers are specified, then localhost is used as the host. + ## If no servers are specified, then localhost is used as the host. servers = ["localhost"] ` diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 60abf71d1..0d89979c1 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -21,11 +21,11 @@ type Docker struct { } var sampleConfig = ` - ### Docker Endpoint - ### To use TCP, set endpoint = "tcp://[ip]:[port]" - ### To use environment variables (ie, docker-machine), set endpoint = "ENV" + ## Docker Endpoint + ## To use TCP, set endpoint = "tcp://[ip]:[port]" + ## To use environment variables (ie, docker-machine), set endpoint = "ENV" endpoint = "unix:///var/run/docker.sock" - ### Only collect metrics for these containers, collect all if empty + ## Only collect metrics for these containers, collect all if empty container_names = [] ` diff --git a/plugins/inputs/dovecot/dovecot.go b/plugins/inputs/dovecot/dovecot.go index de9ef0cfe..75829f595 100644 --- a/plugins/inputs/dovecot/dovecot.go +++ b/plugins/inputs/dovecot/dovecot.go @@ -24,13 +24,13 @@ func (d *Dovecot) Description() string { } var sampleConfig = ` - ### specify dovecot servers via an address:port list - ### e.g. - ### localhost:24242 - ### - ### If no servers are specified, then localhost is used as the host. + ## specify dovecot servers via an address:port list + ## e.g. + ## localhost:24242 + ## + ## If no servers are specified, then localhost is used as the host. servers = ["localhost:24242"] - ### Only collect metrics for these domains, collect all if empty + ## Only collect metrics for these domains, collect all if empty domains = [] ` diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 8c2c055cb..aae97f4d7 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -59,14 +59,14 @@ type indexHealth struct { } const sampleConfig = ` - ### specify a list of one or more Elasticsearch servers + ## specify a list of one or more Elasticsearch servers servers = ["http://localhost:9200"] - ### set local to false when you want to read the indices stats from all nodes - ### within the cluster + ## set local to false when you want to read the indices stats from all nodes + ## within the cluster local = true - ### set cluster_health to true when you want to also obtain cluster level stats + ## set cluster_health to true when you want to also obtain cluster level stats cluster_health = false ` diff --git a/plugins/inputs/exec/README.md b/plugins/inputs/exec/README.md index daf800db3..eddc86ada 100644 --- a/plugins/inputs/exec/README.md +++ b/plugins/inputs/exec/README.md @@ -37,19 +37,19 @@ and strings will be ignored. # measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ### Below configuration will be used for data_format = "graphite", can be ignored for other data_format - ### If matching multiple measurement files, this string will be used to join the matched values. + ## Below configuration will be used for data_format = "graphite", can be ignored for other data_format + ## If matching multiple measurement files, this string will be used to join the matched values. #separator = "." - ### Each template line requires a template pattern. It can have an optional - ### filter before the template and separated by spaces. It can also have optional extra - ### tags following the template. Multiple tags should be separated by commas and no spaces - ### similar to the line protocol format. The can be only one default template. - ### Templates support below format: - ### 1. filter + template - ### 2. filter + template + extra tag - ### 3. filter + template with field key - ### 4. default template + ## Each template line requires a template pattern. It can have an optional + ## filter before the template and separated by spaces. It can also have optional extra + ## tags following the template. Multiple tags should be separated by commas and no spaces + ## similar to the line protocol format. The can be only one default template. + ## Templates support below format: + ## 1. filter + template + ## 2. filter + template + extra tag + ## 3. filter + template with field key + ## 4. default template #templates = [ # "*.app env.service.resource.measurement", # "stats.* .host.measurement* region=us-west,agent=sensu", @@ -141,19 +141,19 @@ We can also change the data_format to "graphite" to use the metrics collecting s # measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ### Below configuration will be used for data_format = "graphite", can be ignored for other data_format - ### If matching multiple measurement files, this string will be used to join the matched values. + ## Below configuration will be used for data_format = "graphite", can be ignored for other data_format + ## If matching multiple measurement files, this string will be used to join the matched values. separator = "." - ### Each template line requires a template pattern. It can have an optional - ### filter before the template and separated by spaces. It can also have optional extra - ### tags following the template. Multiple tags should be separated by commas and no spaces - ### similar to the line protocol format. The can be only one default template. - ### Templates support below format: - ### 1. filter + template - ### 2. filter + template + extra tag - ### 3. filter + template with field key - ### 4. default template + ## Each template line requires a template pattern. It can have an optional + ## filter before the template and separated by spaces. It can also have optional extra + ## tags following the template. Multiple tags should be separated by commas and no spaces + ## similar to the line protocol format. The can be only one default template. + ## Templates support below format: + ## 1. filter + template + ## 2. filter + template + extra tag + ## 3. filter + template with field key + ## 4. default template templates = [ "*.app env.service.resource.measurement", "stats.* .host.measurement* region=us-west,agent=sensu", diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index 86309bf73..5231fd013 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -14,16 +14,16 @@ import ( ) const sampleConfig = ` - ### Commands array + ## Commands array commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] - ### measurement name suffix (for separating different commands) + ## measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ` diff --git a/plugins/inputs/github_webhooks/github_webhooks.go b/plugins/inputs/github_webhooks/github_webhooks.go index 6dc97f5a3..bc3f184be 100644 --- a/plugins/inputs/github_webhooks/github_webhooks.go +++ b/plugins/inputs/github_webhooks/github_webhooks.go @@ -31,7 +31,7 @@ func NewGithubWebhooks() *GithubWebhooks { func (gh *GithubWebhooks) SampleConfig() string { return ` - ### Address and port to host Webhook listener on + ## Address and port to host Webhook listener on service_address = ":1618" ` } diff --git a/plugins/inputs/haproxy/haproxy.go b/plugins/inputs/haproxy/haproxy.go index 92969a057..233cd8481 100644 --- a/plugins/inputs/haproxy/haproxy.go +++ b/plugins/inputs/haproxy/haproxy.go @@ -86,13 +86,13 @@ type haproxy struct { } var sampleConfig = ` - ### An array of address to gather stats about. Specify an ip on hostname - ### with optional port. ie localhost, 10.10.3.33:1936, etc. + ## An array of address to gather stats about. Specify an ip on hostname + ## with optional port. ie localhost, 10.10.3.33:1936, etc. - ### If no servers are specified, then default to 127.0.0.1:1936 + ## If no servers are specified, then default to 127.0.0.1:1936 servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"] - ### Or you can also use local socket(not work yet) - ### servers = ["socket://run/haproxy/admin.sock"] + ## Or you can also use local socket(not work yet) + ## servers = ["socket://run/haproxy/admin.sock"] ` func (r *haproxy) SampleConfig() string { diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index fa6ab70db..d5dddd7d4 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -47,33 +47,33 @@ func (c RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) { } var sampleConfig = ` - ### NOTE This plugin only reads numerical measurements, strings and booleans - ### will be ignored. + ## NOTE This plugin only reads numerical measurements, strings and booleans + ## will be ignored. - ### a name for the service being polled + ## a name for the service being polled name = "webserver_stats" - ### URL of each server in the service's cluster + ## URL of each server in the service's cluster servers = [ "http://localhost:9999/stats/", "http://localhost:9998/stats/", ] - ### HTTP method to use: GET or POST (case-sensitive) + ## HTTP method to use: GET or POST (case-sensitive) method = "GET" - ### List of tag names to extract from top-level of JSON server response + ## List of tag names to extract from top-level of JSON server response # tag_keys = [ # "my_tag_1", # "my_tag_2" # ] - ### HTTP parameters (all values must be strings) + ## HTTP parameters (all values must be strings) [inputs.httpjson.parameters] event_type = "cpu_spike" threshold = "0.75" - ### HTTP Header parameters (all values must be strings) + ## HTTP Header parameters (all values must be strings) # [inputs.httpjson.headers] # X-Auth-Token = "my-xauth-token" # apiVersion = "v1" diff --git a/plugins/inputs/influxdb/influxdb.go b/plugins/inputs/influxdb/influxdb.go index b12990cf1..63a3c1854 100644 --- a/plugins/inputs/influxdb/influxdb.go +++ b/plugins/inputs/influxdb/influxdb.go @@ -22,11 +22,11 @@ func (*InfluxDB) Description() string { func (*InfluxDB) SampleConfig() string { return ` - ### Works with InfluxDB debug endpoints out of the box, - ### but other services can use this format too. - ### See the influxdb plugin's README for more details. + ## Works with InfluxDB debug endpoints out of the box, + ## but other services can use this format too. + ## See the influxdb plugin's README for more details. - ### Multiple URLs from which to read InfluxDB-formatted JSON + ## Multiple URLs from which to read InfluxDB-formatted JSON urls = [ "http://localhost:8086/debug/vars" ] diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go index 77546006f..2e0bba6d5 100644 --- a/plugins/inputs/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -46,10 +46,10 @@ type Jolokia struct { func (j *Jolokia) SampleConfig() string { return ` - ### This is the context root used to compose the jolokia url + ## This is the context root used to compose the jolokia url context = "/jolokia/read" - ### List of servers exposing jolokia read service + ## List of servers exposing jolokia read service [[inputs.jolokia.servers]] name = "stable" host = "192.168.103.2" @@ -57,10 +57,10 @@ func (j *Jolokia) SampleConfig() string { # username = "myuser" # password = "mypassword" - ### List of metrics collected on above servers - ### Each metric consists in a name, a jmx path and either - ### a pass or drop slice attribute. - ### This collect all heap memory usage metrics. + ## List of metrics collected on above servers + ## Each metric consists in a name, a jmx path and either + ## a pass or drop slice attribute. + ## This collect all heap memory usage metrics. [[inputs.jolokia.metrics]] name = "heap_memory_usage" jmx = "/java.lang:type=Memory/HeapMemoryUsage" diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index 2e9d8cf3d..885c67a28 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -11,21 +11,21 @@ from the same topic in parallel. ```toml # Read metrics from Kafka topic(s) [[inputs.kafka_consumer]] - ### topic(s) to consume + ## topic(s) to consume topics = ["telegraf"] - ### an array of Zookeeper connection strings + ## an array of Zookeeper connection strings zookeeper_peers = ["localhost:2181"] - ### the name of the consumer group + ## the name of the consumer group consumer_group = "telegraf_metrics_consumers" - ### Maximum number of metrics to buffer between collection intervals + ## Maximum number of metrics to buffer between collection intervals metric_buffer = 100000 - ### Offset (must be either "oldest" or "newest") + ## Offset (must be either "oldest" or "newest") offset = "oldest" - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ``` diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 98f2b2990..bc0d225c6 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -44,19 +44,19 @@ type Kafka struct { } var sampleConfig = ` - ### topic(s) to consume + ## topic(s) to consume topics = ["telegraf"] - ### an array of Zookeeper connection strings + ## an array of Zookeeper connection strings zookeeper_peers = ["localhost:2181"] - ### the name of the consumer group + ## the name of the consumer group consumer_group = "telegraf_metrics_consumers" - ### Offset (must be either "oldest" or "newest") + ## Offset (must be either "oldest" or "newest") offset = "oldest" - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ` diff --git a/plugins/inputs/leofs/leofs.go b/plugins/inputs/leofs/leofs.go index 4a52706b3..f4910ad0c 100644 --- a/plugins/inputs/leofs/leofs.go +++ b/plugins/inputs/leofs/leofs.go @@ -132,8 +132,8 @@ var serverTypeMapping = map[string]ServerType{ } var sampleConfig = ` - ### An array of URI to gather stats about LeoFS. - ### Specify an ip or hostname with port. ie 127.0.0.1:4020 + ## An array of URI to gather stats about LeoFS. + ## Specify an ip or hostname with port. ie 127.0.0.1:4020 servers = ["127.0.0.1:4021"] ` diff --git a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go index 26d0e3702..6ac41d391 100644 --- a/plugins/inputs/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -29,9 +29,9 @@ type Lustre2 struct { } var sampleConfig = ` - ### An array of /proc globs to search for Lustre stats - ### If not specified, the default will work on Lustre 2.5.x - ### + ## An array of /proc globs to search for Lustre stats + ## If not specified, the default will work on Lustre 2.5.x + ## # ost_procfiles = [ # "/proc/fs/lustre/obdfilter/*/stats", # "/proc/fs/lustre/osd-ldiskfs/*/stats" diff --git a/plugins/inputs/mailchimp/mailchimp.go b/plugins/inputs/mailchimp/mailchimp.go index 290c01bfd..d7255191a 100644 --- a/plugins/inputs/mailchimp/mailchimp.go +++ b/plugins/inputs/mailchimp/mailchimp.go @@ -17,13 +17,13 @@ type MailChimp struct { } var sampleConfig = ` - ### MailChimp API key - ### get from https://admin.mailchimp.com/account/api/ + ## MailChimp API key + ## get from https://admin.mailchimp.com/account/api/ api_key = "" # required - ### Reports for campaigns sent more than days_old ago will not be collected. - ### 0 means collect all. + ## Reports for campaigns sent more than days_old ago will not be collected. + ## 0 means collect all. days_old = 0 - ### Campaign ID to get, if empty gets all campaigns, this option overrides days_old + ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old # campaign_id = "" ` diff --git a/plugins/inputs/memcached/memcached.go b/plugins/inputs/memcached/memcached.go index 19654937c..24ff09d77 100644 --- a/plugins/inputs/memcached/memcached.go +++ b/plugins/inputs/memcached/memcached.go @@ -19,8 +19,8 @@ type Memcached struct { } var sampleConfig = ` - ### An array of address to gather stats about. Specify an ip on hostname - ### with optional port. ie localhost, 10.0.0.1:11211, etc. + ## An array of address to gather stats about. Specify an ip on hostname + ## with optional port. ie localhost, 10.0.0.1:11211, etc. servers = ["localhost:11211"] # unix_sockets = ["/var/run/memcached.sock"] ` diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index 4054ccd54..3be04477b 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -26,11 +26,11 @@ type Ssl struct { } var sampleConfig = ` - ### An array of URI to gather stats about. Specify an ip or hostname - ### with optional port add password. ie, - ### mongodb://user:auth_key@10.10.3.30:27017, - ### mongodb://10.10.3.33:18832, - ### 10.0.0.1:10000, etc. + ## An array of URI to gather stats about. Specify an ip or hostname + ## with optional port add password. ie, + ## mongodb://user:auth_key@10.10.3.30:27017, + ## mongodb://10.10.3.33:18832, + ## 10.0.0.1:10000, etc. servers = ["127.0.0.1:27017"] ` diff --git a/plugins/inputs/mqtt_consumer/README.md b/plugins/inputs/mqtt_consumer/README.md index 07a64e901..787494975 100644 --- a/plugins/inputs/mqtt_consumer/README.md +++ b/plugins/inputs/mqtt_consumer/README.md @@ -11,34 +11,34 @@ The plugin expects messages in the # Read metrics from MQTT topic(s) [[inputs.mqtt_consumer]] servers = ["localhost:1883"] - ### MQTT QoS, must be 0, 1, or 2 + ## MQTT QoS, must be 0, 1, or 2 qos = 0 - ### Topics to subscribe to + ## Topics to subscribe to topics = [ "telegraf/host01/cpu", "telegraf/+/mem", "sensors/#", ] - ### Maximum number of metrics to buffer between collection intervals + ## Maximum number of metrics to buffer between collection intervals metric_buffer = 100000 - ### username and password to connect MQTT server. + ## username and password to connect MQTT server. # username = "telegraf" # password = "metricsmetricsmetricsmetrics" - ### Optional SSL Config + ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" # ssl_cert = "/etc/telegraf/cert.pem" # ssl_key = "/etc/telegraf/key.pem" - ### Use SSL but skip chain & host verification + ## Use SSL but skip chain & host verification # insecure_skip_verify = false - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ``` diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index e9a7ef8b1..2d0fbef06 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -47,31 +47,31 @@ type MQTTConsumer struct { var sampleConfig = ` servers = ["localhost:1883"] - ### MQTT QoS, must be 0, 1, or 2 + ## MQTT QoS, must be 0, 1, or 2 qos = 0 - ### Topics to subscribe to + ## Topics to subscribe to topics = [ "telegraf/host01/cpu", "telegraf/+/mem", "sensors/#", ] - ### username and password to connect MQTT server. + ## username and password to connect MQTT server. # username = "telegraf" # password = "metricsmetricsmetricsmetrics" - ### Optional SSL Config + ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" # ssl_cert = "/etc/telegraf/cert.pem" # ssl_key = "/etc/telegraf/key.pem" - ### Use SSL but skip chain & host verification + ## Use SSL but skip chain & host verification # insecure_skip_verify = false - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ` diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 272baddb1..b2e2729a9 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -15,14 +15,14 @@ type Mysql struct { } var sampleConfig = ` - ### specify servers via a url matching: - ### [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]] - ### see https://github.com/go-sql-driver/mysql#dsn-data-source-name - ### e.g. - ### root:passwd@tcp(127.0.0.1:3306)/?tls=false - ### root@tcp(127.0.0.1:3306)/?tls=false - ### - ### If no servers are specified, then localhost is used as the host. + ## specify servers via a url matching: + ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]] + ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name + ## e.g. + ## root:passwd@tcp(127.0.0.1:3306)/?tls=false + ## root@tcp(127.0.0.1:3306)/?tls=false + ## + ## If no servers are specified, then localhost is used as the host. servers = ["tcp(127.0.0.1:3306)/"] ` diff --git a/plugins/inputs/nats_consumer/README.md b/plugins/inputs/nats_consumer/README.md index 42993e813..90563ff55 100644 --- a/plugins/inputs/nats_consumer/README.md +++ b/plugins/inputs/nats_consumer/README.md @@ -12,20 +12,20 @@ from a NATS cluster in parallel. ```toml # Read metrics from NATS subject(s) [[inputs.nats_consumer]] - ### urls of NATS servers + ## urls of NATS servers servers = ["nats://localhost:4222"] - ### Use Transport Layer Security + ## Use Transport Layer Security secure = false - ### subject(s) to consume + ## subject(s) to consume subjects = ["telegraf"] - ### name a queue group + ## name a queue group queue_group = "telegraf_consumers" - ### Maximum number of metrics to buffer between collection intervals + ## Maximum number of metrics to buffer between collection intervals metric_buffer = 100000 - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ``` diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index c0bf50849..235601100 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -46,19 +46,19 @@ type natsConsumer struct { } var sampleConfig = ` - ### urls of NATS servers + ## urls of NATS servers servers = ["nats://localhost:4222"] - ### Use Transport Layer Security + ## Use Transport Layer Security secure = false - ### subject(s) to consume + ## subject(s) to consume subjects = ["telegraf"] - ### name a queue group + ## name a queue group queue_group = "telegraf_consumers" - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ` diff --git a/plugins/inputs/net_response/net_response.go b/plugins/inputs/net_response/net_response.go index 60468c157..66bf2ae7b 100644 --- a/plugins/inputs/net_response/net_response.go +++ b/plugins/inputs/net_response/net_response.go @@ -27,17 +27,17 @@ func (_ *NetResponse) Description() string { } var sampleConfig = ` - ### Protocol, must be "tcp" or "udp" + ## Protocol, must be "tcp" or "udp" protocol = "tcp" - ### Server address (default localhost) + ## Server address (default localhost) address = "github.com:80" - ### Set timeout (default 1.0 seconds) + ## Set timeout (default 1.0 seconds) timeout = 1.0 - ### Set read timeout (default 1.0 seconds) + ## Set read timeout (default 1.0 seconds) read_timeout = 1.0 - ### Optional string sent to the server + ## Optional string sent to the server # send = "ssh" - ### Optional expected string in answer + ## Optional expected string in answer # expect = "ssh" ` diff --git a/plugins/inputs/nginx/nginx.go b/plugins/inputs/nginx/nginx.go index 4ceca01f2..3b008fbf3 100644 --- a/plugins/inputs/nginx/nginx.go +++ b/plugins/inputs/nginx/nginx.go @@ -20,7 +20,7 @@ type Nginx struct { } var sampleConfig = ` - ### An array of Nginx stub_status URI to gather stats. + ## An array of Nginx stub_status URI to gather stats. urls = ["http://localhost/status"] ` diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go index 1cf7d4dcc..6b3be66f2 100644 --- a/plugins/inputs/nsq/nsq.go +++ b/plugins/inputs/nsq/nsq.go @@ -41,7 +41,7 @@ type NSQ struct { } var sampleConfig = ` - ### An array of NSQD HTTP API endpoints + ## An array of NSQD HTTP API endpoints endpoints = ["http://localhost:4151"] ` diff --git a/plugins/inputs/passenger/passenger.go b/plugins/inputs/passenger/passenger.go index 802107f4f..84e92cb1a 100644 --- a/plugins/inputs/passenger/passenger.go +++ b/plugins/inputs/passenger/passenger.go @@ -126,14 +126,14 @@ func (p *process) getUptime() int64 { } var sampleConfig = ` - ### Path of passenger-status. - ### - ### Plugin gather metric via parsing XML output of passenger-status - ### More information about the tool: - ### https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html - ### - ### If no path is specified, then the plugin simply execute passenger-status - ### hopefully it can be found in your PATH + ## Path of passenger-status. + ## + ## Plugin gather metric via parsing XML output of passenger-status + ## More information about the tool: + ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html + ## + ## If no path is specified, then the plugin simply execute passenger-status + ## hopefully it can be found in your PATH command = "passenger-status -v --show=xml" ` diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index 157f87691..c07262342 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -41,25 +41,25 @@ type phpfpm struct { } var sampleConfig = ` - ### An array of addresses to gather stats about. Specify an ip or hostname - ### with optional port and path - ### - ### Plugin can be configured in three modes (either can be used): - ### - http: the URL must start with http:// or https://, ie: - ### "http://localhost/status" - ### "http://192.168.130.1/status?full" - ### - ### - unixsocket: path to fpm socket, ie: - ### "/var/run/php5-fpm.sock" - ### or using a custom fpm status path: - ### "/var/run/php5-fpm.sock:fpm-custom-status-path" - ### - ### - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: - ### "fcgi://10.0.0.12:9000/status" - ### "cgi://10.0.10.12:9001/status" - ### - ### Example of multiple gathering from local socket and remove host - ### urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] + ## An array of addresses to gather stats about. Specify an ip or hostname + ## with optional port and path + ## + ## Plugin can be configured in three modes (either can be used): + ## - http: the URL must start with http:// or https://, ie: + ## "http://localhost/status" + ## "http://192.168.130.1/status?full" + ## + ## - unixsocket: path to fpm socket, ie: + ## "/var/run/php5-fpm.sock" + ## or using a custom fpm status path: + ## "/var/run/php5-fpm.sock:fpm-custom-status-path" + ## + ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: + ## "fcgi://10.0.0.12:9000/status" + ## "cgi://10.0.10.12:9001/status" + ## + ## Example of multiple gathering from local socket and remove host + ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] urls = ["http://localhost/status"] ` diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index ab5df6e82..1798a5eb7 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -44,18 +44,18 @@ func (_ *Ping) Description() string { } var sampleConfig = ` - ### NOTE: this plugin forks the ping command. You may need to set capabilities - ### via setcap cap_net_raw+p /bin/ping + ## NOTE: this plugin forks the ping command. You may need to set capabilities + ## via setcap cap_net_raw+p /bin/ping - ### urls to ping + ## urls to ping urls = ["www.google.com"] # required - ### number of pings to send (ping -c ) + ## number of pings to send (ping -c ) count = 1 # required - ### interval, in s, at which to ping. 0 == default (ping -i ) + ## interval, in s, at which to ping. 0 == default (ping -i ) ping_interval = 0.0 - ### ping timeout, in s. 0 == no timeout (ping -t ) + ## ping timeout, in s. 0 == no timeout (ping -t ) timeout = 0.0 - ### interface to send ping from (ping -I ) + ## interface to send ping from (ping -I ) interface = "" ` diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index 660f1b318..fe2a56576 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -23,22 +23,22 @@ type Postgresql struct { var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true} var sampleConfig = ` - ### specify address via a url matching: - ### postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] - ### or a simple string: - ### host=localhost user=pqotest password=... sslmode=... dbname=app_production - ### - ### All connection parameters are optional. - ### - ### Without the dbname parameter, the driver will default to a database - ### with the same name as the user. This dbname is just for instantiating a - ### connection with the server and doesn't restrict the databases we are trying - ### to grab metrics for. - ### + ## specify address via a url matching: + ## postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] + ## or a simple string: + ## host=localhost user=pqotest password=... sslmode=... dbname=app_production + ## + ## All connection parameters are optional. + ## + ## Without the dbname parameter, the driver will default to a database + ## with the same name as the user. This dbname is just for instantiating a + ## connection with the server and doesn't restrict the databases we are trying + ## to grab metrics for. + ## address = "host=localhost user=postgres sslmode=disable" - ### A list of databases to pull metrics about. If not specified, metrics for all - ### databases are gathered. + ## A list of databases to pull metrics about. If not specified, metrics for all + ## databases are gathered. # databases = ["app_production", "testing"] ` diff --git a/plugins/inputs/powerdns/powerdns.go b/plugins/inputs/powerdns/powerdns.go index f011f8716..0824ff672 100644 --- a/plugins/inputs/powerdns/powerdns.go +++ b/plugins/inputs/powerdns/powerdns.go @@ -18,8 +18,8 @@ type Powerdns struct { } var sampleConfig = ` - ### An array of sockets to gather stats about. - ### Specify a path to unix socket. + ## An array of sockets to gather stats about. + ## Specify a path to unix socket. unix_sockets = ["/var/run/pdns.controlsocket"] ` diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 6305416b7..d3f18d5ea 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -30,15 +30,15 @@ func NewProcstat() *Procstat { } var sampleConfig = ` - ### Must specify one of: pid_file, exe, or pattern - ### PID file to monitor process + ## Must specify one of: pid_file, exe, or pattern + ## PID file to monitor process pid_file = "/var/run/nginx.pid" - ### executable name (ie, pgrep ) + ## executable name (ie, pgrep ) # exe = "nginx" - ### pattern as argument for pgrep (ie, pgrep -f ) + ## pattern as argument for pgrep (ie, pgrep -f ) # pattern = "nginx" - ### Field name prefix + ## Field name prefix prefix = "" ` diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index aea5c5f95..188e6b914 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -17,7 +17,7 @@ type Prometheus struct { } var sampleConfig = ` - ### An array of urls to scrape metrics from. + ## An array of urls to scrape metrics from. urls = ["http://localhost:9100/metrics"] ` diff --git a/plugins/inputs/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go index 882b1e3b8..f66aa989f 100644 --- a/plugins/inputs/puppetagent/puppetagent.go +++ b/plugins/inputs/puppetagent/puppetagent.go @@ -18,7 +18,7 @@ type PuppetAgent struct { } var sampleConfig = ` - ### Location of puppet last run summary file + ## Location of puppet last run summary file location = "/var/lib/puppet/state/last_run_summary.yaml" ` diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index 8b287204f..e51d65e15 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -107,8 +107,8 @@ var sampleConfig = ` # username = "guest" # password = "guest" - ### A list of nodes to pull metrics about. If not specified, metrics for - ### all nodes are gathered. + ## A list of nodes to pull metrics about. If not specified, metrics for + ## all nodes are gathered. # nodes = ["rabbit@node1", "rabbit@node2"] ` diff --git a/plugins/inputs/raindrops/raindrops.go b/plugins/inputs/raindrops/raindrops.go index 572422f59..fed22b693 100644 --- a/plugins/inputs/raindrops/raindrops.go +++ b/plugins/inputs/raindrops/raindrops.go @@ -21,7 +21,7 @@ type Raindrops struct { } var sampleConfig = ` - ### An array of raindrops middleware URI to gather stats. + ## An array of raindrops middleware URI to gather stats. urls = ["http://localhost:8080/_raindrops"] ` diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index 88420beac..b8862f6bc 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -19,14 +19,14 @@ type Redis struct { } var sampleConfig = ` - ### specify servers via a url matching: - ### [protocol://][:password]@address[:port] - ### e.g. - ### tcp://localhost:6379 - ### tcp://:password@192.168.99.100 - ### - ### If no servers are specified, then localhost is used as the host. - ### If no port is specified, 6379 is used + ## specify servers via a url matching: + ## [protocol://][:password]@address[:port] + ## e.g. + ## tcp://localhost:6379 + ## tcp://:password@192.168.99.100 + ## + ## If no servers are specified, then localhost is used as the host. + ## If no port is specified, 6379 is used servers = ["tcp://localhost:6379"] ` diff --git a/plugins/inputs/rethinkdb/rethinkdb.go b/plugins/inputs/rethinkdb/rethinkdb.go index 94d31fe5f..32237a80f 100644 --- a/plugins/inputs/rethinkdb/rethinkdb.go +++ b/plugins/inputs/rethinkdb/rethinkdb.go @@ -16,11 +16,11 @@ type RethinkDB struct { } var sampleConfig = ` - ### An array of URI to gather stats about. Specify an ip or hostname - ### with optional port add password. ie, - ### rethinkdb://user:auth_key@10.10.3.30:28105, - ### rethinkdb://10.10.3.33:18832, - ### 10.0.0.1:10000, etc. + ## An array of URI to gather stats about. Specify an ip or hostname + ## with optional port add password. ie, + ## rethinkdb://user:auth_key@10.10.3.30:28105, + ## rethinkdb://10.10.3.33:18832, + ## 10.0.0.1:10000, etc. servers = ["127.0.0.1:28015"] ` diff --git a/plugins/inputs/sensors/sensors.go b/plugins/inputs/sensors/sensors.go index 82cc7df89..b2c2919cc 100644 --- a/plugins/inputs/sensors/sensors.go +++ b/plugins/inputs/sensors/sensors.go @@ -20,15 +20,15 @@ func (_ *Sensors) Description() string { } var sensorsSampleConfig = ` - ### By default, telegraf gathers stats from all sensors detected by the - ### lm-sensors module. - ### - ### Only collect stats from the selected sensors. Sensors are listed as - ### :. This information can be found by running the - ### sensors command, e.g. sensors -u - ### - ### A * as the feature name will return all features of the chip - ### + ## By default, telegraf gathers stats from all sensors detected by the + ## lm-sensors module. + ## + ## Only collect stats from the selected sensors. Sensors are listed as + ## :. This information can be found by running the + ## sensors command, e.g. sensors -u + ## + ## A * as the feature name will return all features of the chip + ## # sensors = ["coretemp-isa-0000:Core 0", "coretemp-isa-0001:*"] ` diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 1932fed41..371bc2ad9 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -72,11 +72,11 @@ var initNode = Node{ var NameToOid = make(map[string]string) var sampleConfig = ` - ### Use 'oids.txt' file to translate oids to names - ### To generate 'oids.txt' you need to run: - ### snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt - ### Or if you have an other MIB folder with custom MIBs - ### snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt + ## Use 'oids.txt' file to translate oids to names + ## To generate 'oids.txt' you need to run: + ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt + ## Or if you have an other MIB folder with custom MIBs + ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt snmptranslate_file = "/tmp/oids.txt" [[inputs.snmp.host]] address = "192.168.2.2:161" diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 83d88b3c2..3b29a32c1 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -31,12 +31,12 @@ var queries MapQuery var defaultServer = "Server=.;app name=telegraf;log=1;" var sampleConfig = ` - ### Specify instances to monitor with a list of connection strings. - ### All connection parameters are optional. - ### By default, the host is localhost, listening on default port, TCP 1433. - ### for Windows, the user is the currently running AD user (SSO). - ### See https://github.com/denisenkom/go-mssqldb for detailed connection - ### parameters. + ## Specify instances to monitor with a list of connection strings. + ## All connection parameters are optional. + ## By default, the host is localhost, listening on default port, TCP 1433. + ## for Windows, the user is the currently running AD user (SSO). + ## See https://github.com/denisenkom/go-mssqldb for detailed connection + ## parameters. # servers = [ # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", # ] diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md index 294c12b84..5bb18657c 100644 --- a/plugins/inputs/statsd/README.md +++ b/plugins/inputs/statsd/README.md @@ -5,39 +5,39 @@ ```toml # Statsd Server [[inputs.statsd]] - ### Address and port to host UDP listener on + ## Address and port to host UDP listener on service_address = ":8125" - ### Delete gauges every interval (default=false) + ## Delete gauges every interval (default=false) delete_gauges = false - ### Delete counters every interval (default=false) + ## Delete counters every interval (default=false) delete_counters = false - ### Delete sets every interval (default=false) + ## Delete sets every interval (default=false) delete_sets = false - ### Delete timings & histograms every interval (default=true) + ## Delete timings & histograms every interval (default=true) delete_timings = true - ### Percentiles to calculate for timing & histogram stats + ## Percentiles to calculate for timing & histogram stats percentiles = [90] - ### convert measurement names, "." to "_" and "-" to "__" + ## convert measurement names, "." to "_" and "-" to "__" convert_names = true - ### Statsd data translation templates, more info can be read here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite + ## Statsd data translation templates, more info can be read here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite # templates = [ # "cpu.* measurement*" # ] - ### Number of UDP messages allowed to queue up, once filled, - ### the statsd server will start dropping packets + ## Number of UDP messages allowed to queue up, once filled, + ## the statsd server will start dropping packets allowed_pending_messages = 10000 - ### Number of timing/histogram values to track per-measurement in the - ### calculation of percentiles. Raising this limit increases the accuracy - ### of percentiles but also increases the memory usage and cpu time. + ## Number of timing/histogram values to track per-measurement in the + ## calculation of percentiles. Raising this limit increases the accuracy + ## of percentiles but also increases the memory usage and cpu time. percentile_limit = 1000 - ### UDP packet size for the server to listen for. This will depend on the size - ### of the packets that the client is sending, which is usually 1500 bytes. + ## UDP packet size for the server to listen for. This will depend on the size + ## of the packets that the client is sending, which is usually 1500 bytes. udp_packet_size = 1500 ``` diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index bc792149a..830e9d25c 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -123,39 +123,39 @@ func (_ *Statsd) Description() string { } const sampleConfig = ` - ### Address and port to host UDP listener on + ## Address and port to host UDP listener on service_address = ":8125" - ### Delete gauges every interval (default=false) + ## Delete gauges every interval (default=false) delete_gauges = false - ### Delete counters every interval (default=false) + ## Delete counters every interval (default=false) delete_counters = false - ### Delete sets every interval (default=false) + ## Delete sets every interval (default=false) delete_sets = false - ### Delete timings & histograms every interval (default=true) + ## Delete timings & histograms every interval (default=true) delete_timings = true - ### Percentiles to calculate for timing & histogram stats + ## Percentiles to calculate for timing & histogram stats percentiles = [90] - ### convert measurement names, "." to "_" and "-" to "__" + ## convert measurement names, "." to "_" and "-" to "__" convert_names = true - ### Statsd data translation templates, more info can be read here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite + ## Statsd data translation templates, more info can be read here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite # templates = [ # "cpu.* measurement*" # ] - ### Number of UDP messages allowed to queue up, once filled, - ### the statsd server will start dropping packets + ## Number of UDP messages allowed to queue up, once filled, + ## the statsd server will start dropping packets allowed_pending_messages = 10000 - ### Number of timing/histogram values to track per-measurement in the - ### calculation of percentiles. Raising this limit increases the accuracy - ### of percentiles but also increases the memory usage and cpu time. + ## Number of timing/histogram values to track per-measurement in the + ## calculation of percentiles. Raising this limit increases the accuracy + ## of percentiles but also increases the memory usage and cpu time. percentile_limit = 1000 - ### UDP packet size for the server to listen for. This will depend on the size - ### of the packets that the client is sending, which is usually 1500 bytes. + ## UDP packet size for the server to listen for. This will depend on the size + ## of the packets that the client is sending, which is usually 1500 bytes. udp_packet_size = 1500 ` diff --git a/plugins/inputs/system/cpu.go b/plugins/inputs/system/cpu.go index 47b3368a7..bef2a28f4 100644 --- a/plugins/inputs/system/cpu.go +++ b/plugins/inputs/system/cpu.go @@ -28,11 +28,11 @@ func (_ *CPUStats) Description() string { } var sampleConfig = ` - ### Whether to report per-cpu stats or not + ## Whether to report per-cpu stats or not percpu = true - ### Whether to report total system cpu stats or not + ## Whether to report total system cpu stats or not totalcpu = true - ### Comment this line if you want the raw CPU time metrics + ## Comment this line if you want the raw CPU time metrics drop = ["time_*"] ` diff --git a/plugins/inputs/system/disk.go b/plugins/inputs/system/disk.go index b8c611427..0488c839a 100644 --- a/plugins/inputs/system/disk.go +++ b/plugins/inputs/system/disk.go @@ -21,8 +21,8 @@ func (_ *DiskStats) Description() string { } var diskSampleConfig = ` - ### By default, telegraf gather stats for all mountpoints. - ### Setting mountpoints will restrict the stats to the specified mountpoints. + ## By default, telegraf gather stats for all mountpoints. + ## Setting mountpoints will restrict the stats to the specified mountpoints. # mount_points = ["/"] ` diff --git a/plugins/inputs/system/net.go b/plugins/inputs/system/net.go index ea8b66266..f6bc05818 100644 --- a/plugins/inputs/system/net.go +++ b/plugins/inputs/system/net.go @@ -21,10 +21,10 @@ func (_ *NetIOStats) Description() string { } var netSampleConfig = ` - ### By default, telegraf gathers stats from any up interface (excluding loopback) - ### Setting interfaces will tell it to gather these explicit interfaces, - ### regardless of status. - ### + ## By default, telegraf gathers stats from any up interface (excluding loopback) + ## Setting interfaces will tell it to gather these explicit interfaces, + ## regardless of status. + ## # interfaces = ["eth0"] ` diff --git a/plugins/inputs/trig/trig.go b/plugins/inputs/trig/trig.go index 51879dfc1..647794f0a 100644 --- a/plugins/inputs/trig/trig.go +++ b/plugins/inputs/trig/trig.go @@ -13,7 +13,7 @@ type Trig struct { } var TrigConfig = ` - ### Set the amplitude + ## Set the amplitude amplitude = 10.0 ` diff --git a/plugins/inputs/twemproxy/twemproxy.go b/plugins/inputs/twemproxy/twemproxy.go index d5ae12dee..cda56943f 100644 --- a/plugins/inputs/twemproxy/twemproxy.go +++ b/plugins/inputs/twemproxy/twemproxy.go @@ -17,9 +17,9 @@ type Twemproxy struct { } var sampleConfig = ` - ### Twemproxy stats address and port (no scheme) + ## Twemproxy stats address and port (no scheme) addr = "localhost:22222" - ### Monitor pool name + ## Monitor pool name pools = ["redis_pool", "mc_pool"] ` diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go index e243588a6..8279f1c7a 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters.go @@ -14,12 +14,12 @@ import ( ) var sampleConfig string = ` - ### By default this plugin returns basic CPU and Disk statistics. - ### See the README file for more examples. - ### Uncomment examples below or write your own as you see fit. If the system - ### being polled for data does not have the Object at startup of the Telegraf - ### agent, it will not be gathered. - ### Settings: + ## By default this plugin returns basic CPU and Disk statistics. + ## See the README file for more examples. + ## Uncomment examples below or write your own as you see fit. If the system + ## being polled for data does not have the Object at startup of the Telegraf + ## agent, it will not be gathered. + ## Settings: # PrintValid = false # Print All matching performance counters [[inputs.win_perf_counters.object]] diff --git a/plugins/inputs/zfs/zfs.go b/plugins/inputs/zfs/zfs.go index 57d1fece4..bcbe03e95 100644 --- a/plugins/inputs/zfs/zfs.go +++ b/plugins/inputs/zfs/zfs.go @@ -23,15 +23,15 @@ type poolInfo struct { } var sampleConfig = ` - ### ZFS kstat path - ### If not specified, then default is: + ## ZFS kstat path + ## If not specified, then default is: kstatPath = "/proc/spl/kstat/zfs" - ### By default, telegraf gather all zfs stats - ### If not specified, then default is: + ## By default, telegraf gather all zfs stats + ## If not specified, then default is: kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] - ### By default, don't gather zpool stats + ## By default, don't gather zpool stats poolMetrics = false ` diff --git a/plugins/inputs/zookeeper/zookeeper.go b/plugins/inputs/zookeeper/zookeeper.go index b18757cd6..0f2b2e06f 100644 --- a/plugins/inputs/zookeeper/zookeeper.go +++ b/plugins/inputs/zookeeper/zookeeper.go @@ -20,11 +20,11 @@ type Zookeeper struct { } var sampleConfig = ` - ### An array of address to gather stats about. Specify an ip or hostname - ### with port. ie localhost:2181, 10.0.0.1:2181, etc. + ## An array of address to gather stats about. Specify an ip or hostname + ## with port. ie localhost:2181, 10.0.0.1:2181, etc. - ### If no servers are specified, then localhost is used as the host. - ### If no port is specified, 2181 is used + ## If no servers are specified, then localhost is used as the host. + ## If no port is specified, 2181 is used servers = [":2181"] ` diff --git a/plugins/outputs/amon/amon.go b/plugins/outputs/amon/amon.go index 7d5cd5338..f88c2ddc5 100644 --- a/plugins/outputs/amon/amon.go +++ b/plugins/outputs/amon/amon.go @@ -22,13 +22,13 @@ type Amon struct { } var sampleConfig = ` - ### Amon Server Key + ## Amon Server Key server_key = "my-server-key" # required. - ### Amon Instance URL + ## Amon Instance URL amon_instance = "https://youramoninstance" # required - ### Connection timeout. + ## Connection timeout. # timeout = "5s" ` diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index ea80ad6a7..948007117 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -52,32 +52,32 @@ const ( ) var sampleConfig = ` - ### AMQP url + ## AMQP url url = "amqp://localhost:5672/influxdb" - ### AMQP exchange + ## AMQP exchange exchange = "telegraf" - ### Telegraf tag to use as a routing key - ### ie, if this tag exists, it's value will be used as the routing key + ## Telegraf tag to use as a routing key + ## ie, if this tag exists, it's value will be used as the routing key routing_tag = "host" - ### InfluxDB retention policy + ## InfluxDB retention policy # retention_policy = "default" - ### InfluxDB database + ## InfluxDB database # database = "telegraf" - ### InfluxDB precision + ## InfluxDB precision # precision = "s" - ### Optional SSL Config + ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" # ssl_cert = "/etc/telegraf/cert.pem" # ssl_key = "/etc/telegraf/key.pem" - ### Use SSL but skip chain & host verification + ## Use SSL but skip chain & host verification # insecure_skip_verify = false - ### Data format to output. This can be "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + ## Data format to output. This can be "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ` diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index a2d0d7b10..42d98b5be 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -25,10 +25,10 @@ type CloudWatch struct { } var sampleConfig = ` - ### Amazon REGION + ## Amazon REGION region = 'us-east-1' - ### Namespace for the CloudWatch MetricDatums + ## Namespace for the CloudWatch MetricDatums namespace = 'InfluxData/Telegraf' ` diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go index 208757284..5d6fab165 100644 --- a/plugins/outputs/datadog/datadog.go +++ b/plugins/outputs/datadog/datadog.go @@ -24,10 +24,10 @@ type Datadog struct { } var sampleConfig = ` - ### Datadog API key + ## Datadog API key apikey = "my-secret-key" # required. - ### Connection timeout. + ## Connection timeout. # timeout = "5s" ` diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go index 3d431774c..e593e3cea 100644 --- a/plugins/outputs/file/file.go +++ b/plugins/outputs/file/file.go @@ -20,13 +20,13 @@ type File struct { } var sampleConfig = ` - ### Files to write to, "stdout" is a specially handled file. + ## Files to write to, "stdout" is a specially handled file. files = ["stdout", "/tmp/metrics.out"] - ### Data format to output. This can be "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + ## Data format to output. This can be "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ` diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go index 29ac774f4..717ce06c8 100644 --- a/plugins/outputs/graphite/graphite.go +++ b/plugins/outputs/graphite/graphite.go @@ -23,11 +23,11 @@ type Graphite struct { } var sampleConfig = ` - ### TCP endpoint for your graphite instance. + ## TCP endpoint for your graphite instance. servers = ["localhost:2003"] - ### Prefix metrics name + ## Prefix metrics name prefix = "" - ### timeout in seconds for the write connection to graphite + ## timeout in seconds for the write connection to graphite timeout = 2 ` diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index cb235f903..683227717 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -41,32 +41,32 @@ type InfluxDB struct { } var sampleConfig = ` - ### The full HTTP or UDP endpoint URL for your InfluxDB instance. - ### Multiple urls can be specified as part of the same cluster, - ### this means that only ONE of the urls will be written to each interval. + ## The full HTTP or UDP endpoint URL for your InfluxDB instance. + ## Multiple urls can be specified as part of the same cluster, + ## this means that only ONE of the urls will be written to each interval. # urls = ["udp://localhost:8089"] # UDP endpoint example urls = ["http://localhost:8086"] # required - ### The target database for metrics (telegraf will create it if not exists) + ## The target database for metrics (telegraf will create it if not exists) database = "telegraf" # required - ### Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". - ### note: using "s" precision greatly improves InfluxDB compression + ## Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". + ## note: using "s" precision greatly improves InfluxDB compression precision = "s" - ### Connection timeout (for the connection with InfluxDB), formatted as a string. - ### If not provided, will default to 0 (no timeout) + ## Connection timeout (for the connection with InfluxDB), formatted as a string. + ## If not provided, will default to 0 (no timeout) # timeout = "5s" # username = "telegraf" # password = "metricsmetricsmetricsmetrics" - ### Set the user agent for HTTP POSTs (can be useful for log differentiation) + ## Set the user agent for HTTP POSTs (can be useful for log differentiation) # user_agent = "telegraf" - ### Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) + ## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) # udp_payload = 512 - ### Optional SSL Config + ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" # ssl_cert = "/etc/telegraf/cert.pem" # ssl_key = "/etc/telegraf/key.pem" - ### Use SSL but skip chain & host verification + ## Use SSL but skip chain & host verification # insecure_skip_verify = false ` diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 667212f62..8dea2b2a1 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -45,25 +45,25 @@ type Kafka struct { } var sampleConfig = ` - ### URLs of kafka brokers + ## URLs of kafka brokers brokers = ["localhost:9092"] - ### Kafka topic for producer messages + ## Kafka topic for producer messages topic = "telegraf" - ### Telegraf tag to use as a routing key - ### ie, if this tag exists, it's value will be used as the routing key + ## Telegraf tag to use as a routing key + ## ie, if this tag exists, it's value will be used as the routing key routing_tag = "host" - ### Optional SSL Config + ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" # ssl_cert = "/etc/telegraf/cert.pem" # ssl_key = "/etc/telegraf/key.pem" - ### Use SSL but skip chain & host verification + ## Use SSL but skip chain & host verification # insecure_skip_verify = false - ### Data format to output. This can be "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + ## Data format to output. This can be "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ` diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index f293be5fd..01906a7f5 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -28,16 +28,16 @@ type KinesisOutput struct { } var sampleConfig = ` - ### Amazon REGION of kinesis endpoint. + ## Amazon REGION of kinesis endpoint. region = "ap-southeast-2" - ### Kinesis StreamName must exist prior to starting telegraf. + ## Kinesis StreamName must exist prior to starting telegraf. streamname = "StreamName" - ### PartitionKey as used for sharding data. + ## PartitionKey as used for sharding data. partitionkey = "PartitionKey" - ### format of the Data payload in the kinesis PutRecord, supported - ### String and Custom. + ## format of the Data payload in the kinesis PutRecord, supported + ## String and Custom. format = "string" - ### debug will show upstream aws messages. + ## debug will show upstream aws messages. debug = false ` diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index 826926d16..3897e0b4f 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -23,20 +23,20 @@ type Librato struct { } var sampleConfig = ` - ### Librator API Docs - ### http://dev.librato.com/v1/metrics-authentication + ## Librator API Docs + ## http://dev.librato.com/v1/metrics-authentication - ### Librato API user + ## Librato API user api_user = "telegraf@influxdb.com" # required. - ### Librato API token + ## Librato API token api_token = "my-secret-token" # required. - ### Tag Field to populate source attribute (optional) - ### This is typically the _hostname_ from which the metric was obtained. + ## Tag Field to populate source attribute (optional) + ## This is typically the _hostname_ from which the metric was obtained. source_tag = "hostname" - ### Connection timeout. + ## Connection timeout. # timeout = "5s" ` diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index ea638f3da..6f8abe954 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -16,26 +16,26 @@ import ( var sampleConfig = ` servers = ["localhost:1883"] # required. - ### MQTT outputs send metrics to this topic format - ### "///" - ### ex: prefix/web01.example.com/mem + ## MQTT outputs send metrics to this topic format + ## "///" + ## ex: prefix/web01.example.com/mem topic_prefix = "telegraf" - ### username and password to connect MQTT server. + ## username and password to connect MQTT server. # username = "telegraf" # password = "metricsmetricsmetricsmetrics" - ### Optional SSL Config + ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" # ssl_cert = "/etc/telegraf/cert.pem" # ssl_key = "/etc/telegraf/key.pem" - ### Use SSL but skip chain & host verification + ## Use SSL but skip chain & host verification # insecure_skip_verify = false - ### Data format to output. This can be "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + ## Data format to output. This can be "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ` diff --git a/plugins/outputs/nsq/nsq.go b/plugins/outputs/nsq/nsq.go index ef23fab97..75b998484 100644 --- a/plugins/outputs/nsq/nsq.go +++ b/plugins/outputs/nsq/nsq.go @@ -19,15 +19,15 @@ type NSQ struct { } var sampleConfig = ` - ### Location of nsqd instance listening on TCP + ## Location of nsqd instance listening on TCP server = "localhost:4150" - ### NSQ topic for producer messages + ## NSQ topic for producer messages topic = "telegraf" - ### Data format to output. This can be "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + ## Data format to output. This can be "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ` diff --git a/plugins/outputs/opentsdb/opentsdb.go b/plugins/outputs/opentsdb/opentsdb.go index 2d58389e7..83a3429e3 100644 --- a/plugins/outputs/opentsdb/opentsdb.go +++ b/plugins/outputs/opentsdb/opentsdb.go @@ -22,17 +22,17 @@ type OpenTSDB struct { } var sampleConfig = ` - ### prefix for metrics keys + ## prefix for metrics keys prefix = "my.specific.prefix." ## Telnet Mode ## - ### DNS name of the OpenTSDB server in telnet mode + ## DNS name of the OpenTSDB server in telnet mode host = "opentsdb.example.com" - ### Port of the OpenTSDB server in telnet mode + ## Port of the OpenTSDB server in telnet mode port = 4242 - ### Debug true - Prints OpenTSDB communication + ## Debug true - Prints OpenTSDB communication debug = false ` diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 48bdddde6..df546c192 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -16,7 +16,7 @@ type PrometheusClient struct { } var sampleConfig = ` - ### Address to listen on + ## Address to listen on # listen = ":9126" ` diff --git a/plugins/outputs/riemann/riemann.go b/plugins/outputs/riemann/riemann.go index d20441391..c805bbd00 100644 --- a/plugins/outputs/riemann/riemann.go +++ b/plugins/outputs/riemann/riemann.go @@ -21,11 +21,11 @@ type Riemann struct { } var sampleConfig = ` - ### URL of server + ## URL of server url = "localhost:5555" - ### transport protocol to use either tcp or udp + ## transport protocol to use either tcp or udp transport = "tcp" - ### separator to use between input name and field name in Riemann service name + ## separator to use between input name and field name in Riemann service name separator = " " ` From 187d1b853d97dce6d30204ff0a3e108e60e0050c Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 17 Feb 2016 21:57:33 -0700 Subject: [PATCH 047/287] Update Makefile to 'go install' rather than 'go build' --- Makefile | 24 +++------- internal/config/config.go | 4 +- scripts/build.py | 97 ++++++++++----------------------------- scripts/circle-test.sh | 8 ++-- 4 files changed, 34 insertions(+), 99 deletions(-) diff --git a/Makefile b/Makefile index 2efad7705..fc8d71de0 100644 --- a/Makefile +++ b/Makefile @@ -14,9 +14,7 @@ windows: prepare-windows build-windows # Only run the build (no dependency grabbing) build: - go build -o telegraf -ldflags \ - "-X main.Version=$(VERSION)" \ - ./cmd/telegraf/telegraf.go + go install -ldflags "-X main.Version=$(VERSION)" ./... build-windows: go build -o telegraf.exe -ldflags \ @@ -24,27 +22,17 @@ build-windows: ./cmd/telegraf/telegraf.go build-for-docker: - CGO_ENABLED=0 GOOS=linux go build -o telegraf -ldflags \ + CGO_ENABLED=0 GOOS=linux go -o telegraf -ldflags \ "-X main.Version=$(VERSION)" \ ./cmd/telegraf/telegraf.go # Build with race detector dev: prepare - go build -race -o telegraf -ldflags \ - "-X main.Version=$(VERSION)" \ - ./cmd/telegraf/telegraf.go + go build -race -ldflags "-X main.Version=$(VERSION)" ./... -# Build linux 64-bit, 32-bit and arm architectures -build-linux-bins: prepare - GOARCH=amd64 GOOS=linux go build -o telegraf_linux_amd64 \ - -ldflags "-X main.Version=$(VERSION)" \ - ./cmd/telegraf/telegraf.go - GOARCH=386 GOOS=linux go build -o telegraf_linux_386 \ - -ldflags "-X main.Version=$(VERSION)" \ - ./cmd/telegraf/telegraf.go - GOARCH=arm GOOS=linux go build -o telegraf_linux_arm \ - -ldflags "-X main.Version=$(VERSION)" \ - ./cmd/telegraf/telegraf.go +# run package script +package: + ./scripts/build.py --package --version="$(VERSION)" --platform=linux --arch=all --upload # Get dependencies and use gdm to checkout changesets prepare: diff --git a/internal/config/config.go b/internal/config/config.go index f47cf7ea7..fc374d628 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -133,9 +133,7 @@ func (c *Config) ListTags() string { return strings.Join(tags, " ") } -var header = `############################################################################### -# Telegraf Configuration # -############################################################################### +var header = `# Telegraf Configuration # Telegraf is entirely plugin driven. All metrics are gathered from the # declared inputs, and sent to the declared outputs. diff --git a/scripts/build.py b/scripts/build.py index 53c3e84e0..b25b44982 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -42,7 +42,7 @@ DESCRIPTION = "Plugin-driven server agent for reporting metrics into InfluxDB." # SCRIPT START prereqs = [ 'git', 'go' ] -optional_prereqs = [ 'gvm', 'fpm', 'rpmbuild' ] +optional_prereqs = [ 'fpm', 'rpmbuild' ] fpm_common_args = "-f -s dir --log error \ --vendor {} \ @@ -78,6 +78,14 @@ supported_packages = { "linux": [ "deb", "rpm", "tar", "zip" ], "windows": [ "tar", "zip" ], } +supported_tags = { + # "linux": { + # "amd64": ["sensors"] + # } +} +prereq_cmds = { + # "linux": "sudo apt-get install lm-sensors libsensors4-dev" +} def run(command, allow_failure=False, shell=False): out = None @@ -233,52 +241,6 @@ def upload_packages(packages, bucket_name=None, nightly=False): print("\t - Not uploading {}, already exists.".format(p)) print("") -def run_tests(race, parallel, timeout, no_vet): - get_command = "go get -d -t ./..." - print("Retrieving Go dependencies...") - sys.stdout.flush() - run(get_command) - print("Running tests:") - print("\tRace: ", race) - if parallel is not None: - print("\tParallel:", parallel) - if timeout is not None: - print("\tTimeout:", timeout) - sys.stdout.flush() - p = subprocess.Popen(["go", "fmt", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - out, err = p.communicate() - if len(out) > 0 or len(err) > 0: - print("Code not formatted. Please use 'go fmt ./...' to fix formatting errors.") - print(out) - print(err) - return False - if not no_vet: - p = subprocess.Popen(["go", "tool", "vet", "-composites=false", "./"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - out, err = p.communicate() - if len(out) > 0 or len(err) > 0: - print("Go vet failed. Please run 'go vet ./...' and fix any errors.") - print(out) - print(err) - return False - else: - print("Skipping go vet ...") - sys.stdout.flush() - test_command = "go test -v" - if race: - test_command += " -race" - if parallel is not None: - test_command += " -parallel {}".format(parallel) - if timeout is not None: - test_command += " -timeout {}".format(timeout) - test_command += " ./..." - code = os.system(test_command) - if code != 0: - print("Tests Failed") - return False - else: - print("Tests Passed") - return True - def build(version=None, branch=None, commit=None, @@ -335,6 +297,11 @@ def build(version=None, build_command += "go build -o {} ".format(os.path.join(outdir, b)) if race: build_command += "-race " + if platform in supported_tags: + if arch in supported_tags[platform]: + build_tags = supported_tags[platform][arch] + for build_tag in build_tags: + build_command += "-tags "+build_tag+" " go_version = get_go_version() if "1.4" in go_version: build_command += "-ldflags=\"-X main.buildTime '{}' ".format(datetime.datetime.utcnow().isoformat()) @@ -393,14 +360,10 @@ def package_scripts(build_root): shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf")) os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0o644) -def go_get(update=False): - get_command = None - if update: - get_command = "go get -u -f -d ./..." - else: - get_command = "go get -d ./..." +def go_get(): print("Retrieving Go dependencies...") - run(get_command) + run("go get github.com/sparrc/gdm") + run("gdm restore") def generate_md5_from_file(path): m = hashlib.md5() @@ -450,7 +413,7 @@ def build_packages(build_output, version, pkg_arch, nightly=False, rc=None, iter package_version = version package_iteration = iteration current_location = build_output[p][a] - + if package_type in ['zip', 'tar']: if nightly: name = '{}-nightly_{}_{}'.format(name, p, a) @@ -519,12 +482,9 @@ def print_usage(): print("\t --race \n\t\t- Whether the produced build should have race detection enabled.") print("\t --package \n\t\t- Whether the produced builds should be packaged for the target platform(s).") print("\t --nightly \n\t\t- Whether the produced build is a nightly (affects version information).") - print("\t --update \n\t\t- Whether dependencies should be updated prior to building.") - print("\t --test \n\t\t- Run Go tests. Will not produce a build.") print("\t --parallel \n\t\t- Run Go tests in parallel up to the count specified.") print("\t --timeout \n\t\t- Timeout for Go tests. Defaults to 480s.") print("\t --clean \n\t\t- Clean the build output directory prior to creating build.") - print("\t --no-get \n\t\t- Do not run `go get` before building.") print("\t --bucket=\n\t\t- Full path of the bucket to upload packages to (must also specify --upload).") print("\t --debug \n\t\t- Displays debug output.") print("") @@ -592,15 +552,9 @@ def main(): elif '--nightly' in arg: # Signifies that this is a nightly build. nightly = True - elif '--update' in arg: - # Signifies that dependencies should be updated. - update = True elif '--upload' in arg: # Signifies that the resulting packages should be uploaded to S3 upload = True - elif '--test' in arg: - # Run tests and exit - test = True elif '--parallel' in arg: # Set parallel for tests. parallel = int(arg.split("=")[1]) @@ -620,8 +574,6 @@ def main(): elif '--bucket' in arg: # The bucket to upload the packages to, relies on boto upload_bucket = arg.split("=")[1] - elif '--no-get' in arg: - run_get = False elif '--debug' in arg: print "[DEBUG] Using debug output" debug = True @@ -665,15 +617,10 @@ def main(): target_arch = 'i386' elif target_arch == 'x86_64': target_arch = 'amd64' - - build_output = {} - if test: - if not run_tests(race, parallel, timeout, no_vet): - return 1 - return 0 - if run_get: - go_get(update=update) + build_output = {} + + go_get() platforms = [] single_build = True @@ -684,6 +631,8 @@ def main(): platforms = [target_platform] for platform in platforms: + if platform in prereq_cmds: + run(prereq_cmds[platform]) build_output.update( { platform : {} } ) archs = [] if target_arch == "all": diff --git a/scripts/circle-test.sh b/scripts/circle-test.sh index d4f150c83..72f297f9f 100755 --- a/scripts/circle-test.sh +++ b/scripts/circle-test.sh @@ -61,13 +61,13 @@ exit_if_fail go test -race ./... # Simple Integration Tests # check that version was properly set -exit_if_fail "./telegraf -version | grep $VERSION" +exit_if_fail "telegraf -version | grep $VERSION" # check that one test cpu & mem output work tmpdir=$(mktemp -d) -./telegraf -sample-config > $tmpdir/config.toml -exit_if_fail ./telegraf -config $tmpdir/config.toml \ +telegraf -sample-config > $tmpdir/config.toml +exit_if_fail telegraf -config $tmpdir/config.toml \ -test -input-filter cpu:mem -mv ./telegraf $CIRCLE_ARTIFACTS +mv $GOPATH/bin/telegraf $CIRCLE_ARTIFACTS exit $rc From 3179829fa518be7c73d8dfda0aa20503f0fd2d73 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 18 Feb 2016 17:18:43 -0700 Subject: [PATCH 048/287] Update changelog for 0.10.3 --- CHANGELOG.md | 2 +- README.md | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c264ce94b..237183a85 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## v0.10.3 [unreleased] +## v0.10.3 [2016-02-18] ### Release Notes - Users of the `exec` and `kafka_consumer` (and the new `nats_consumer` diff --git a/README.md b/README.md index e15cb822b..d93494ecb 100644 --- a/README.md +++ b/README.md @@ -27,8 +27,8 @@ the [release blog post](https://influxdata.com/blog/announcing-telegraf-0-10-0/) ### Linux deb and rpm Packages: Latest: -* http://get.influxdb.org/telegraf/telegraf_0.10.2-1_amd64.deb -* http://get.influxdb.org/telegraf/telegraf-0.10.2-1.x86_64.rpm +* http://get.influxdb.org/telegraf/telegraf_0.10.3-1_amd64.deb +* http://get.influxdb.org/telegraf/telegraf-0.10.3-1.x86_64.rpm 0.2.x: * http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb @@ -52,9 +52,9 @@ for instructions, replacing the `influxdb` package name with `telegraf`. ### Linux tarballs: Latest: -* http://get.influxdb.org/telegraf/telegraf-0.10.2-1_linux_amd64.tar.gz -* http://get.influxdb.org/telegraf/telegraf-0.10.2-1_linux_i386.tar.gz -* http://get.influxdb.org/telegraf/telegraf-0.10.2-1_linux_arm.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.10.3-1_linux_amd64.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.10.3-1_linux_i386.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.10.3-1_linux_arm.tar.gz 0.2.x: * http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz @@ -66,13 +66,13 @@ Latest: To install the full directory structure with config file, run: ``` -sudo tar -C / -zxvf ./telegraf-0.10.2-1_linux_amd64.tar.gz +sudo tar -C / -zxvf ./telegraf-0.10.3-1_linux_amd64.tar.gz ``` To extract only the binary, run: ``` -tar -zxvf telegraf-0.10.2-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf +tar -zxvf telegraf-0.10.3-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf ``` ### Ansible Role: From f9b5767daea8468f456ae5ee75722b3a4cb0cf19 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 19 Feb 2016 11:56:33 -0700 Subject: [PATCH 049/287] Provide default args: percpu=true and totalcpu=true for cpu plugin Also if outputs.file is empty, write to stdout closes #720 --- plugins/inputs/system/cpu.go | 6 +++++- plugins/outputs/file/file.go | 5 +++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/system/cpu.go b/plugins/inputs/system/cpu.go index bef2a28f4..333339458 100644 --- a/plugins/inputs/system/cpu.go +++ b/plugins/inputs/system/cpu.go @@ -113,6 +113,10 @@ func totalCpuTime(t cpu.CPUTimesStat) float64 { func init() { inputs.Add("cpu", func() telegraf.Input { - return &CPUStats{ps: &systemPS{}} + return &CPUStats{ + PerCPU: true, + TotalCPU: true, + ps: &systemPS{}, + } }) } diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go index e593e3cea..743c0f03f 100644 --- a/plugins/outputs/file/file.go +++ b/plugins/outputs/file/file.go @@ -36,6 +36,11 @@ func (f *File) SetSerializer(serializer serializers.Serializer) { func (f *File) Connect() error { writers := []io.Writer{} + + if len(f.Files) == 0 { + f.Files = []string{"stdout"} + } + for _, file := range f.Files { if file == "stdout" { writers = append(writers, os.Stdout) From 584a52ac21d54c7d9a524c38da1a32613621c915 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 19 Feb 2016 14:46:03 -0700 Subject: [PATCH 050/287] InfluxDB output should not default to 'no timeout' for http writes default to 5s instead, since even if it times out we will cache the points and move on closes #685 --- Makefile | 7 +++++-- etc/telegraf.conf | 6 +++--- plugins/outputs/influxdb/influxdb.go | 10 ++++++---- 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index fc8d71de0..84b3a1fe0 100644 --- a/Makefile +++ b/Makefile @@ -92,14 +92,17 @@ docker-kill: -docker rm nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp # Run full unit tests using docker containers (includes setup and teardown) -test: docker-kill docker-run +test: vet docker-kill docker-run # Sleeping for kafka leadership election, TSDB setup, etc. sleep 60 # SUCCESS, running tests go test -race ./... # Run "short" unit tests -test-short: +test-short: vet go test -short ./... +vet: + go vet ./... + .PHONY: test diff --git a/etc/telegraf.conf b/etc/telegraf.conf index db87251d5..eaf66db96 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -67,9 +67,9 @@ # note: using second precision greatly helps InfluxDB compression precision = "s" - # Connection timeout (for the connection with InfluxDB), formatted as a string. - # If not provided, will default to 0 (no timeout) - # timeout = "5s" + ## Write timeout (for the InfluxDB client), formatted as a string. + ## If not provided, will default to 5s. 0s means no timeout (not recommended). + timeout = "5s" # username = "telegraf" # password = "metricsmetricsmetricsmetrics" # Set the user agent for HTTP POSTs (can be useful for log differentiation) diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 683227717..60d235511 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -52,9 +52,9 @@ var sampleConfig = ` ## note: using "s" precision greatly improves InfluxDB compression precision = "s" - ## Connection timeout (for the connection with InfluxDB), formatted as a string. - ## If not provided, will default to 0 (no timeout) - # timeout = "5s" + ## Write timeout (for the InfluxDB client), formatted as a string. + ## If not provided, will default to 5s. 0s means no timeout (not recommended). + timeout = "5s" # username = "telegraf" # password = "metricsmetricsmetricsmetrics" ## Set the user agent for HTTP POSTs (can be useful for log differentiation) @@ -185,6 +185,8 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { func init() { outputs.Add("influxdb", func() telegraf.Output { - return &InfluxDB{} + return &InfluxDB{ + Timeout: internal.Duration{Duration: time.Second * 5}, + } }) } From e4e174981d29c5e340a26ab6c3258ac2d6d83e11 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 19 Feb 2016 16:15:14 -0700 Subject: [PATCH 051/287] Skip snmp tests that require docker in short mode --- plugins/inputs/snmp/snmp_test.go | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 8b3f91380..22414fb79 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -69,6 +69,9 @@ func TestSNMPErrorBulk(t *testing.T) { } func TestSNMPGet1(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } get1 := Data{ Name: "oid1", Unit: "octets", @@ -104,6 +107,9 @@ func TestSNMPGet1(t *testing.T) { } func TestSNMPGet2(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } get1 := Data{ Name: "oid1", Oid: "ifNumber", @@ -139,6 +145,9 @@ func TestSNMPGet2(t *testing.T) { } func TestSNMPGet3(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } get1 := Data{ Name: "oid1", Unit: "octets", @@ -177,6 +186,9 @@ func TestSNMPGet3(t *testing.T) { } func TestSNMPEasyGet4(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } get1 := Data{ Name: "oid1", Unit: "octets", @@ -227,6 +239,9 @@ func TestSNMPEasyGet4(t *testing.T) { } func TestSNMPEasyGet5(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } get1 := Data{ Name: "oid1", Unit: "octets", @@ -277,6 +292,9 @@ func TestSNMPEasyGet5(t *testing.T) { } func TestSNMPEasyGet6(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } h := Host{ Address: testutil.GetLocalHost() + ":31161", Community: "telegraf", @@ -307,6 +325,9 @@ func TestSNMPEasyGet6(t *testing.T) { } func TestSNMPBulk1(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } bulk1 := Data{ Name: "oid1", Unit: "octets", From 53c130b704550f6a2b96e181ee3fe60de0fb635c Mon Sep 17 00:00:00 2001 From: Jason Coene Date: Fri, 19 Feb 2016 14:30:40 -0600 Subject: [PATCH 052/287] Add riak plugin --- README.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/riak/README.md | 76 +++++++++ plugins/inputs/riak/riak.go | 196 ++++++++++++++++++++++ plugins/inputs/riak/riak_test.go | 276 +++++++++++++++++++++++++++++++ 5 files changed, 550 insertions(+) create mode 100644 plugins/inputs/riak/README.md create mode 100644 plugins/inputs/riak/riak.go create mode 100644 plugins/inputs/riak/riak_test.go diff --git a/README.md b/README.md index d93494ecb..9b1e7b171 100644 --- a/README.md +++ b/README.md @@ -187,6 +187,7 @@ Currently implemented sources: * raindrops * redis * rethinkdb +* riak * sql server (microsoft) * twemproxy * zfs diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 74331e54b..79deb7c99 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -40,6 +40,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/raindrops" _ "github.com/influxdata/telegraf/plugins/inputs/redis" _ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb" + _ "github.com/influxdata/telegraf/plugins/inputs/riak" _ "github.com/influxdata/telegraf/plugins/inputs/sensors" _ "github.com/influxdata/telegraf/plugins/inputs/snmp" _ "github.com/influxdata/telegraf/plugins/inputs/sqlserver" diff --git a/plugins/inputs/riak/README.md b/plugins/inputs/riak/README.md new file mode 100644 index 000000000..3aa39ae09 --- /dev/null +++ b/plugins/inputs/riak/README.md @@ -0,0 +1,76 @@ +# Riak Plugin + +The Riak plugin gathers metrics from one or more riak instances. + +### Configuration: + +```toml +# Description +[[inputs.riak]] + # Specify a list of one or more riak http servers + servers = ["http://localhost:8098"] +``` + +### Measurements & Fields: + +Riak provides one measurement named "riak", with the following fields: + +- cpu_avg1 +- cpu_avg15 +- cpu_avg5 +- memory_code +- memory_ets +- memory_processes +- memory_system +- memory_total +- node_get_fsm_objsize_100 +- node_get_fsm_objsize_95 +- node_get_fsm_objsize_99 +- node_get_fsm_objsize_mean +- node_get_fsm_objsize_median +- node_get_fsm_siblings_100 +- node_get_fsm_siblings_95 +- node_get_fsm_siblings_99 +- node_get_fsm_siblings_mean +- node_get_fsm_siblings_median +- node_get_fsm_time_100 +- node_get_fsm_time_95 +- node_get_fsm_time_99 +- node_get_fsm_time_mean +- node_get_fsm_time_median +- node_gets +- node_gets_total +- node_put_fsm_time_100 +- node_put_fsm_time_95 +- node_put_fsm_time_99 +- node_put_fsm_time_mean +- node_put_fsm_time_median +- node_puts +- node_puts_total +- pbc_active +- pbc_connects +- pbc_connects_total +- vnode_gets +- vnode_gets_total +- vnode_index_reads +- vnode_index_reads_total +- vnode_index_writes +- vnode_index_writes_total +- vnode_puts +- vnode_puts_total + +Measurements of time (such as node_get_fsm_time_mean) are measured in nanoseconds. + +### Tags: + +All measurements have the following tags: + +- server (the host:port of the given server address, ex. `127.0.0.1:8087`) +- nodename (the internal node name received, ex. `riak@127.0.0.1` ) + +### Example Output: + +``` +$ ./telegraf -config telegraf.conf -input-filter riak -test +> riak,nodename=riak@127.0.0.1,server=localhost:8098 cpu_avg1=31i,cpu_avg15=69i,cpu_avg5=51i,memory_code=11563738i,memory_ets=5925872i,memory_processes=30236069i,memory_system=93074971i,memory_total=123311040i,node_get_fsm_objsize_100=0i,node_get_fsm_objsize_95=0i,node_get_fsm_objsize_99=0i,node_get_fsm_objsize_mean=0i,node_get_fsm_objsize_median=0i,node_get_fsm_siblings_100=0i,node_get_fsm_siblings_95=0i,node_get_fsm_siblings_99=0i,node_get_fsm_siblings_mean=0i,node_get_fsm_siblings_median=0i,node_get_fsm_time_100=0i,node_get_fsm_time_95=0i,node_get_fsm_time_99=0i,node_get_fsm_time_mean=0i,node_get_fsm_time_median=0i,node_gets=0i,node_gets_total=19i,node_put_fsm_time_100=0i,node_put_fsm_time_95=0i,node_put_fsm_time_99=0i,node_put_fsm_time_mean=0i,node_put_fsm_time_median=0i,node_puts=0i,node_puts_total=0i,pbc_active=0i,pbc_connects=0i,pbc_connects_total=20i,vnode_gets=0i,vnode_gets_total=57i,vnode_index_reads=0i,vnode_index_reads_total=0i,vnode_index_writes=0i,vnode_index_writes_total=0i,vnode_puts=0i,vnode_puts_total=0i 1455913392622482332 +```gt \ No newline at end of file diff --git a/plugins/inputs/riak/riak.go b/plugins/inputs/riak/riak.go new file mode 100644 index 000000000..6750c75a0 --- /dev/null +++ b/plugins/inputs/riak/riak.go @@ -0,0 +1,196 @@ +package riak + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Type Riak gathers statistics from one or more Riak instances +type Riak struct { + // Servers is a slice of servers as http addresses (ex. http://127.0.0.1:8098) + Servers []string + + client *http.Client +} + +// NewRiak return a new instance of Riak with a default http client +func NewRiak() *Riak { + return &Riak{client: http.DefaultClient} +} + +// Type riakStats represents the data that is received from Riak +type riakStats struct { + CpuAvg1 int64 `json:"cpu_avg1"` + CpuAvg15 int64 `json:"cpu_avg15"` + CpuAvg5 int64 `json:"cpu_avg5"` + MemoryCode int64 `json:"memory_code"` + MemoryEts int64 `json:"memory_ets"` + MemoryProcesses int64 `json:"memory_processes"` + MemorySystem int64 `json:"memory_system"` + MemoryTotal int64 `json:"memory_total"` + NodeGetFsmObjsize100 int64 `json:"node_get_fsm_objsize_100"` + NodeGetFsmObjsize95 int64 `json:"node_get_fsm_objsize_95"` + NodeGetFsmObjsize99 int64 `json:"node_get_fsm_objsize_99"` + NodeGetFsmObjsizeMean int64 `json:"node_get_fsm_objsize_mean"` + NodeGetFsmObjsizeMedian int64 `json:"node_get_fsm_objsize_median"` + NodeGetFsmSiblings100 int64 `json:"node_get_fsm_siblings_100"` + NodeGetFsmSiblings95 int64 `json:"node_get_fsm_siblings_95"` + NodeGetFsmSiblings99 int64 `json:"node_get_fsm_siblings_99"` + NodeGetFsmSiblingsMean int64 `json:"node_get_fsm_siblings_mean"` + NodeGetFsmSiblingsMedian int64 `json:"node_get_fsm_siblings_median"` + NodeGetFsmTime100 int64 `json:"node_get_fsm_time_100"` + NodeGetFsmTime95 int64 `json:"node_get_fsm_time_95"` + NodeGetFsmTime99 int64 `json:"node_get_fsm_time_99"` + NodeGetFsmTimeMean int64 `json:"node_get_fsm_time_mean"` + NodeGetFsmTimeMedian int64 `json:"node_get_fsm_time_median"` + NodeGets int64 `json:"node_gets"` + NodeGetsTotal int64 `json:"node_gets_total"` + Nodename string `json:"nodename"` + NodePutFsmTime100 int64 `json:"node_put_fsm_time_100"` + NodePutFsmTime95 int64 `json:"node_put_fsm_time_95"` + NodePutFsmTime99 int64 `json:"node_put_fsm_time_99"` + NodePutFsmTimeMean int64 `json:"node_put_fsm_time_mean"` + NodePutFsmTimeMedian int64 `json:"node_put_fsm_time_median"` + NodePuts int64 `json:"node_puts"` + NodePutsTotal int64 `json:"node_puts_total"` + PbcActive int64 `json:"pbc_active"` + PbcConnects int64 `json:"pbc_connects"` + PbcConnectsTotal int64 `json:"pbc_connects_total"` + VnodeGets int64 `json:"vnode_gets"` + VnodeGetsTotal int64 `json:"vnode_gets_total"` + VnodeIndexReads int64 `json:"vnode_index_reads"` + VnodeIndexReadsTotal int64 `json:"vnode_index_reads_total"` + VnodeIndexWrites int64 `json:"vnode_index_writes"` + VnodeIndexWritesTotal int64 `json:"vnode_index_writes_total"` + VnodePuts int64 `json:"vnode_puts"` + VnodePutsTotal int64 `json:"vnode_puts_total"` +} + +// A sample configuration to only gather stats from localhost, default port. +const sampleConfig = ` + # Specify a list of one or more riak http servers + servers = ["http://localhost:8098"] +` + +// Returns a sample configuration for the plugin +func (r *Riak) SampleConfig() string { + return sampleConfig +} + +// Returns a description of the plugin +func (r *Riak) Description() string { + return "Read metrics one or many Riak servers" +} + +// Reads stats from all configured servers. +func (r *Riak) Gather(acc telegraf.Accumulator) error { + // Default to a single server at localhost (default port) if none specified + if len(r.Servers) == 0 { + r.Servers = []string{"http://127.0.0.1:8098"} + } + + // Range over all servers, gathering stats. Returns early in case of any error. + for _, s := range r.Servers { + if err := r.gatherServer(s, acc); err != nil { + return err + } + } + + return nil +} + +// Gathers stats from a single server, adding them to the accumulator +func (r *Riak) gatherServer(s string, acc telegraf.Accumulator) error { + // Parse the given URL to extract the server tag + u, err := url.Parse(s) + if err != nil { + return fmt.Errorf("riak unable to parse given server url %s: %s", s, err) + } + + // Perform the GET request to the riak /stats endpoint + resp, err := r.client.Get(s + "/stats") + if err != nil { + return err + } + defer resp.Body.Close() + + // Successful responses will always return status code 200 + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("riak responded with unexepcted status code %d", resp.StatusCode) + } + + // Decode the response JSON into a new stats struct + stats := &riakStats{} + if err := json.NewDecoder(resp.Body).Decode(stats); err != nil { + return fmt.Errorf("unable to decode riak response: %s", err) + } + + // Build a map of tags + tags := map[string]string{ + "nodename": stats.Nodename, + "server": u.Host, + } + + // Build a map of field values + fields := map[string]interface{}{ + "cpu_avg1": stats.CpuAvg1, + "cpu_avg15": stats.CpuAvg15, + "cpu_avg5": stats.CpuAvg5, + "memory_code": stats.MemoryCode, + "memory_ets": stats.MemoryEts, + "memory_processes": stats.MemoryProcesses, + "memory_system": stats.MemorySystem, + "memory_total": stats.MemoryTotal, + "node_get_fsm_objsize_100": stats.NodeGetFsmObjsize100, + "node_get_fsm_objsize_95": stats.NodeGetFsmObjsize95, + "node_get_fsm_objsize_99": stats.NodeGetFsmObjsize99, + "node_get_fsm_objsize_mean": stats.NodeGetFsmObjsizeMean, + "node_get_fsm_objsize_median": stats.NodeGetFsmObjsizeMedian, + "node_get_fsm_siblings_100": stats.NodeGetFsmSiblings100, + "node_get_fsm_siblings_95": stats.NodeGetFsmSiblings95, + "node_get_fsm_siblings_99": stats.NodeGetFsmSiblings99, + "node_get_fsm_siblings_mean": stats.NodeGetFsmSiblingsMean, + "node_get_fsm_siblings_median": stats.NodeGetFsmSiblingsMedian, + "node_get_fsm_time_100": stats.NodeGetFsmTime100, + "node_get_fsm_time_95": stats.NodeGetFsmTime95, + "node_get_fsm_time_99": stats.NodeGetFsmTime99, + "node_get_fsm_time_mean": stats.NodeGetFsmTimeMean, + "node_get_fsm_time_median": stats.NodeGetFsmTimeMedian, + "node_gets": stats.NodeGets, + "node_gets_total": stats.NodeGetsTotal, + "node_put_fsm_time_100": stats.NodePutFsmTime100, + "node_put_fsm_time_95": stats.NodePutFsmTime95, + "node_put_fsm_time_99": stats.NodePutFsmTime99, + "node_put_fsm_time_mean": stats.NodePutFsmTimeMean, + "node_put_fsm_time_median": stats.NodePutFsmTimeMedian, + "node_puts": stats.NodePuts, + "node_puts_total": stats.NodePutsTotal, + "pbc_active": stats.PbcActive, + "pbc_connects": stats.PbcConnects, + "pbc_connects_total": stats.PbcConnectsTotal, + "vnode_gets": stats.VnodeGets, + "vnode_gets_total": stats.VnodeGetsTotal, + "vnode_index_reads": stats.VnodeIndexReads, + "vnode_index_reads_total": stats.VnodeIndexReadsTotal, + "vnode_index_writes": stats.VnodeIndexWrites, + "vnode_index_writes_total": stats.VnodeIndexWritesTotal, + "vnode_puts": stats.VnodePuts, + "vnode_puts_total": stats.VnodePutsTotal, + } + + // Accumulate the tags and values + acc.AddFields("riak", fields, tags) + + return nil +} + +func init() { + inputs.Add("riak", func() telegraf.Input { + return NewRiak() + }) +} diff --git a/plugins/inputs/riak/riak_test.go b/plugins/inputs/riak/riak_test.go new file mode 100644 index 000000000..f92a98973 --- /dev/null +++ b/plugins/inputs/riak/riak_test.go @@ -0,0 +1,276 @@ +package riak + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/require" +) + +func TestRiak(t *testing.T) { + // Create a test server with the const response JSON + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, response) + })) + defer ts.Close() + + // Parse the URL of the test server, used to verify the expected host + u, err := url.Parse(ts.URL) + require.NoError(t, err) + + // Create a new Riak instance with our given test server + riak := NewRiak() + riak.Servers = []string{ts.URL} + + // Create a test accumulator + acc := &testutil.Accumulator{} + + // Gather data from the test server + err = riak.Gather(acc) + require.NoError(t, err) + + // Expect the correct values for all known keys + expectFields := map[string]interface{}{ + "cpu_avg1": int64(504), + "cpu_avg15": int64(294), + "cpu_avg5": int64(325), + "memory_code": int64(12329143), + "memory_ets": int64(17330176), + "memory_processes": int64(58454730), + "memory_system": int64(120401678), + "memory_total": int64(178856408), + "node_get_fsm_objsize_100": int64(73596), + "node_get_fsm_objsize_95": int64(36663), + "node_get_fsm_objsize_99": int64(51552), + "node_get_fsm_objsize_mean": int64(13241), + "node_get_fsm_objsize_median": int64(10365), + "node_get_fsm_siblings_100": int64(1), + "node_get_fsm_siblings_95": int64(1), + "node_get_fsm_siblings_99": int64(1), + "node_get_fsm_siblings_mean": int64(1), + "node_get_fsm_siblings_median": int64(1), + "node_get_fsm_time_100": int64(230445), + "node_get_fsm_time_95": int64(24259), + "node_get_fsm_time_99": int64(96653), + "node_get_fsm_time_mean": int64(6851), + "node_get_fsm_time_median": int64(2368), + "node_gets": int64(1116), + "node_gets_total": int64(1026058217), + "node_put_fsm_time_100": int64(267390), + "node_put_fsm_time_95": int64(38286), + "node_put_fsm_time_99": int64(84422), + "node_put_fsm_time_mean": int64(10832), + "node_put_fsm_time_median": int64(4085), + "node_puts": int64(1155), + "node_puts_total": int64(444895769), + "pbc_active": int64(360), + "pbc_connects": int64(120), + "pbc_connects_total": int64(66793268), + "vnode_gets": int64(14629), + "vnode_gets_total": int64(3748432761), + "vnode_index_reads": int64(20), + "vnode_index_reads_total": int64(3438296), + "vnode_index_writes": int64(4293), + "vnode_index_writes_total": int64(1515986619), + "vnode_puts": int64(4308), + "vnode_puts_total": int64(1519062272), + } + + // Expect the correct values for all tags + expectTags := map[string]string{ + "nodename": "riak@127.0.0.1", + "server": u.Host, + } + + acc.AssertContainsTaggedFields(t, "riak", expectFields, expectTags) +} + +var response = ` +{ + "riak_kv_stat_ts": 1455908558, + "vnode_gets": 14629, + "vnode_gets_total": 3748432761, + "vnode_puts": 4308, + "vnode_puts_total": 1519062272, + "vnode_index_refreshes": 0, + "vnode_index_refreshes_total": 0, + "vnode_index_reads": 20, + "vnode_index_reads_total": 3438296, + "vnode_index_writes": 4293, + "vnode_index_writes_total": 1515986619, + "vnode_index_writes_postings": 1, + "vnode_index_writes_postings_total": 265613, + "vnode_index_deletes": 0, + "vnode_index_deletes_total": 0, + "vnode_index_deletes_postings": 0, + "vnode_index_deletes_postings_total": 1, + "node_gets": 1116, + "node_gets_total": 1026058217, + "node_get_fsm_siblings_mean": 1, + "node_get_fsm_siblings_median": 1, + "node_get_fsm_siblings_95": 1, + "node_get_fsm_siblings_99": 1, + "node_get_fsm_siblings_100": 1, + "node_get_fsm_objsize_mean": 13241, + "node_get_fsm_objsize_median": 10365, + "node_get_fsm_objsize_95": 36663, + "node_get_fsm_objsize_99": 51552, + "node_get_fsm_objsize_100": 73596, + "node_get_fsm_time_mean": 6851, + "node_get_fsm_time_median": 2368, + "node_get_fsm_time_95": 24259, + "node_get_fsm_time_99": 96653, + "node_get_fsm_time_100": 230445, + "node_puts": 1155, + "node_puts_total": 444895769, + "node_put_fsm_time_mean": 10832, + "node_put_fsm_time_median": 4085, + "node_put_fsm_time_95": 38286, + "node_put_fsm_time_99": 84422, + "node_put_fsm_time_100": 267390, + "read_repairs": 2, + "read_repairs_total": 7918375, + "coord_redirs_total": 118238575, + "executing_mappers": 0, + "precommit_fail": 0, + "postcommit_fail": 0, + "index_fsm_create": 0, + "index_fsm_create_error": 0, + "index_fsm_active": 0, + "list_fsm_create": 0, + "list_fsm_create_error": 0, + "list_fsm_active": 0, + "pbc_active": 360, + "pbc_connects": 120, + "pbc_connects_total": 66793268, + "late_put_fsm_coordinator_ack": 152, + "node_get_fsm_active": 1, + "node_get_fsm_active_60s": 1029, + "node_get_fsm_in_rate": 21, + "node_get_fsm_out_rate": 21, + "node_get_fsm_rejected": 0, + "node_get_fsm_rejected_60s": 0, + "node_get_fsm_rejected_total": 0, + "node_put_fsm_active": 69, + "node_put_fsm_active_60s": 1053, + "node_put_fsm_in_rate": 30, + "node_put_fsm_out_rate": 31, + "node_put_fsm_rejected": 0, + "node_put_fsm_rejected_60s": 0, + "node_put_fsm_rejected_total": 0, + "read_repairs_primary_outofdate_one": 4, + "read_repairs_primary_outofdate_count": 14761552, + "read_repairs_primary_notfound_one": 0, + "read_repairs_primary_notfound_count": 65879, + "read_repairs_fallback_outofdate_one": 0, + "read_repairs_fallback_outofdate_count": 23761, + "read_repairs_fallback_notfound_one": 0, + "read_repairs_fallback_notfound_count": 455697, + "leveldb_read_block_error": 0, + "riak_pipe_stat_ts": 1455908558, + "pipeline_active": 0, + "pipeline_create_count": 0, + "pipeline_create_one": 0, + "pipeline_create_error_count": 0, + "pipeline_create_error_one": 0, + "cpu_nprocs": 362, + "cpu_avg1": 504, + "cpu_avg5": 325, + "cpu_avg15": 294, + "mem_total": 33695432704, + "mem_allocated": 33454874624, + "nodename": "riak@127.0.0.1", + "connected_nodes": [], + "sys_driver_version": "2.0", + "sys_global_heaps_size": 0, + "sys_heap_type": "private", + "sys_logical_processors": 8, + "sys_otp_release": "R15B01", + "sys_process_count": 2201, + "sys_smp_support": true, + "sys_system_version": "Erlang R15B01 (erts-5.9.1) [source] [64-bit] [smp:8:8] [async-threads:64] [kernel-poll:true]", + "sys_system_architecture": "x86_64-unknown-linux-gnu", + "sys_threads_enabled": true, + "sys_thread_pool_size": 64, + "sys_wordsize": 8, + "ring_members": [ + "riak@127.0.0.1" + ], + "ring_num_partitions": 256, + "ring_ownership": "[{'riak@127.0.0.1',256}]", + "ring_creation_size": 256, + "storage_backend": "riak_kv_eleveldb_backend", + "erlydtl_version": "0.7.0", + "riak_control_version": "1.4.12-0-g964c5db", + "cluster_info_version": "1.2.4", + "riak_search_version": "1.4.12-0-g7fe0e00", + "merge_index_version": "1.3.2-0-gcb38ee7", + "riak_kv_version": "1.4.12-0-gc6bbd66", + "sidejob_version": "0.2.0", + "riak_api_version": "1.4.12-0-gd9e1cc8", + "riak_pipe_version": "1.4.12-0-g986a226", + "riak_core_version": "1.4.10", + "bitcask_version": "1.6.8-0-gea14cb0", + "basho_stats_version": "1.0.3", + "webmachine_version": "1.10.4-0-gfcff795", + "mochiweb_version": "1.5.1p6", + "inets_version": "5.9", + "erlang_js_version": "1.2.2", + "runtime_tools_version": "1.8.8", + "os_mon_version": "2.2.9", + "riak_sysmon_version": "1.1.3", + "ssl_version": "5.0.1", + "public_key_version": "0.15", + "crypto_version": "2.1", + "sasl_version": "2.2.1", + "lager_version": "2.0.1", + "goldrush_version": "0.1.5", + "compiler_version": "4.8.1", + "syntax_tools_version": "1.6.8", + "stdlib_version": "1.18.1", + "kernel_version": "2.15.1", + "memory_total": 178856408, + "memory_processes": 58454730, + "memory_processes_used": 58371238, + "memory_system": 120401678, + "memory_atom": 586345, + "memory_atom_used": 563485, + "memory_binary": 48677920, + "memory_code": 12329143, + "memory_ets": 17330176, + "riak_core_stat_ts": 1455908559, + "ignored_gossip_total": 0, + "rings_reconciled_total": 5459, + "rings_reconciled": 0, + "gossip_received": 6, + "rejected_handoffs": 94, + "handoff_timeouts": 0, + "dropped_vnode_requests_total": 0, + "converge_delay_min": 0, + "converge_delay_max": 0, + "converge_delay_mean": 0, + "converge_delay_last": 0, + "rebalance_delay_min": 0, + "rebalance_delay_max": 0, + "rebalance_delay_mean": 0, + "rebalance_delay_last": 0, + "riak_kv_vnodes_running": 16, + "riak_kv_vnodeq_min": 0, + "riak_kv_vnodeq_median": 0, + "riak_kv_vnodeq_mean": 0, + "riak_kv_vnodeq_max": 0, + "riak_kv_vnodeq_total": 0, + "riak_pipe_vnodes_running": 16, + "riak_pipe_vnodeq_min": 0, + "riak_pipe_vnodeq_median": 0, + "riak_pipe_vnodeq_mean": 0, + "riak_pipe_vnodeq_max": 0, + "riak_pipe_vnodeq_total": 0 +} +` From 9edc25999e1efb12c51f0c1494aea0b2915e5cd1 Mon Sep 17 00:00:00 2001 From: Jason Coene Date: Fri, 19 Feb 2016 14:43:05 -0600 Subject: [PATCH 053/287] Minor formatting improvements closes #727 --- CHANGELOG.md | 7 +++++++ plugins/inputs/riak/README.md | 4 ++-- plugins/inputs/riak/riak_test.go | 1 - 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 237183a85..9b79b64dc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +## v0.10.4 [unreleased] + +### Features +- [#727](https://github.com/influxdata/telegraf/pull/727): riak input, thanks @jcoene! + +### Bugfixes + ## v0.10.3 [2016-02-18] ### Release Notes diff --git a/plugins/inputs/riak/README.md b/plugins/inputs/riak/README.md index 3aa39ae09..07f2eb09d 100644 --- a/plugins/inputs/riak/README.md +++ b/plugins/inputs/riak/README.md @@ -66,11 +66,11 @@ Measurements of time (such as node_get_fsm_time_mean) are measured in nanosecond All measurements have the following tags: - server (the host:port of the given server address, ex. `127.0.0.1:8087`) -- nodename (the internal node name received, ex. `riak@127.0.0.1` ) +- nodename (the internal node name received, ex. `riak@127.0.0.1`) ### Example Output: ``` $ ./telegraf -config telegraf.conf -input-filter riak -test > riak,nodename=riak@127.0.0.1,server=localhost:8098 cpu_avg1=31i,cpu_avg15=69i,cpu_avg5=51i,memory_code=11563738i,memory_ets=5925872i,memory_processes=30236069i,memory_system=93074971i,memory_total=123311040i,node_get_fsm_objsize_100=0i,node_get_fsm_objsize_95=0i,node_get_fsm_objsize_99=0i,node_get_fsm_objsize_mean=0i,node_get_fsm_objsize_median=0i,node_get_fsm_siblings_100=0i,node_get_fsm_siblings_95=0i,node_get_fsm_siblings_99=0i,node_get_fsm_siblings_mean=0i,node_get_fsm_siblings_median=0i,node_get_fsm_time_100=0i,node_get_fsm_time_95=0i,node_get_fsm_time_99=0i,node_get_fsm_time_mean=0i,node_get_fsm_time_median=0i,node_gets=0i,node_gets_total=19i,node_put_fsm_time_100=0i,node_put_fsm_time_95=0i,node_put_fsm_time_99=0i,node_put_fsm_time_mean=0i,node_put_fsm_time_median=0i,node_puts=0i,node_puts_total=0i,pbc_active=0i,pbc_connects=0i,pbc_connects_total=20i,vnode_gets=0i,vnode_gets_total=57i,vnode_index_reads=0i,vnode_index_reads_total=0i,vnode_index_writes=0i,vnode_index_writes_total=0i,vnode_puts=0i,vnode_puts_total=0i 1455913392622482332 -```gt \ No newline at end of file +``` \ No newline at end of file diff --git a/plugins/inputs/riak/riak_test.go b/plugins/inputs/riak/riak_test.go index f92a98973..49da4e7ea 100644 --- a/plugins/inputs/riak/riak_test.go +++ b/plugins/inputs/riak/riak_test.go @@ -8,7 +8,6 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) From c9d0ae7cf3c90d853b4b6a750430f1484037a71a Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Sat, 20 Feb 2016 11:52:43 -0700 Subject: [PATCH 054/287] Circle script: create packages if commit is tagged --- circle.yml | 3 +++ scripts/circle-test.sh | 8 +++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/circle.yml b/circle.yml index d86d46dba..67c29ef4a 100644 --- a/circle.yml +++ b/circle.yml @@ -12,6 +12,9 @@ machine: dependencies: override: - docker info + post: + - gem install fpm + - sudo apt-get install -y rpm python-boto test: override: diff --git a/scripts/circle-test.sh b/scripts/circle-test.sh index 72f297f9f..ab0956518 100755 --- a/scripts/circle-test.sh +++ b/scripts/circle-test.sh @@ -70,4 +70,10 @@ exit_if_fail telegraf -config $tmpdir/config.toml \ mv $GOPATH/bin/telegraf $CIRCLE_ARTIFACTS -exit $rc +eval "git describe --exact-match HEAD" +if [ $? -eq 0 ]; then + tag=$(git describe --exact-match HEAD) + echo $tag + exit_if_fail ./scripts/build.py --package --version=$tag --platform=linux --arch=all --upload + mv build $CIRCLE_ARTIFACTS +fi From 6994d4a7120db0fdb4ac450cad5d340ad676ab18 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Sun, 21 Feb 2016 10:41:46 -0700 Subject: [PATCH 055/287] Turn GOGC on for packaging, use go 1.5.3 --- circle.yml | 6 +++--- scripts/circle-test.sh | 2 ++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/circle.yml b/circle.yml index 67c29ef4a..8fd255a78 100644 --- a/circle.yml +++ b/circle.yml @@ -4,9 +4,9 @@ machine: post: - sudo service zookeeper stop - go version - - go version | grep 1.5.2 || sudo rm -rf /usr/local/go - - wget https://storage.googleapis.com/golang/go1.5.2.linux-amd64.tar.gz - - sudo tar -C /usr/local -xzf go1.5.2.linux-amd64.tar.gz + - go version | grep 1.5.3 || sudo rm -rf /usr/local/go + - wget https://storage.googleapis.com/golang/go1.5.3.linux-amd64.tar.gz + - sudo tar -C /usr/local -xzf go1.5.3.linux-amd64.tar.gz - go version dependencies: diff --git a/scripts/circle-test.sh b/scripts/circle-test.sh index ab0956518..16812b980 100755 --- a/scripts/circle-test.sh +++ b/scripts/circle-test.sh @@ -72,6 +72,8 @@ mv $GOPATH/bin/telegraf $CIRCLE_ARTIFACTS eval "git describe --exact-match HEAD" if [ $? -eq 0 ]; then + # Turn GOGC back on for making packages + export GOGC=100 tag=$(git describe --exact-match HEAD) echo $tag exit_if_fail ./scripts/build.py --package --version=$tag --platform=linux --arch=all --upload From a18f535f2169cd22f80f7ccfe3ad45fbf43fbc42 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Sun, 21 Feb 2016 16:00:41 -0700 Subject: [PATCH 056/287] Circle script: unset GOGC so it uses default --- scripts/circle-test.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/scripts/circle-test.sh b/scripts/circle-test.sh index 16812b980..863fc396f 100755 --- a/scripts/circle-test.sh +++ b/scripts/circle-test.sh @@ -72,8 +72,7 @@ mv $GOPATH/bin/telegraf $CIRCLE_ARTIFACTS eval "git describe --exact-match HEAD" if [ $? -eq 0 ]; then - # Turn GOGC back on for making packages - export GOGC=100 + unset GOGC tag=$(git describe --exact-match HEAD) echo $tag exit_if_fail ./scripts/build.py --package --version=$tag --platform=linux --arch=all --upload From 636dc27ead80e7f1cb69834c9af61a48ee8f454a Mon Sep 17 00:00:00 2001 From: Marcin Jasion Date: Sat, 13 Feb 2016 19:00:42 +0100 Subject: [PATCH 057/287] Dns query input plugin --- Godeps | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/dns/README.md | 51 ++++++++++++ plugins/inputs/dns/dns.go | 143 +++++++++++++++++++++++++++++++++ plugins/inputs/dns/dns_test.go | 127 +++++++++++++++++++++++++++++ 5 files changed, 323 insertions(+) create mode 100644 plugins/inputs/dns/README.md create mode 100644 plugins/inputs/dns/dns.go create mode 100644 plugins/inputs/dns/dns_test.go diff --git a/Godeps b/Godeps index d0d2194c6..d2ac1857f 100644 --- a/Godeps +++ b/Godeps @@ -50,3 +50,4 @@ gopkg.in/dancannon/gorethink.v1 6f088135ff288deb9d5546f4c71919207f891a70 gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715 gopkg.in/mgo.v2 03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64 gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4 +github.com/miekg/dns e0d84d97e59bcb6561eae269c4e94d25b66822cb \ No newline at end of file diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 79deb7c99..e6cf6b377 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -6,6 +6,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/bcache" _ "github.com/influxdata/telegraf/plugins/inputs/couchdb" _ "github.com/influxdata/telegraf/plugins/inputs/disque" + _ "github.com/influxdata/telegraf/plugins/inputs/dns" _ "github.com/influxdata/telegraf/plugins/inputs/docker" _ "github.com/influxdata/telegraf/plugins/inputs/dovecot" _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch" diff --git a/plugins/inputs/dns/README.md b/plugins/inputs/dns/README.md new file mode 100644 index 000000000..eb2f99d6c --- /dev/null +++ b/plugins/inputs/dns/README.md @@ -0,0 +1,51 @@ +# DNS Input Plugin + +The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wikipedia.org/wiki/Dig_\(command\)) + +### Configuration: + +``` +# Sample Config: +[[inputs.dns]] + ### Domains or subdomains to query + domains = ["mjasion.pl"] # required + + ### servers to query + servers = ["8.8.8.8"] # required + + ### Query record type. Posible values: A, CNAME, MX, TXT, NS. Default is "A" + recordType = "A" # optional + + ### Dns server port. 53 is default + port = 53 # optional + + ### Query timeout in seconds. Default is 2 seconds + timeout = 2 # optional +``` + +For querying more than one record type make: + +``` +[[inputs.dns]] + domains = ["mjasion.pl"] + servers = ["8.8.8.8", "8.8.4.4"] + recordType = "A" + +[[inputs.dns]] + domains = ["mjasion.pl"] + servers = ["8.8.8.8", "8.8.4.4"] + recordType = "MX" +``` + +### Tags: + +- server +- domain +- recordType + +### Example output: + +``` +./telegraf -config telegraf.conf -test -input-filter dns -test +> dns,domain=mjasion.pl,recordType=A,server=8.8.8.8 value=25.236181 1455452083165126877 +``` diff --git a/plugins/inputs/dns/dns.go b/plugins/inputs/dns/dns.go new file mode 100644 index 000000000..d8440f104 --- /dev/null +++ b/plugins/inputs/dns/dns.go @@ -0,0 +1,143 @@ +package dns + +import ( + "errors" + "fmt" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/miekg/dns" + "net" + "strconv" + "time" +) + +type Dns struct { + // Domains or subdomains to query + Domains []string + + // Server to query + Servers []string + + // Record type + RecordType string + + // DNS server port number + Port int + + // Dns query timeout in seconds. 0 means no timeout + Timeout int +} + +var sampleConfig = ` + ### Domains or subdomains to query + domains = ["mjasion.pl"] # required + + ### servers to query + servers = ["8.8.8.8"] # required + + ### Query record type. Posible values: A, CNAME, MX, TXT, NS. Default is "A" + recordType = "A" # optional + + ### Dns server port. 53 is default + port = 53 # optional + + ### Query timeout in seconds. Default is 2 seconds + timeout = 2 # optional +` + +func (d *Dns) SampleConfig() string { + return sampleConfig +} + +func (d *Dns) Description() string { + return "Query given DNS server and gives statistics" +} +func (d *Dns) Gather(acc telegraf.Accumulator) error { + d.setDefaultValues() + for _, domain := range d.Domains { + for _, server := range d.Servers { + dnsQueryTime, err := d.getDnsQueryTime(domain, server) + if err != nil { + return err + } + tags := map[string]string{ + "server": server, + "domain": domain, + "recordType": d.RecordType, + } + + acc.Add("dns", dnsQueryTime, tags) + } + } + + return nil +} + +func (d *Dns) setDefaultValues() { + if len(d.RecordType) == 0 { + d.RecordType = "A" + } + if d.Port == 0 { + d.Port = 53 + } + if d.Timeout == 0 { + d.Timeout = 2 + } +} + +func (d *Dns) getDnsQueryTime(domain string, server string) (float64, error) { + dnsQueryTime := float64(0) + + c := new(dns.Client) + c.ReadTimeout = time.Duration(d.Timeout) * time.Second + + m := new(dns.Msg) + recordType, err := d.parseRecordType() + if err != nil { + return dnsQueryTime, err + } + m.SetQuestion(dns.Fqdn(domain), recordType) + m.RecursionDesired = true + + start_time := time.Now() + r, _, err := c.Exchange(m, net.JoinHostPort(server, strconv.Itoa(d.Port))) + queryDuration := time.Since(start_time) + + if err != nil { + return dnsQueryTime, err + } + if r.Rcode != dns.RcodeSuccess { + return dnsQueryTime, errors.New(fmt.Sprintf("Invalid answer name %s after %s query for %s\n", domain, d.RecordType, domain)) + } + + dnsQueryTime = float64(queryDuration.Nanoseconds()) / 1e6 + return dnsQueryTime, nil +} + +func (d *Dns) parseRecordType() (uint16, error) { + var recordType uint16 + var error error + + switch d.RecordType { + case "A": + recordType = dns.TypeA + case "CNAME": + recordType = dns.TypeCNAME + case "MX": + recordType = dns.TypeMX + case "NS": + recordType = dns.TypeNS + case "TXT": + recordType = dns.TypeTXT + default: + error = errors.New(fmt.Sprintf("Record type %s not recognized", d.RecordType)) + } + + return recordType, error +} + +func init() { + inputs.Add("dns", func() telegraf.Input { + return &Dns{} + }) +} diff --git a/plugins/inputs/dns/dns_test.go b/plugins/inputs/dns/dns_test.go new file mode 100644 index 000000000..038ff8e40 --- /dev/null +++ b/plugins/inputs/dns/dns_test.go @@ -0,0 +1,127 @@ +package dns + +import ( + "github.com/influxdata/telegraf/testutil" + "github.com/miekg/dns" + "github.com/stretchr/testify/assert" + "testing" + "time" +) + +var servers = []string{"8.8.8.8"} +var domains = []string{"mjasion.pl"} + +func TestGathering(t *testing.T) { + var dnsConfig = Dns{ + Servers: servers, + Domains: domains, + } + var acc testutil.Accumulator + + dnsConfig.Gather(&acc) + metric, _ := acc.Get("dns") + queryTime, _ := metric.Fields["value"].(float64) + + assert.NotEqual(t, 0, queryTime) +} + +func TestGatheringMxRecord(t *testing.T) { + var dnsConfig = Dns{ + Servers: servers, + Domains: domains, + } + var acc testutil.Accumulator + dnsConfig.RecordType = "MX" + + dnsConfig.Gather(&acc) + metric, _ := acc.Get("dns") + queryTime, _ := metric.Fields["value"].(float64) + + assert.NotEqual(t, 0, queryTime) +} + +func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) { + var dnsConfig = Dns{ + Servers: servers, + Domains: domains, + } + var acc testutil.Accumulator + tags := map[string]string{ + "server": "8.8.8.8", + "domain": "mjasion.pl", + "recordType": "A", + } + fields := map[string]interface{}{} + + dnsConfig.Gather(&acc) + metric, _ := acc.Get("dns") + queryTime, _ := metric.Fields["value"].(float64) + + fields["value"] = queryTime + acc.AssertContainsTaggedFields(t, "dns", fields, tags) +} + +func TestGatheringTimeout(t *testing.T) { + var dnsConfig = Dns{ + Servers: servers, + Domains: domains, + } + var acc testutil.Accumulator + dnsConfig.Port = 60054 + dnsConfig.Timeout = 1 + var err error + + channel := make(chan error, 1) + go func() { + channel <- dnsConfig.Gather(&acc) + }() + select { + case res := <-channel: + err = res + case <-time.After(time.Second * 2): + err = nil + } + + assert.Error(t, err) + assert.Contains(t, err.Error(), "i/o timeout") +} + +func TestSettingDefaultValues(t *testing.T) { + dnsConfig := Dns{} + + dnsConfig.setDefaultValues() + + assert.Equal(t, "A", dnsConfig.RecordType, "Default record type not equal 'A'") + assert.Equal(t, 53, dnsConfig.Port, "Default port number not equal 53") + assert.Equal(t, 2, dnsConfig.Timeout, "Default timeout not equal 2") +} + +func TestRecordTypeParser(t *testing.T) { + var dnsConfig = Dns{} + var recordType uint16 + var err error + + dnsConfig.RecordType = "A" + recordType, err = dnsConfig.parseRecordType() + assert.Equal(t, dns.TypeA, recordType) + + dnsConfig.RecordType = "CNAME" + recordType, err = dnsConfig.parseRecordType() + assert.Equal(t, dns.TypeCNAME, recordType) + + dnsConfig.RecordType = "MX" + recordType, err = dnsConfig.parseRecordType() + assert.Equal(t, dns.TypeMX, recordType) + + dnsConfig.RecordType = "NS" + recordType, err = dnsConfig.parseRecordType() + assert.Equal(t, dns.TypeNS, recordType) + + dnsConfig.RecordType = "TXT" + recordType, err = dnsConfig.parseRecordType() + assert.Equal(t, dns.TypeTXT, recordType) + + dnsConfig.RecordType = "nil" + recordType, err = dnsConfig.parseRecordType() + assert.Error(t, err) +} From d43d6f2b13a3aae10c61db4e595bf71968af1ea0 Mon Sep 17 00:00:00 2001 From: Marcin Jasion Date: Mon, 15 Feb 2016 16:10:04 +0100 Subject: [PATCH 058/287] renamed plugin to dns_query and value to query_time_ms small polishings added more record types - AAAA and ANY --- README.md | 2 ++ plugins/inputs/dns/README.md | 12 ++++---- plugins/inputs/dns/{dns.go => dns_query.go} | 29 ++++++++++--------- .../dns/{dns_test.go => dns_query_test.go} | 22 +++++++++----- 4 files changed, 38 insertions(+), 27 deletions(-) rename plugins/inputs/dns/{dns.go => dns_query.go} (79%) rename plugins/inputs/dns/{dns_test.go => dns_query_test.go} (83%) diff --git a/README.md b/README.md index 9b1e7b171..34a3df7ed 100644 --- a/README.md +++ b/README.md @@ -157,6 +157,8 @@ Currently implemented sources: * bcache * couchdb * disque +* dns + * query time * docker * dovecot * elasticsearch diff --git a/plugins/inputs/dns/README.md b/plugins/inputs/dns/README.md index eb2f99d6c..564743d9c 100644 --- a/plugins/inputs/dns/README.md +++ b/plugins/inputs/dns/README.md @@ -1,4 +1,4 @@ -# DNS Input Plugin +# DNS Query Input Plugin The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wikipedia.org/wiki/Dig_\(command\)) @@ -6,7 +6,7 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wi ``` # Sample Config: -[[inputs.dns]] +[[inputs.dns_query]] ### Domains or subdomains to query domains = ["mjasion.pl"] # required @@ -26,12 +26,12 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wi For querying more than one record type make: ``` -[[inputs.dns]] +[[inputs.dns_query]] domains = ["mjasion.pl"] servers = ["8.8.8.8", "8.8.4.4"] recordType = "A" -[[inputs.dns]] +[[inputs.dns_query]] domains = ["mjasion.pl"] servers = ["8.8.8.8", "8.8.4.4"] recordType = "MX" @@ -46,6 +46,6 @@ For querying more than one record type make: ### Example output: ``` -./telegraf -config telegraf.conf -test -input-filter dns -test -> dns,domain=mjasion.pl,recordType=A,server=8.8.8.8 value=25.236181 1455452083165126877 +./telegraf -config telegraf.conf -test -input-filter dns_query -test +> dns,domain=mjasion.pl,record_type=A,server=8.8.8.8 query_time_ms=36.327025 1455548824989943491 ``` diff --git a/plugins/inputs/dns/dns.go b/plugins/inputs/dns/dns_query.go similarity index 79% rename from plugins/inputs/dns/dns.go rename to plugins/inputs/dns/dns_query.go index d8440f104..5ca58d880 100644 --- a/plugins/inputs/dns/dns.go +++ b/plugins/inputs/dns/dns_query.go @@ -19,7 +19,7 @@ type Dns struct { Servers []string // Record type - RecordType string + RecordType string `toml:"record_type"` // DNS server port number Port int @@ -35,8 +35,8 @@ var sampleConfig = ` ### servers to query servers = ["8.8.8.8"] # required - ### Query record type. Posible values: A, CNAME, MX, TXT, NS. Default is "A" - recordType = "A" # optional + ### Query record type. Posible values: A, AAAA, CNAME, MX, TXT, NS, ANY. Default is "A" + record_type = "A" # optional ### Dns server port. 53 is default port = 53 # optional @@ -61,12 +61,13 @@ func (d *Dns) Gather(acc telegraf.Accumulator) error { return err } tags := map[string]string{ - "server": server, - "domain": domain, - "recordType": d.RecordType, + "server": server, + "domain": domain, + "record_type": d.RecordType, } - acc.Add("dns", dnsQueryTime, tags) + fields := map[string]interface{}{"query_time_ms": dnsQueryTime} + acc.AddFields("dns", fields, tags) } } @@ -99,18 +100,14 @@ func (d *Dns) getDnsQueryTime(domain string, server string) (float64, error) { m.SetQuestion(dns.Fqdn(domain), recordType) m.RecursionDesired = true - start_time := time.Now() - r, _, err := c.Exchange(m, net.JoinHostPort(server, strconv.Itoa(d.Port))) - queryDuration := time.Since(start_time) - + r, rtt, err := c.Exchange(m, net.JoinHostPort(server, strconv.Itoa(d.Port))) if err != nil { return dnsQueryTime, err } if r.Rcode != dns.RcodeSuccess { return dnsQueryTime, errors.New(fmt.Sprintf("Invalid answer name %s after %s query for %s\n", domain, d.RecordType, domain)) } - - dnsQueryTime = float64(queryDuration.Nanoseconds()) / 1e6 + dnsQueryTime = float64(rtt.Nanoseconds()) / 1e6 return dnsQueryTime, nil } @@ -121,6 +118,10 @@ func (d *Dns) parseRecordType() (uint16, error) { switch d.RecordType { case "A": recordType = dns.TypeA + case "AAAA": + recordType = dns.TypeAAAA + case "ANY": + recordType = dns.TypeANY case "CNAME": recordType = dns.TypeCNAME case "MX": @@ -137,7 +138,7 @@ func (d *Dns) parseRecordType() (uint16, error) { } func init() { - inputs.Add("dns", func() telegraf.Input { + inputs.Add("dns_query", func() telegraf.Input { return &Dns{} }) } diff --git a/plugins/inputs/dns/dns_test.go b/plugins/inputs/dns/dns_query_test.go similarity index 83% rename from plugins/inputs/dns/dns_test.go rename to plugins/inputs/dns/dns_query_test.go index 038ff8e40..44a62d708 100644 --- a/plugins/inputs/dns/dns_test.go +++ b/plugins/inputs/dns/dns_query_test.go @@ -20,7 +20,7 @@ func TestGathering(t *testing.T) { dnsConfig.Gather(&acc) metric, _ := acc.Get("dns") - queryTime, _ := metric.Fields["value"].(float64) + queryTime, _ := metric.Fields["query_time_ms"].(float64) assert.NotEqual(t, 0, queryTime) } @@ -35,7 +35,7 @@ func TestGatheringMxRecord(t *testing.T) { dnsConfig.Gather(&acc) metric, _ := acc.Get("dns") - queryTime, _ := metric.Fields["value"].(float64) + queryTime, _ := metric.Fields["query_time_ms"].(float64) assert.NotEqual(t, 0, queryTime) } @@ -47,17 +47,17 @@ func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) { } var acc testutil.Accumulator tags := map[string]string{ - "server": "8.8.8.8", - "domain": "mjasion.pl", - "recordType": "A", + "server": "8.8.8.8", + "domain": "mjasion.pl", + "record_type": "A", } fields := map[string]interface{}{} dnsConfig.Gather(&acc) metric, _ := acc.Get("dns") - queryTime, _ := metric.Fields["value"].(float64) + queryTime, _ := metric.Fields["query_time_ms"].(float64) - fields["value"] = queryTime + fields["query_time_ms"] = queryTime acc.AssertContainsTaggedFields(t, "dns", fields, tags) } @@ -105,6 +105,14 @@ func TestRecordTypeParser(t *testing.T) { recordType, err = dnsConfig.parseRecordType() assert.Equal(t, dns.TypeA, recordType) + dnsConfig.RecordType = "AAAA" + recordType, err = dnsConfig.parseRecordType() + assert.Equal(t, dns.TypeAAAA, recordType) + + dnsConfig.RecordType = "ANY" + recordType, err = dnsConfig.parseRecordType() + assert.Equal(t, dns.TypeANY, recordType) + dnsConfig.RecordType = "CNAME" recordType, err = dnsConfig.parseRecordType() assert.Equal(t, dns.TypeCNAME, recordType) From bb50d7edb47fd95aa9d939f3e2a86100a336f60b Mon Sep 17 00:00:00 2001 From: Marcin Jasion Date: Sun, 21 Feb 2016 18:43:24 +0100 Subject: [PATCH 059/287] dns_query plugin fixups: - renamed plugin to dns_query - domains are optional - new record types closes #694 --- CHANGELOG.md | 1 + Makefile | 2 +- README.md | 3 +- plugins/inputs/all/all.go | 2 +- plugins/inputs/{dns => dns_query}/README.md | 28 +++--- .../inputs/{dns => dns_query}/dns_query.go | 51 ++++++---- .../{dns => dns_query}/dns_query_test.go | 93 ++++++++++++++----- 7 files changed, 122 insertions(+), 58 deletions(-) rename plugins/inputs/{dns => dns_query}/README.md (55%) rename plugins/inputs/{dns => dns_query}/dns_query.go (70%) rename plugins/inputs/{dns => dns_query}/dns_query_test.go (52%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9b79b64dc..1a55c029c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ### Features - [#727](https://github.com/influxdata/telegraf/pull/727): riak input, thanks @jcoene! +- [#694](https://github.com/influxdata/telegraf/pull/694): DNS Query input, thanks @mjasion! ### Bugfixes diff --git a/Makefile b/Makefile index 84b3a1fe0..ef316bd03 100644 --- a/Makefile +++ b/Makefile @@ -105,4 +105,4 @@ test-short: vet vet: go vet ./... -.PHONY: test +.PHONY: test test-short vet build default diff --git a/README.md b/README.md index 34a3df7ed..001974ba3 100644 --- a/README.md +++ b/README.md @@ -157,8 +157,7 @@ Currently implemented sources: * bcache * couchdb * disque -* dns - * query time +* dns query time * docker * dovecot * elasticsearch diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index e6cf6b377..5af18fcff 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -6,7 +6,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/bcache" _ "github.com/influxdata/telegraf/plugins/inputs/couchdb" _ "github.com/influxdata/telegraf/plugins/inputs/disque" - _ "github.com/influxdata/telegraf/plugins/inputs/dns" + _ "github.com/influxdata/telegraf/plugins/inputs/dns_query" _ "github.com/influxdata/telegraf/plugins/inputs/docker" _ "github.com/influxdata/telegraf/plugins/inputs/dovecot" _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch" diff --git a/plugins/inputs/dns/README.md b/plugins/inputs/dns_query/README.md similarity index 55% rename from plugins/inputs/dns/README.md rename to plugins/inputs/dns_query/README.md index 564743d9c..34b285c37 100644 --- a/plugins/inputs/dns/README.md +++ b/plugins/inputs/dns_query/README.md @@ -7,45 +7,45 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wi ``` # Sample Config: [[inputs.dns_query]] - ### Domains or subdomains to query - domains = ["mjasion.pl"] # required - - ### servers to query + ## servers to query servers = ["8.8.8.8"] # required - ### Query record type. Posible values: A, CNAME, MX, TXT, NS. Default is "A" - recordType = "A" # optional + ## Domains or subdomains to query. "." (root) is default + domains = ["."] # optional - ### Dns server port. 53 is default + ## Query record type. Posible values: A, AAAA, ANY, CNAME, MX, NS, PTR, SOA, SPF, SRV, TXT. Default is "NS" + record_type = "A" # optional + + ## Dns server port. 53 is default port = 53 # optional - ### Query timeout in seconds. Default is 2 seconds + ## Query timeout in seconds. Default is 2 seconds timeout = 2 # optional ``` For querying more than one record type make: - + ``` [[inputs.dns_query]] domains = ["mjasion.pl"] servers = ["8.8.8.8", "8.8.4.4"] - recordType = "A" + record_type = "A" [[inputs.dns_query]] domains = ["mjasion.pl"] servers = ["8.8.8.8", "8.8.4.4"] - recordType = "MX" + record_type = "MX" ``` ### Tags: -- server +- server - domain -- recordType +- record_type ### Example output: ``` ./telegraf -config telegraf.conf -test -input-filter dns_query -test -> dns,domain=mjasion.pl,record_type=A,server=8.8.8.8 query_time_ms=36.327025 1455548824989943491 +> dns_query,domain=mjasion.pl,record_type=A,server=8.8.8.8 query_time_ms=67.189842 1456082743585760680 ``` diff --git a/plugins/inputs/dns/dns_query.go b/plugins/inputs/dns_query/dns_query.go similarity index 70% rename from plugins/inputs/dns/dns_query.go rename to plugins/inputs/dns_query/dns_query.go index 5ca58d880..397482a98 100644 --- a/plugins/inputs/dns/dns_query.go +++ b/plugins/inputs/dns_query/dns_query.go @@ -1,4 +1,4 @@ -package dns +package dns_query import ( "errors" @@ -11,7 +11,7 @@ import ( "time" ) -type Dns struct { +type DnsQuery struct { // Domains or subdomains to query Domains []string @@ -29,30 +29,30 @@ type Dns struct { } var sampleConfig = ` - ### Domains or subdomains to query - domains = ["mjasion.pl"] # required - - ### servers to query + ## servers to query servers = ["8.8.8.8"] # required - ### Query record type. Posible values: A, AAAA, CNAME, MX, TXT, NS, ANY. Default is "A" + ## Domains or subdomains to query. "."(root) is default + domains = ["."] # optional + + ## Query record type. Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. Default is "NS" record_type = "A" # optional - ### Dns server port. 53 is default + ## Dns server port. 53 is default port = 53 # optional - ### Query timeout in seconds. Default is 2 seconds + ## Query timeout in seconds. Default is 2 seconds timeout = 2 # optional ` -func (d *Dns) SampleConfig() string { +func (d *DnsQuery) SampleConfig() string { return sampleConfig } -func (d *Dns) Description() string { +func (d *DnsQuery) Description() string { return "Query given DNS server and gives statistics" } -func (d *Dns) Gather(acc telegraf.Accumulator) error { +func (d *DnsQuery) Gather(acc telegraf.Accumulator) error { d.setDefaultValues() for _, domain := range d.Domains { for _, server := range d.Servers { @@ -67,26 +67,33 @@ func (d *Dns) Gather(acc telegraf.Accumulator) error { } fields := map[string]interface{}{"query_time_ms": dnsQueryTime} - acc.AddFields("dns", fields, tags) + acc.AddFields("dns_query", fields, tags) } } return nil } -func (d *Dns) setDefaultValues() { +func (d *DnsQuery) setDefaultValues() { if len(d.RecordType) == 0 { - d.RecordType = "A" + d.RecordType = "NS" } + + if len(d.Domains) == 0 { + d.Domains = []string{"."} + d.RecordType = "NS" + } + if d.Port == 0 { d.Port = 53 } + if d.Timeout == 0 { d.Timeout = 2 } } -func (d *Dns) getDnsQueryTime(domain string, server string) (float64, error) { +func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, error) { dnsQueryTime := float64(0) c := new(dns.Client) @@ -111,7 +118,7 @@ func (d *Dns) getDnsQueryTime(domain string, server string) (float64, error) { return dnsQueryTime, nil } -func (d *Dns) parseRecordType() (uint16, error) { +func (d *DnsQuery) parseRecordType() (uint16, error) { var recordType uint16 var error error @@ -128,6 +135,14 @@ func (d *Dns) parseRecordType() (uint16, error) { recordType = dns.TypeMX case "NS": recordType = dns.TypeNS + case "PTR": + recordType = dns.TypePTR + case "SOA": + recordType = dns.TypeSOA + case "SPF": + recordType = dns.TypeSPF + case "SRV": + recordType = dns.TypeSRV case "TXT": recordType = dns.TypeTXT default: @@ -139,6 +154,6 @@ func (d *Dns) parseRecordType() (uint16, error) { func init() { inputs.Add("dns_query", func() telegraf.Input { - return &Dns{} + return &DnsQuery{} }) } diff --git a/plugins/inputs/dns/dns_query_test.go b/plugins/inputs/dns_query/dns_query_test.go similarity index 52% rename from plugins/inputs/dns/dns_query_test.go rename to plugins/inputs/dns_query/dns_query_test.go index 44a62d708..ff923dae3 100644 --- a/plugins/inputs/dns/dns_query_test.go +++ b/plugins/inputs/dns_query/dns_query_test.go @@ -1,4 +1,4 @@ -package dns +package dns_query import ( "github.com/influxdata/telegraf/testutil" @@ -12,21 +12,21 @@ var servers = []string{"8.8.8.8"} var domains = []string{"mjasion.pl"} func TestGathering(t *testing.T) { - var dnsConfig = Dns{ + var dnsConfig = DnsQuery{ Servers: servers, Domains: domains, } var acc testutil.Accumulator dnsConfig.Gather(&acc) - metric, _ := acc.Get("dns") + metric, _ := acc.Get("dns_query") queryTime, _ := metric.Fields["query_time_ms"].(float64) assert.NotEqual(t, 0, queryTime) } func TestGatheringMxRecord(t *testing.T) { - var dnsConfig = Dns{ + var dnsConfig = DnsQuery{ Servers: servers, Domains: domains, } @@ -34,14 +34,36 @@ func TestGatheringMxRecord(t *testing.T) { dnsConfig.RecordType = "MX" dnsConfig.Gather(&acc) - metric, _ := acc.Get("dns") + metric, _ := acc.Get("dns_query") queryTime, _ := metric.Fields["query_time_ms"].(float64) assert.NotEqual(t, 0, queryTime) } +func TestGatheringRootDomain(t *testing.T) { + var dnsConfig = DnsQuery{ + Servers: servers, + Domains: []string{"."}, + RecordType: "MX", + } + var acc testutil.Accumulator + tags := map[string]string{ + "server": "8.8.8.8", + "domain": ".", + "record_type": "MX", + } + fields := map[string]interface{}{} + + dnsConfig.Gather(&acc) + metric, _ := acc.Get("dns_query") + queryTime, _ := metric.Fields["query_time_ms"].(float64) + + fields["query_time_ms"] = queryTime + acc.AssertContainsTaggedFields(t, "dns_query", fields, tags) +} + func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) { - var dnsConfig = Dns{ + var dnsConfig = DnsQuery{ Servers: servers, Domains: domains, } @@ -49,20 +71,20 @@ func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) { tags := map[string]string{ "server": "8.8.8.8", "domain": "mjasion.pl", - "record_type": "A", + "record_type": "NS", } fields := map[string]interface{}{} dnsConfig.Gather(&acc) - metric, _ := acc.Get("dns") + metric, _ := acc.Get("dns_query") queryTime, _ := metric.Fields["query_time_ms"].(float64) fields["query_time_ms"] = queryTime - acc.AssertContainsTaggedFields(t, "dns", fields, tags) + acc.AssertContainsTaggedFields(t, "dns_query", fields, tags) } func TestGatheringTimeout(t *testing.T) { - var dnsConfig = Dns{ + var dnsConfig = DnsQuery{ Servers: servers, Domains: domains, } @@ -87,49 +109,76 @@ func TestGatheringTimeout(t *testing.T) { } func TestSettingDefaultValues(t *testing.T) { - dnsConfig := Dns{} + dnsConfig := DnsQuery{} dnsConfig.setDefaultValues() - assert.Equal(t, "A", dnsConfig.RecordType, "Default record type not equal 'A'") + assert.Equal(t, []string{"."}, dnsConfig.Domains, "Default domain not equal \".\"") + assert.Equal(t, "NS", dnsConfig.RecordType, "Default record type not equal 'NS'") assert.Equal(t, 53, dnsConfig.Port, "Default port number not equal 53") assert.Equal(t, 2, dnsConfig.Timeout, "Default timeout not equal 2") + + dnsConfig = DnsQuery{Domains: []string{"."}} + + dnsConfig.setDefaultValues() + + assert.Equal(t, "NS", dnsConfig.RecordType, "Default record type not equal 'NS'") } func TestRecordTypeParser(t *testing.T) { - var dnsConfig = Dns{} + var dnsConfig = DnsQuery{} var recordType uint16 - var err error dnsConfig.RecordType = "A" - recordType, err = dnsConfig.parseRecordType() + recordType, _ = dnsConfig.parseRecordType() assert.Equal(t, dns.TypeA, recordType) dnsConfig.RecordType = "AAAA" - recordType, err = dnsConfig.parseRecordType() + recordType, _ = dnsConfig.parseRecordType() assert.Equal(t, dns.TypeAAAA, recordType) dnsConfig.RecordType = "ANY" - recordType, err = dnsConfig.parseRecordType() + recordType, _ = dnsConfig.parseRecordType() assert.Equal(t, dns.TypeANY, recordType) dnsConfig.RecordType = "CNAME" - recordType, err = dnsConfig.parseRecordType() + recordType, _ = dnsConfig.parseRecordType() assert.Equal(t, dns.TypeCNAME, recordType) dnsConfig.RecordType = "MX" - recordType, err = dnsConfig.parseRecordType() + recordType, _ = dnsConfig.parseRecordType() assert.Equal(t, dns.TypeMX, recordType) dnsConfig.RecordType = "NS" - recordType, err = dnsConfig.parseRecordType() + recordType, _ = dnsConfig.parseRecordType() assert.Equal(t, dns.TypeNS, recordType) + dnsConfig.RecordType = "PTR" + recordType, _ = dnsConfig.parseRecordType() + assert.Equal(t, dns.TypePTR, recordType) + + dnsConfig.RecordType = "SOA" + recordType, _ = dnsConfig.parseRecordType() + assert.Equal(t, dns.TypeSOA, recordType) + + dnsConfig.RecordType = "SPF" + recordType, _ = dnsConfig.parseRecordType() + assert.Equal(t, dns.TypeSPF, recordType) + + dnsConfig.RecordType = "SRV" + recordType, _ = dnsConfig.parseRecordType() + assert.Equal(t, dns.TypeSRV, recordType) + dnsConfig.RecordType = "TXT" - recordType, err = dnsConfig.parseRecordType() + recordType, _ = dnsConfig.parseRecordType() assert.Equal(t, dns.TypeTXT, recordType) +} + +func TestRecordTypeParserError(t *testing.T) { + var dnsConfig = DnsQuery{} + var err error dnsConfig.RecordType = "nil" - recordType, err = dnsConfig.parseRecordType() + _, err = dnsConfig.parseRecordType() assert.Error(t, err) } From 29016822fde9b3bce5aa1ccff7bb42f220e5fff5 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Sun, 21 Feb 2016 16:35:56 -0700 Subject: [PATCH 060/287] Sensors input currently only available if built from source --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 001974ba3..9977588fa 100644 --- a/README.md +++ b/README.md @@ -189,12 +189,12 @@ Currently implemented sources: * redis * rethinkdb * riak +* sensors (only available if built from source) +* snmp * sql server (microsoft) * twemproxy * zfs * zookeeper -* sensors -* snmp * win_perf_counters (windows performance counters) * system * cpu From 9ce8d788351533b0c8ac9d456ff466b5cd56c533 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 22 Feb 2016 11:40:27 -0700 Subject: [PATCH 061/287] Set running output quiet mode in agent connect func closes #701 --- CHANGELOG.md | 1 + agent/agent.go | 2 ++ internal/config/config.go | 1 - 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1a55c029c..b85e200a6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ - [#694](https://github.com/influxdata/telegraf/pull/694): DNS Query input, thanks @mjasion! ### Bugfixes +- [#701](https://github.com/influxdata/telegraf/pull/701): output write count shouldnt print in quiet mode. ## v0.10.3 [2016-02-18] diff --git a/agent/agent.go b/agent/agent.go index 42ade45f2..8a8800cc2 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -44,6 +44,8 @@ func NewAgent(config *config.Config) (*Agent, error) { // Connect connects to all configured outputs func (a *Agent) Connect() error { for _, o := range a.Config.Outputs { + o.Quiet = a.Config.Agent.Quiet + switch ot := o.Output.(type) { case telegraf.ServiceOutput: if err := ot.Start(); err != nil { diff --git a/internal/config/config.go b/internal/config/config.go index fc374d628..2dfe13580 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -429,7 +429,6 @@ func (c *Config) addOutput(name string, table *ast.Table) error { ro.MetricBufferLimit = c.Agent.MetricBufferLimit } ro.FlushBufferWhenFull = c.Agent.FlushBufferWhenFull - ro.Quiet = c.Agent.Quiet c.Outputs = append(c.Outputs, ro) return nil } From d00550c45f56e92cd158ec654fe33cf89e0f8849 Mon Sep 17 00:00:00 2001 From: Thibault Cohen Date: Sat, 20 Feb 2016 00:35:12 -0500 Subject: [PATCH 062/287] Add metric pass/drop filter --- agent/accumulator.go | 11 ++- docs/CONFIGURATION.md | 28 +++++-- internal/config/config.go | 44 +++++++++- internal/config/config_test.go | 12 ++- internal/config/testdata/single_plugin.toml | 6 +- .../config/testdata/subconfig/memcached.conf | 2 + internal/models/filter.go | 49 +++++++++-- internal/models/filter_test.go | 84 +++++++++++++++++-- 8 files changed, 202 insertions(+), 34 deletions(-) diff --git a/agent/accumulator.go b/agent/accumulator.go index 9361ad82e..b04ff2b53 100644 --- a/agent/accumulator.go +++ b/agent/accumulator.go @@ -43,6 +43,11 @@ func (ac *accumulator) Add( ) { fields := make(map[string]interface{}) fields["value"] = value + + if !ac.inputConfig.Filter.ShouldNamePass(measurement) { + return + } + ac.AddFields(measurement, fields, tags, t...) } @@ -56,6 +61,10 @@ func (ac *accumulator) AddFields( return } + if !ac.inputConfig.Filter.ShouldNamePass(measurement) { + return + } + if !ac.inputConfig.Filter.ShouldTagsPass(tags) { return } @@ -92,7 +101,7 @@ func (ac *accumulator) AddFields( for k, v := range fields { // Filter out any filtered fields if ac.inputConfig != nil { - if !ac.inputConfig.Filter.ShouldPass(k) { + if !ac.inputConfig.Filter.ShouldFieldsPass(k) { continue } } diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index f4214b5d4..a35c98efc 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -58,10 +58,14 @@ you can configure that here. There are also filters that can be configured per input: -* **pass**: An array of strings that is used to filter metrics generated by the +* **namepass**: An array of strings that is used to filter metrics generated by the +current input. Each string in the array is tested as a glob match against +measurement names and if it matches, the field is emitted. +* **namedrop**: The inverse of pass, if a measurement name matches, it is not emitted. +* **fieldpass**: An array of strings that is used to filter metrics generated by the current input. Each string in the array is tested as a glob match against field names and if it matches, the field is emitted. -* **drop**: The inverse of pass, if a field name matches, it is not emitted. +* **fielddrop**: The inverse of pass, if a field name matches, it is not emitted. * **tagpass**: tag names and arrays of strings that are used to filter measurements by the current input. Each string in the array is tested as a glob match against the tag name, and if it matches the measurement is emitted. @@ -117,18 +121,32 @@ fields which begin with `time_`. path = [ "/opt", "/home*" ] ``` -#### Input Config: pass and drop +#### Input Config: fieldpass and fielddrop ```toml # Drop all metrics for guest & steal CPU usage [[inputs.cpu]] percpu = false totalcpu = true - drop = ["usage_guest", "usage_steal"] + fielddrop = ["usage_guest", "usage_steal"] # Only store inode related metrics for disks [[inputs.disk]] - pass = ["inodes*"] + fieldpass = ["inodes*"] +``` + +#### Input Config: namepass and namedrop + +```toml +# Drop all metrics about containers for kubelet +[[inputs.prometheus]] + urls = ["http://kube-node-1:4194/metrics"] + namedrop = ["container_"] + +# Only store rest client related metrics for kubelet +[[inputs.prometheus]] + urls = ["http://kube-node-1:4194/metrics"] + namepass = ["rest_client_"] ``` #### Input config: prefix, suffix, and override diff --git a/internal/config/config.go b/internal/config/config.go index 2dfe13580..45c085e88 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -483,12 +483,12 @@ func (c *Config) addInput(name string, table *ast.Table) error { func buildFilter(tbl *ast.Table) internal_models.Filter { f := internal_models.Filter{} - if node, ok := tbl.Fields["pass"]; ok { + if node, ok := tbl.Fields["namepass"]; ok { if kv, ok := node.(*ast.KeyValue); ok { if ary, ok := kv.Value.(*ast.Array); ok { for _, elem := range ary.Value { if str, ok := elem.(*ast.String); ok { - f.Pass = append(f.Pass, str.Value) + f.NamePass = append(f.NamePass, str.Value) f.IsActive = true } } @@ -496,12 +496,12 @@ func buildFilter(tbl *ast.Table) internal_models.Filter { } } - if node, ok := tbl.Fields["drop"]; ok { + if node, ok := tbl.Fields["namedrop"]; ok { if kv, ok := node.(*ast.KeyValue); ok { if ary, ok := kv.Value.(*ast.Array); ok { for _, elem := range ary.Value { if str, ok := elem.(*ast.String); ok { - f.Drop = append(f.Drop, str.Value) + f.NameDrop = append(f.NameDrop, str.Value) f.IsActive = true } } @@ -509,6 +509,38 @@ func buildFilter(tbl *ast.Table) internal_models.Filter { } } + fields := []string{"pass", "fieldpass"} + for _, field := range fields { + if node, ok := tbl.Fields[field]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if ary, ok := kv.Value.(*ast.Array); ok { + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + f.FieldPass = append(f.FieldPass, str.Value) + f.IsActive = true + } + } + } + } + } + } + + fields = []string{"drop", "fielddrop"} + for _, field := range fields { + if node, ok := tbl.Fields[field]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if ary, ok := kv.Value.(*ast.Array); ok { + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + f.FieldDrop = append(f.FieldDrop, str.Value) + f.IsActive = true + } + } + } + } + } + } + if node, ok := tbl.Fields["tagpass"]; ok { if subtbl, ok := node.(*ast.Table); ok { for name, val := range subtbl.Fields { @@ -547,6 +579,10 @@ func buildFilter(tbl *ast.Table) internal_models.Filter { } } + delete(tbl.Fields, "namedrop") + delete(tbl.Fields, "namepass") + delete(tbl.Fields, "fielddrop") + delete(tbl.Fields, "fieldpass") delete(tbl.Fields, "drop") delete(tbl.Fields, "pass") delete(tbl.Fields, "tagdrop") diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 0e9f2c967..f0add8b98 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -23,8 +23,10 @@ func TestConfig_LoadSingleInput(t *testing.T) { mConfig := &internal_models.InputConfig{ Name: "memcached", Filter: internal_models.Filter{ - Drop: []string{"other", "stuff"}, - Pass: []string{"some", "strings"}, + NameDrop: []string{"metricname2"}, + NamePass: []string{"metricname1"}, + FieldDrop: []string{"other", "stuff"}, + FieldPass: []string{"some", "strings"}, TagDrop: []internal_models.TagFilter{ internal_models.TagFilter{ Name: "badtag", @@ -66,8 +68,10 @@ func TestConfig_LoadDirectory(t *testing.T) { mConfig := &internal_models.InputConfig{ Name: "memcached", Filter: internal_models.Filter{ - Drop: []string{"other", "stuff"}, - Pass: []string{"some", "strings"}, + NameDrop: []string{"metricname2"}, + NamePass: []string{"metricname1"}, + FieldDrop: []string{"other", "stuff"}, + FieldPass: []string{"some", "strings"}, TagDrop: []internal_models.TagFilter{ internal_models.TagFilter{ Name: "badtag", diff --git a/internal/config/testdata/single_plugin.toml b/internal/config/testdata/single_plugin.toml index 6670f6b2f..664937b25 100644 --- a/internal/config/testdata/single_plugin.toml +++ b/internal/config/testdata/single_plugin.toml @@ -1,7 +1,9 @@ [[inputs.memcached]] servers = ["localhost"] - pass = ["some", "strings"] - drop = ["other", "stuff"] + namepass = ["metricname1"] + namedrop = ["metricname2"] + fieldpass = ["some", "strings"] + fielddrop = ["other", "stuff"] interval = "5s" [inputs.memcached.tagpass] goodtag = ["mytag"] diff --git a/internal/config/testdata/subconfig/memcached.conf b/internal/config/testdata/subconfig/memcached.conf index 4c43febc7..2cd07d15d 100644 --- a/internal/config/testdata/subconfig/memcached.conf +++ b/internal/config/testdata/subconfig/memcached.conf @@ -1,5 +1,7 @@ [[inputs.memcached]] servers = ["192.168.1.1"] + namepass = ["metricname1"] + namedrop = ["metricname2"] pass = ["some", "strings"] drop = ["other", "stuff"] interval = "5s" diff --git a/internal/models/filter.go b/internal/models/filter.go index 9b4f2ba90..48143d3ac 100644 --- a/internal/models/filter.go +++ b/internal/models/filter.go @@ -15,8 +15,11 @@ type TagFilter struct { // Filter containing drop/pass and tagdrop/tagpass rules type Filter struct { - Drop []string - Pass []string + NameDrop []string + NamePass []string + + FieldDrop []string + FieldPass []string TagDrop []TagFilter TagPass []TagFilter @@ -25,17 +28,17 @@ type Filter struct { } func (f Filter) ShouldMetricPass(metric telegraf.Metric) bool { - if f.ShouldPass(metric.Name()) && f.ShouldTagsPass(metric.Tags()) { + if f.ShouldFieldsPass(metric.Name()) && f.ShouldTagsPass(metric.Tags()) { return true } return false } -// ShouldPass returns true if the metric should pass, false if should drop +// ShouldFieldsPass returns true if the metric should pass, false if should drop // based on the drop/pass filter parameters -func (f Filter) ShouldPass(key string) bool { - if f.Pass != nil { - for _, pat := range f.Pass { +func (f Filter) ShouldNamePass(key string) bool { + if f.NamePass != nil { + for _, pat := range f.NamePass { // TODO remove HasPrefix check, leaving it for now for legacy support. // Cam, 2015-12-07 if strings.HasPrefix(key, pat) || internal.Glob(pat, key) { @@ -45,8 +48,36 @@ func (f Filter) ShouldPass(key string) bool { return false } - if f.Drop != nil { - for _, pat := range f.Drop { + if f.NameDrop != nil { + for _, pat := range f.NameDrop { + // TODO remove HasPrefix check, leaving it for now for legacy support. + // Cam, 2015-12-07 + if strings.HasPrefix(key, pat) || internal.Glob(pat, key) { + return false + } + } + + return true + } + return true +} + +// ShouldFieldsPass returns true if the metric should pass, false if should drop +// based on the drop/pass filter parameters +func (f Filter) ShouldFieldsPass(key string) bool { + if f.FieldPass != nil { + for _, pat := range f.FieldPass { + // TODO remove HasPrefix check, leaving it for now for legacy support. + // Cam, 2015-12-07 + if strings.HasPrefix(key, pat) || internal.Glob(pat, key) { + return true + } + } + return false + } + + if f.FieldDrop != nil { + for _, pat := range f.FieldDrop { // TODO remove HasPrefix check, leaving it for now for legacy support. // Cam, 2015-12-07 if strings.HasPrefix(key, pat) || internal.Glob(pat, key) { diff --git a/internal/models/filter_test.go b/internal/models/filter_test.go index 320c38407..c69398494 100644 --- a/internal/models/filter_test.go +++ b/internal/models/filter_test.go @@ -18,15 +18,15 @@ func TestFilter_Empty(t *testing.T) { } for _, measurement := range measurements { - if !f.ShouldPass(measurement) { + if !f.ShouldFieldsPass(measurement) { t.Errorf("Expected measurement %s to pass", measurement) } } } -func TestFilter_Pass(t *testing.T) { +func TestFilter_NamePass(t *testing.T) { f := Filter{ - Pass: []string{"foo*", "cpu_usage_idle"}, + NamePass: []string{"foo*", "cpu_usage_idle"}, } passes := []string{ @@ -45,21 +45,21 @@ func TestFilter_Pass(t *testing.T) { } for _, measurement := range passes { - if !f.ShouldPass(measurement) { + if !f.ShouldNamePass(measurement) { t.Errorf("Expected measurement %s to pass", measurement) } } for _, measurement := range drops { - if f.ShouldPass(measurement) { + if f.ShouldNamePass(measurement) { t.Errorf("Expected measurement %s to drop", measurement) } } } -func TestFilter_Drop(t *testing.T) { +func TestFilter_NameDrop(t *testing.T) { f := Filter{ - Drop: []string{"foo*", "cpu_usage_idle"}, + NameDrop: []string{"foo*", "cpu_usage_idle"}, } drops := []string{ @@ -78,13 +78,79 @@ func TestFilter_Drop(t *testing.T) { } for _, measurement := range passes { - if !f.ShouldPass(measurement) { + if !f.ShouldNamePass(measurement) { t.Errorf("Expected measurement %s to pass", measurement) } } for _, measurement := range drops { - if f.ShouldPass(measurement) { + if f.ShouldNamePass(measurement) { + t.Errorf("Expected measurement %s to drop", measurement) + } + } +} + +func TestFilter_FieldPass(t *testing.T) { + f := Filter{ + FieldPass: []string{"foo*", "cpu_usage_idle"}, + } + + passes := []string{ + "foo", + "foo_bar", + "foo.bar", + "foo-bar", + "cpu_usage_idle", + } + + drops := []string{ + "bar", + "barfoo", + "bar_foo", + "cpu_usage_busy", + } + + for _, measurement := range passes { + if !f.ShouldFieldsPass(measurement) { + t.Errorf("Expected measurement %s to pass", measurement) + } + } + + for _, measurement := range drops { + if f.ShouldFieldsPass(measurement) { + t.Errorf("Expected measurement %s to drop", measurement) + } + } +} + +func TestFilter_FieldDrop(t *testing.T) { + f := Filter{ + FieldDrop: []string{"foo*", "cpu_usage_idle"}, + } + + drops := []string{ + "foo", + "foo_bar", + "foo.bar", + "foo-bar", + "cpu_usage_idle", + } + + passes := []string{ + "bar", + "barfoo", + "bar_foo", + "cpu_usage_busy", + } + + for _, measurement := range passes { + if !f.ShouldFieldsPass(measurement) { + t.Errorf("Expected measurement %s to pass", measurement) + } + } + + for _, measurement := range drops { + if f.ShouldFieldsPass(measurement) { t.Errorf("Expected measurement %s to drop", measurement) } } From 5aef725c13b6c6ccfa52e528ecb37f83777d18df Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 22 Feb 2016 13:35:06 -0700 Subject: [PATCH 063/287] Change pass/drop to namepass/namedrop for outputs closes #730 --- docs/CONFIGURATION.md | 6 +++--- etc/telegraf.conf | 2 +- internal/config/config.go | 10 +++++++++- internal/models/filter.go | 2 +- plugins/inputs/system/cpu.go | 2 +- 5 files changed, 15 insertions(+), 7 deletions(-) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index a35c98efc..58dbdf261 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -209,7 +209,7 @@ configuring each output sink is different, but examples can be found by running `telegraf -sample-config`. Outputs also support the same configurable options as inputs -(pass, drop, tagpass, tagdrop) +(namepass, namedrop, tagpass, tagdrop) ```toml [[outputs.influxdb]] @@ -217,14 +217,14 @@ Outputs also support the same configurable options as inputs database = "telegraf" precision = "s" # Drop all measurements that start with "aerospike" - drop = ["aerospike*"] + namedrop = ["aerospike*"] [[outputs.influxdb]] urls = [ "http://localhost:8086" ] database = "telegraf-aerospike-data" precision = "s" # Only accept aerospike data: - pass = ["aerospike*"] + namepass = ["aerospike*"] [[outputs.influxdb]] urls = [ "http://localhost:8086" ] diff --git a/etc/telegraf.conf b/etc/telegraf.conf index eaf66db96..f626eabc8 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -89,7 +89,7 @@ # Whether to report total system cpu stats or not totalcpu = true # Comment this line if you want the raw CPU time metrics - drop = ["time_*"] + fielddrop = ["time_*"] # Read metrics about disk usage by mount point [[inputs.disk]] diff --git a/internal/config/config.go b/internal/config/config.go index 45c085e88..09f878f50 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -477,7 +477,8 @@ func (c *Config) addInput(name string, table *ast.Table) error { return nil } -// buildFilter builds a Filter (tagpass/tagdrop/pass/drop) to +// buildFilter builds a Filter +// (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to // be inserted into the internal_models.OutputConfig/internal_models.InputConfig to be used for prefix // filtering on tags and measurements func buildFilter(tbl *ast.Table) internal_models.Filter { @@ -752,5 +753,12 @@ func buildOutput(name string, tbl *ast.Table) (*internal_models.OutputConfig, er Name: name, Filter: buildFilter(tbl), } + // Outputs don't support FieldDrop/FieldPass, so set to NameDrop/NamePass + if len(oc.Filter.FieldDrop) > 0 { + oc.Filter.NameDrop = oc.Filter.FieldDrop + } + if len(oc.Filter.FieldPass) > 0 { + oc.Filter.NamePass = oc.Filter.FieldPass + } return oc, nil } diff --git a/internal/models/filter.go b/internal/models/filter.go index 48143d3ac..e2b1377f4 100644 --- a/internal/models/filter.go +++ b/internal/models/filter.go @@ -28,7 +28,7 @@ type Filter struct { } func (f Filter) ShouldMetricPass(metric telegraf.Metric) bool { - if f.ShouldFieldsPass(metric.Name()) && f.ShouldTagsPass(metric.Tags()) { + if f.ShouldNamePass(metric.Name()) && f.ShouldTagsPass(metric.Tags()) { return true } return false diff --git a/plugins/inputs/system/cpu.go b/plugins/inputs/system/cpu.go index 333339458..035b8e1f5 100644 --- a/plugins/inputs/system/cpu.go +++ b/plugins/inputs/system/cpu.go @@ -33,7 +33,7 @@ var sampleConfig = ` ## Whether to report total system cpu stats or not totalcpu = true ## Comment this line if you want the raw CPU time metrics - drop = ["time_*"] + fielddrop = ["time_*"] ` func (_ *CPUStats) SampleConfig() string { From ed684be18d919d2fdced0d984f43a3c0c20560fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20DEHAY?= Date: Fri, 19 Feb 2016 19:25:17 +0100 Subject: [PATCH 064/287] Adding pgrep user support --- plugins/inputs/procstat/procstat.go | 33 +++++++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index d3f18d5ea..c1747b1e9 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -19,6 +19,7 @@ type Procstat struct { Exe string Pattern string Prefix string + User string pidmap map[int32]*process.Process } @@ -37,6 +38,8 @@ var sampleConfig = ` # exe = "nginx" ## pattern as argument for pgrep (ie, pgrep -f ) # pattern = "nginx" + ## user as argument for pgrep (ie, pgrep -u ) + # user = "nginx" ## Field name prefix prefix = "" @@ -53,8 +56,8 @@ func (_ *Procstat) Description() string { func (p *Procstat) Gather(acc telegraf.Accumulator) error { err := p.createProcesses() if err != nil { - log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] %s", - p.Exe, p.PidFile, p.Pattern, err.Error()) + log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s", + p.Exe, p.PidFile, p.Pattern, p.User, err.Error()) } else { for _, proc := range p.pidmap { p := NewSpecProcessor(p.Prefix, acc, proc) @@ -103,6 +106,8 @@ func (p *Procstat) getAllPids() ([]int32, error) { pids, err = pidsFromExe(p.Exe) } else if p.Pattern != "" { pids, err = pidsFromPattern(p.Pattern) + } else if p.User != "" { + pids, err = pidsFromUser(p.User) } else { err = fmt.Errorf("Either exe, pid_file or pattern has to be specified") } @@ -175,6 +180,30 @@ func pidsFromPattern(pattern string) ([]int32, error) { return out, outerr } +func pidsFromUser(user string) ([]int32, error) { + var out []int32 + var outerr error + bin, err := exec.LookPath("pgrep") + if err != nil { + return out, fmt.Errorf("Couldn't find pgrep binary: %s", err) + } + pgrep, err := exec.Command(bin, "-u", user).Output() + if err != nil { + return out, fmt.Errorf("Failed to execute %s. Error: '%s'", bin, err) + } else { + pids := strings.Fields(string(pgrep)) + for _, pid := range pids { + ipid, err := strconv.Atoi(pid) + if err == nil { + out = append(out, int32(ipid)) + } else { + outerr = err + } + } + } + return out, outerr +} + func init() { inputs.Add("procstat", func() telegraf.Input { return NewProcstat() From 9687f71a176a7b9b549f223b5f160ce14d90f7f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20DEHAY?= Date: Fri, 19 Feb 2016 19:27:47 +0100 Subject: [PATCH 065/287] README updated for pgrep user support closes #724 --- CHANGELOG.md | 1 + plugins/inputs/procstat/README.md | 3 ++- plugins/inputs/procstat/procstat.go | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b85e200a6..4524b9817 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ### Features - [#727](https://github.com/influxdata/telegraf/pull/727): riak input, thanks @jcoene! - [#694](https://github.com/influxdata/telegraf/pull/694): DNS Query input, thanks @mjasion! +- [#724](https://github.com/influxdata/telegraf/pull/724): username matching for procstat input, thanks @zorel! ### Bugfixes - [#701](https://github.com/influxdata/telegraf/pull/701): output write count shouldnt print in quiet mode. diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index 0c37af509..90552c2a6 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -7,7 +7,8 @@ individual process using their /proc data. The plugin will tag processes by their PID and their process name. -Processes can be specified either by pid file or by executable name. Procstat +Processes can be specified either by pid file, by executable name, by command +line pattern matching, or by username (in this order or priority. Procstat plugin will use `pgrep` when executable name is provided to obtain the pid. Proctstas plugin will transmit IO, memory, cpu, file descriptor related measurements for every process specified. A prefix can be set to isolate diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index c1747b1e9..e5ae207fe 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -56,7 +56,7 @@ func (_ *Procstat) Description() string { func (p *Procstat) Gather(acc telegraf.Accumulator) error { err := p.createProcesses() if err != nil { - log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s", + log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s", p.Exe, p.PidFile, p.Pattern, p.User, err.Error()) } else { for _, proc := range p.pidmap { From 47ad73cc891dc6c9ae2cc4a53a7ebf5881e7229b Mon Sep 17 00:00:00 2001 From: Pierre Fersing Date: Mon, 22 Feb 2016 18:29:10 +0100 Subject: [PATCH 066/287] Ignore boring filesystems from disk plugin Modern Linux has a lots of boring filesystem (tmpfs on /dev, devpts on /dev/pts, lots of cgroup on /sys/fs/cgroup/*, ...). * Ignore filesystem with 0 bytes (this cover cgroup, devpts and other). * Add IgnoreFS to ignore additional FS by their type. Add tmpfs and devtmpfs as default ignored type. --- etc/telegraf.conf | 4 ++++ plugins/inputs/system/disk.go | 11 ++++++++++- plugins/inputs/system/disk_test.go | 6 +++--- plugins/inputs/system/mock_PS.go | 4 ++-- plugins/inputs/system/ps.go | 19 +++++++++++++++---- 5 files changed, 34 insertions(+), 10 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index f626eabc8..d8a295442 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -97,6 +97,10 @@ # Setting mountpoints will restrict the stats to the specified mountpoints. # mount_points=["/"] + # Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually + # present on /run, /var/run, /dev/shm or /dev). + ignore_fs = ["tmpfs", "devtmpfs"] + # Read metrics about disk IO by device [[inputs.diskio]] # By default, telegraf will gather stats for all devices including diff --git a/plugins/inputs/system/disk.go b/plugins/inputs/system/disk.go index 0488c839a..f93558de2 100644 --- a/plugins/inputs/system/disk.go +++ b/plugins/inputs/system/disk.go @@ -14,6 +14,7 @@ type DiskStats struct { Mountpoints []string MountPoints []string + IgnoreFS []string `toml:"ignore_fs"` } func (_ *DiskStats) Description() string { @@ -24,6 +25,10 @@ var diskSampleConfig = ` ## By default, telegraf gather stats for all mountpoints. ## Setting mountpoints will restrict the stats to the specified mountpoints. # mount_points = ["/"] + + # Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually + # present on /run, /var/run, /dev/shm or /dev). + ignore_fs = ["tmpfs", "devtmpfs"] ` func (_ *DiskStats) SampleConfig() string { @@ -36,12 +41,16 @@ func (s *DiskStats) Gather(acc telegraf.Accumulator) error { s.MountPoints = s.Mountpoints } - disks, err := s.ps.DiskUsage(s.MountPoints) + disks, err := s.ps.DiskUsage(s.MountPoints, s.IgnoreFS) if err != nil { return fmt.Errorf("error getting disk usage info: %s", err) } for _, du := range disks { + if du.Total == 0 { + // Skip dummy filesystem (procfs, cgroupfs, ...) + continue + } tags := map[string]string{ "path": du.Path, "fstype": du.Fstype, diff --git a/plugins/inputs/system/disk_test.go b/plugins/inputs/system/disk_test.go index 86537be23..0a722148b 100644 --- a/plugins/inputs/system/disk_test.go +++ b/plugins/inputs/system/disk_test.go @@ -50,9 +50,9 @@ func TestDiskStats(t *testing.T) { }, } - mps.On("DiskUsage", []string(nil)).Return(duAll, nil) - mps.On("DiskUsage", []string{"/", "/dev"}).Return(duFiltered, nil) - mps.On("DiskUsage", []string{"/", "/home"}).Return(duAll, nil) + mps.On("DiskUsage", []string(nil), []string(nil)).Return(duAll, nil) + mps.On("DiskUsage", []string{"/", "/dev"}, []string(nil)).Return(duFiltered, nil) + mps.On("DiskUsage", []string{"/", "/home"}, []string(nil)).Return(duAll, nil) err = (&DiskStats{ps: &mps}).Gather(&acc) require.NoError(t, err) diff --git a/plugins/inputs/system/mock_PS.go b/plugins/inputs/system/mock_PS.go index 6e9a5f93e..fd6afda0f 100644 --- a/plugins/inputs/system/mock_PS.go +++ b/plugins/inputs/system/mock_PS.go @@ -33,8 +33,8 @@ func (m *MockPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) { return r0, r1 } -func (m *MockPS) DiskUsage(mountPointFilter []string) ([]*disk.DiskUsageStat, error) { - ret := m.Called(mountPointFilter) +func (m *MockPS) DiskUsage(mountPointFilter []string, fstypeExclude []string) ([]*disk.DiskUsageStat, error) { + ret := m.Called(mountPointFilter, fstypeExclude) r0 := ret.Get(0).([]*disk.DiskUsageStat) r1 := ret.Error(1) diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go index 0a505bfc4..f1a1b27d7 100644 --- a/plugins/inputs/system/ps.go +++ b/plugins/inputs/system/ps.go @@ -14,7 +14,7 @@ import ( type PS interface { CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) - DiskUsage(mountPointFilter []string) ([]*disk.DiskUsageStat, error) + DiskUsage(mountPointFilter []string, fstypeExclude []string) ([]*disk.DiskUsageStat, error) NetIO() ([]net.NetIOCountersStat, error) NetProto() ([]net.NetProtoCountersStat, error) DiskIO() (map[string]disk.DiskIOCountersStat, error) @@ -53,6 +53,7 @@ func (s *systemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) { func (s *systemPS) DiskUsage( mountPointFilter []string, + fstypeExclude []string, ) ([]*disk.DiskUsageStat, error) { parts, err := disk.DiskPartitions(true) if err != nil { @@ -60,9 +61,13 @@ func (s *systemPS) DiskUsage( } // Make a "set" out of the filter slice - filterSet := make(map[string]bool) + mountPointFilterSet := make(map[string]bool) for _, filter := range mountPointFilter { - filterSet[filter] = true + mountPointFilterSet[filter] = true + } + fstypeExcludeSet := make(map[string]bool) + for _, filter := range fstypeExclude { + fstypeExcludeSet[filter] = true } var usage []*disk.DiskUsageStat @@ -71,7 +76,7 @@ func (s *systemPS) DiskUsage( if len(mountPointFilter) > 0 { // If the mount point is not a member of the filter set, // don't gather info on it. - _, ok := filterSet[p.Mountpoint] + _, ok := mountPointFilterSet[p.Mountpoint] if !ok { continue } @@ -81,6 +86,12 @@ func (s *systemPS) DiskUsage( if err != nil { return nil, err } + // If the mount point is a member of the exclude set, + // don't gather info on it. + _, ok := fstypeExcludeSet[p.Fstype] + if ok { + continue + } du.Fstype = p.Fstype usage = append(usage, du) } From 2a6ff168195b764e36f9c07084c98db8d9ea62c3 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 22 Feb 2016 14:43:00 -0700 Subject: [PATCH 067/287] Fix up config panic points for naoina/toml support closes #736 --- CHANGELOG.md | 1 + internal/config/config.go | 21 +++++++++------------ plugins/inputs/system/disk.go | 12 ++++++------ 3 files changed, 16 insertions(+), 18 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4524b9817..0f5b72e28 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ - [#727](https://github.com/influxdata/telegraf/pull/727): riak input, thanks @jcoene! - [#694](https://github.com/influxdata/telegraf/pull/694): DNS Query input, thanks @mjasion! - [#724](https://github.com/influxdata/telegraf/pull/724): username matching for procstat input, thanks @zorel! +- [#736](https://github.com/influxdata/telegraf/pull/736): Ignore dummy filesystems from disk plugin. Thanks @PierreF! ### Bugfixes - [#701](https://github.com/influxdata/telegraf/pull/701): output write count shouldnt print in quiet mode. diff --git a/internal/config/config.go b/internal/config/config.go index 09f878f50..b5b73e06e 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -185,25 +185,22 @@ var header = `# Telegraf Configuration hostname = "" -############################################################################### -# OUTPUTS # -############################################################################### +# +# OUTPUTS: +# ` var pluginHeader = ` - -############################################################################### -# INPUTS # -############################################################################### - +# +# INPUTS: +# ` var serviceInputHeader = ` - -############################################################################### -# SERVICE INPUTS # -############################################################################### +# +# SERVICE INPUTS: +# ` // PrintSampleConfig prints the sample config diff --git a/plugins/inputs/system/disk.go b/plugins/inputs/system/disk.go index f93558de2..5784a7322 100644 --- a/plugins/inputs/system/disk.go +++ b/plugins/inputs/system/disk.go @@ -26,8 +26,8 @@ var diskSampleConfig = ` ## Setting mountpoints will restrict the stats to the specified mountpoints. # mount_points = ["/"] - # Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually - # present on /run, /var/run, /dev/shm or /dev). + ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually + ## present on /run, /var/run, /dev/shm or /dev). ignore_fs = ["tmpfs", "devtmpfs"] ` @@ -88,11 +88,11 @@ func (_ *DiskIOStats) Description() string { } var diskIoSampleConfig = ` - # By default, telegraf will gather stats for all devices including - # disk partitions. - # Setting devices will restrict the stats to the specified devices. + ## By default, telegraf will gather stats for all devices including + ## disk partitions. + ## Setting devices will restrict the stats to the specified devices. # devices = ["sda", "sdb"] - # Uncomment the following line if you do not need disk serial numbers. + ## Uncomment the following line if you do not need disk serial numbers. # skip_serial_number = true ` From 8362aa9d66dc49e2e88b3bb4abf140e549bc07a5 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 22 Feb 2016 15:12:35 -0700 Subject: [PATCH 068/287] Some windows build script fixes --- scripts/build.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/build.py b/scripts/build.py index b25b44982..9aca120cd 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -70,7 +70,7 @@ targets = { supported_builds = { 'darwin': [ "amd64", "i386" ], - 'windows': [ "amd64", "i386", "arm" ], + 'windows': [ "amd64", "i386" ], 'linux': [ "amd64", "i386", "arm" ] } supported_packages = { @@ -287,6 +287,8 @@ def build(version=None, print("Starting build...") for b, c in targets.items(): + if platform == 'windows': + b = b + '.exe' print("\t- Building '{}'...".format(os.path.join(outdir, b))) build_command = "" build_command += "GOOS={} GOARCH={} ".format(platform, arch) From 54ee44839c0ae76f4998a7a61f9388cd32242a79 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 22 Feb 2016 16:59:45 -0700 Subject: [PATCH 069/287] Put arm deb and rpm downloads on readme --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 9977588fa..9f3716789 100644 --- a/README.md +++ b/README.md @@ -30,6 +30,10 @@ Latest: * http://get.influxdb.org/telegraf/telegraf_0.10.3-1_amd64.deb * http://get.influxdb.org/telegraf/telegraf-0.10.3-1.x86_64.rpm +Latest (arm): +* http://get.influxdb.org/telegraf/telegraf_0.10.3-1_arm.deb +* http://get.influxdb.org/telegraf/telegraf-0.10.3-1.arm.rpm + 0.2.x: * http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb * http://get.influxdb.org/telegraf/telegraf-0.2.4-1.x86_64.rpm From 918c3fb2602de24613d246273ce975d41ff11b06 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 23 Feb 2016 09:34:01 -0700 Subject: [PATCH 070/287] httpjson test real response from issue #729 --- plugins/inputs/httpjson/httpjson.go | 3 +- plugins/inputs/httpjson/httpjson_test.go | 138 +++++++++++++++++++++++ testutil/accumulator.go | 16 +-- 3 files changed, 142 insertions(+), 15 deletions(-) diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index d5dddd7d4..fb19a9c1d 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -23,7 +23,8 @@ type HttpJson struct { TagKeys []string Parameters map[string]string Headers map[string]string - client HTTPClient + + client HTTPClient } type HTTPClient interface { diff --git a/plugins/inputs/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go index f5f81c7c3..972ffb83c 100644 --- a/plugins/inputs/httpjson/httpjson_test.go +++ b/plugins/inputs/httpjson/httpjson_test.go @@ -1,8 +1,10 @@ package httpjson import ( + "fmt" "io/ioutil" "net/http" + "net/http/httptest" "strings" "testing" @@ -27,6 +29,75 @@ const validJSON = ` "another_list": [4] }` +const validJSON2 = `{ + "user":{ + "hash_rate":0, + "expected_24h_rewards":0, + "total_rewards":0.000595109232, + "paid_rewards":0, + "unpaid_rewards":0.000595109232, + "past_24h_rewards":0, + "total_work":"5172625408", + "blocks_found":0 + }, + "workers":{ + "brminer.1":{ + "hash_rate":0, + "hash_rate_24h":0, + "valid_shares":"6176", + "stale_shares":"0", + "invalid_shares":"0", + "rewards":4.5506464e-5, + "rewards_24h":0, + "reset_time":1455409950 + }, + "brminer.2":{ + "hash_rate":0, + "hash_rate_24h":0, + "valid_shares":"0", + "stale_shares":"0", + "invalid_shares":"0", + "rewards":0, + "rewards_24h":0, + "reset_time":1455936726 + }, + "brminer.3":{ + "hash_rate":0, + "hash_rate_24h":0, + "valid_shares":"0", + "stale_shares":"0", + "invalid_shares":"0", + "rewards":0, + "rewards_24h":0, + "reset_time":1455936733 + } + }, + "pool":{ + "hash_rate":114100000, + "active_users":843, + "total_work":"5015346808842682368", + "pps_ratio":1.04, + "pps_rate":7.655e-9 + }, + "network":{ + "hash_rate":1426117703, + "block_number":944895, + "time_per_block":156, + "difficulty":51825.72835216, + "next_difficulty":51916.15249019, + "retarget_time":95053 + }, + "market":{ + "ltc_btc":0.00798, + "ltc_usd":3.37801, + "ltc_eur":3.113, + "ltc_gbp":2.32807, + "ltc_rub":241.796, + "ltc_cny":21.3883, + "btc_usd":422.852 + } +}` + const validJSONTags = ` { "value": 15, @@ -149,6 +220,73 @@ func TestHttpJson200(t *testing.T) { } } +// Test litecoin sample output +func TestHttpJsonLiteCoin(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, validJSON2) + })) + defer ts.Close() + + a := HttpJson{ + Servers: []string{ts.URL}, + Name: "", + Method: "GET", + client: RealHTTPClient{client: &http.Client{}}, + } + + var acc testutil.Accumulator + err := a.Gather(&acc) + require.NoError(t, err) + + // remove response_time from gathered fields because it's non-deterministic + delete(acc.Metrics[0].Fields, "response_time") + + fields := map[string]interface{}{ + "market_btc_usd": float64(422.852), + "market_ltc_btc": float64(0.00798), + "market_ltc_cny": float64(21.3883), + "market_ltc_eur": float64(3.113), + "market_ltc_gbp": float64(2.32807), + "market_ltc_rub": float64(241.796), + "market_ltc_usd": float64(3.37801), + "network_block_number": float64(944895), + "network_difficulty": float64(51825.72835216), + "network_hash_rate": float64(1.426117703e+09), + "network_next_difficulty": float64(51916.15249019), + "network_retarget_time": float64(95053), + "network_time_per_block": float64(156), + "pool_active_users": float64(843), + "pool_hash_rate": float64(1.141e+08), + "pool_pps_rate": float64(7.655e-09), + "pool_pps_ratio": float64(1.04), + "user_blocks_found": float64(0), + "user_expected_24h_rewards": float64(0), + "user_hash_rate": float64(0), + "user_paid_rewards": float64(0), + "user_past_24h_rewards": float64(0), + "user_total_rewards": float64(0.000595109232), + "user_unpaid_rewards": float64(0.000595109232), + "workers_brminer.1_hash_rate": float64(0), + "workers_brminer.1_hash_rate_24h": float64(0), + "workers_brminer.1_reset_time": float64(1.45540995e+09), + "workers_brminer.1_rewards": float64(4.5506464e-05), + "workers_brminer.1_rewards_24h": float64(0), + "workers_brminer.2_hash_rate": float64(0), + "workers_brminer.2_hash_rate_24h": float64(0), + "workers_brminer.2_reset_time": float64(1.455936726e+09), + "workers_brminer.2_rewards": float64(0), + "workers_brminer.2_rewards_24h": float64(0), + "workers_brminer.3_hash_rate": float64(0), + "workers_brminer.3_hash_rate_24h": float64(0), + "workers_brminer.3_reset_time": float64(1.455936733e+09), + "workers_brminer.3_rewards": float64(0), + "workers_brminer.3_rewards_24h": float64(0), + } + + acc.AssertContainsFields(t, "httpjson", fields) +} + // Test response to HTTP 500 func TestHttpJson500(t *testing.T) { httpjson := genMockHttpJson(validJSON, 500) diff --git a/testutil/accumulator.go b/testutil/accumulator.go index cb56d8d28..9b6fb2373 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -133,13 +133,7 @@ func (a *Accumulator) AssertContainsTaggedFields( } if p.Measurement == measurement { - if !reflect.DeepEqual(fields, p.Fields) { - pActual, _ := json.MarshalIndent(p.Fields, "", " ") - pExp, _ := json.MarshalIndent(fields, "", " ") - msg := fmt.Sprintf("Actual:\n%s\n(%T) \nExpected:\n%s\n(%T)", - string(pActual), p.Fields, string(pExp), fields) - assert.Fail(t, msg) - } + assert.Equal(t, fields, p.Fields) return } } @@ -156,13 +150,7 @@ func (a *Accumulator) AssertContainsFields( defer a.Unlock() for _, p := range a.Metrics { if p.Measurement == measurement { - if !reflect.DeepEqual(fields, p.Fields) { - pActual, _ := json.MarshalIndent(p.Fields, "", " ") - pExp, _ := json.MarshalIndent(fields, "", " ") - msg := fmt.Sprintf("Actual:\n%s\n(%T) \nExpected:\n%s\n(%T)", - string(pActual), p.Fields, string(pExp), fields) - assert.Fail(t, msg) - } + assert.Equal(t, fields, p.Fields) return } } From 69e4f16b1375ca527a1b67c00cafe4aaea1d27e1 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 23 Feb 2016 10:07:56 -0700 Subject: [PATCH 071/287] Fix bad http GET parameter encoding, add unit test --- plugins/inputs/httpjson/httpjson.go | 2 +- plugins/inputs/httpjson/httpjson_test.go | 16 ++++++++++++---- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index fb19a9c1d..727fb4344 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -188,10 +188,10 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) { switch { case h.Method == "GET": - requestURL.RawQuery = params.Encode() for k, v := range h.Parameters { params.Add(k, v) } + requestURL.RawQuery = params.Encode() case h.Method == "POST": requestURL.RawQuery = "" diff --git a/plugins/inputs/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go index 972ffb83c..6a98bbad3 100644 --- a/plugins/inputs/httpjson/httpjson_test.go +++ b/plugins/inputs/httpjson/httpjson_test.go @@ -222,17 +222,25 @@ func TestHttpJson200(t *testing.T) { // Test litecoin sample output func TestHttpJsonLiteCoin(t *testing.T) { + params := map[string]string{ + "api_key": "mykey", + } ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + err := r.ParseForm() + assert.NoError(t, err) + key := r.Form.Get("api_key") + assert.Equal(t, "mykey", key) w.WriteHeader(http.StatusOK) fmt.Fprintln(w, validJSON2) })) defer ts.Close() a := HttpJson{ - Servers: []string{ts.URL}, - Name: "", - Method: "GET", - client: RealHTTPClient{client: &http.Client{}}, + Servers: []string{ts.URL}, + Name: "", + Method: "GET", + Parameters: params, + client: RealHTTPClient{client: &http.Client{}}, } var acc testutil.Accumulator From 3e8f96a463c74b98a4848fa10ee6743bb7a13aa0 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 23 Feb 2016 10:54:28 -0700 Subject: [PATCH 072/287] httpjson: add unit test to verify that POST params get passed --- plugins/inputs/httpjson/httpjson.go | 7 +- plugins/inputs/httpjson/httpjson_test.go | 151 ++++++++++++++++++++++- 2 files changed, 149 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index 727fb4344..c055f66de 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -1,7 +1,6 @@ package httpjson import ( - "bytes" "errors" "fmt" "io/ioutil" @@ -183,11 +182,10 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) { return "", -1, fmt.Errorf("Invalid server URL \"%s\"", serverURL) } - params := url.Values{} data := url.Values{} - switch { case h.Method == "GET": + params := requestURL.Query() for k, v := range h.Parameters { params.Add(k, v) } @@ -201,7 +199,8 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) { } // Create + send request - req, err := http.NewRequest(h.Method, requestURL.String(), bytes.NewBufferString(data.Encode())) + req, err := http.NewRequest(h.Method, requestURL.String(), + strings.NewReader(data.Encode())) if err != nil { return "", -1, err } diff --git a/plugins/inputs/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go index 6a98bbad3..b6b57a167 100644 --- a/plugins/inputs/httpjson/httpjson_test.go +++ b/plugins/inputs/httpjson/httpjson_test.go @@ -220,15 +220,82 @@ func TestHttpJson200(t *testing.T) { } } -// Test litecoin sample output -func TestHttpJsonLiteCoin(t *testing.T) { +// Test that GET Parameters from the url string are applied properly +func TestHttpJsonGET_URL(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + key := r.FormValue("api_key") + assert.Equal(t, "mykey", key) + w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, validJSON2) + })) + defer ts.Close() + + a := HttpJson{ + Servers: []string{ts.URL + "?api_key=mykey"}, + Name: "", + Method: "GET", + client: RealHTTPClient{client: &http.Client{}}, + } + + var acc testutil.Accumulator + err := a.Gather(&acc) + require.NoError(t, err) + + // remove response_time from gathered fields because it's non-deterministic + delete(acc.Metrics[0].Fields, "response_time") + + fields := map[string]interface{}{ + "market_btc_usd": float64(422.852), + "market_ltc_btc": float64(0.00798), + "market_ltc_cny": float64(21.3883), + "market_ltc_eur": float64(3.113), + "market_ltc_gbp": float64(2.32807), + "market_ltc_rub": float64(241.796), + "market_ltc_usd": float64(3.37801), + "network_block_number": float64(944895), + "network_difficulty": float64(51825.72835216), + "network_hash_rate": float64(1.426117703e+09), + "network_next_difficulty": float64(51916.15249019), + "network_retarget_time": float64(95053), + "network_time_per_block": float64(156), + "pool_active_users": float64(843), + "pool_hash_rate": float64(1.141e+08), + "pool_pps_rate": float64(7.655e-09), + "pool_pps_ratio": float64(1.04), + "user_blocks_found": float64(0), + "user_expected_24h_rewards": float64(0), + "user_hash_rate": float64(0), + "user_paid_rewards": float64(0), + "user_past_24h_rewards": float64(0), + "user_total_rewards": float64(0.000595109232), + "user_unpaid_rewards": float64(0.000595109232), + "workers_brminer.1_hash_rate": float64(0), + "workers_brminer.1_hash_rate_24h": float64(0), + "workers_brminer.1_reset_time": float64(1.45540995e+09), + "workers_brminer.1_rewards": float64(4.5506464e-05), + "workers_brminer.1_rewards_24h": float64(0), + "workers_brminer.2_hash_rate": float64(0), + "workers_brminer.2_hash_rate_24h": float64(0), + "workers_brminer.2_reset_time": float64(1.455936726e+09), + "workers_brminer.2_rewards": float64(0), + "workers_brminer.2_rewards_24h": float64(0), + "workers_brminer.3_hash_rate": float64(0), + "workers_brminer.3_hash_rate_24h": float64(0), + "workers_brminer.3_reset_time": float64(1.455936733e+09), + "workers_brminer.3_rewards": float64(0), + "workers_brminer.3_rewards_24h": float64(0), + } + + acc.AssertContainsFields(t, "httpjson", fields) +} + +// Test that GET Parameters are applied properly +func TestHttpJsonGET(t *testing.T) { params := map[string]string{ "api_key": "mykey", } ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - err := r.ParseForm() - assert.NoError(t, err) - key := r.Form.Get("api_key") + key := r.FormValue("api_key") assert.Equal(t, "mykey", key) w.WriteHeader(http.StatusOK) fmt.Fprintln(w, validJSON2) @@ -295,6 +362,80 @@ func TestHttpJsonLiteCoin(t *testing.T) { acc.AssertContainsFields(t, "httpjson", fields) } +// Test that POST Parameters are applied properly +func TestHttpJsonPOST(t *testing.T) { + params := map[string]string{ + "api_key": "mykey", + } + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, err := ioutil.ReadAll(r.Body) + assert.NoError(t, err) + assert.Equal(t, "api_key=mykey", string(body)) + w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, validJSON2) + })) + defer ts.Close() + + a := HttpJson{ + Servers: []string{ts.URL}, + Name: "", + Method: "POST", + Parameters: params, + client: RealHTTPClient{client: &http.Client{}}, + } + + var acc testutil.Accumulator + err := a.Gather(&acc) + require.NoError(t, err) + + // remove response_time from gathered fields because it's non-deterministic + delete(acc.Metrics[0].Fields, "response_time") + + fields := map[string]interface{}{ + "market_btc_usd": float64(422.852), + "market_ltc_btc": float64(0.00798), + "market_ltc_cny": float64(21.3883), + "market_ltc_eur": float64(3.113), + "market_ltc_gbp": float64(2.32807), + "market_ltc_rub": float64(241.796), + "market_ltc_usd": float64(3.37801), + "network_block_number": float64(944895), + "network_difficulty": float64(51825.72835216), + "network_hash_rate": float64(1.426117703e+09), + "network_next_difficulty": float64(51916.15249019), + "network_retarget_time": float64(95053), + "network_time_per_block": float64(156), + "pool_active_users": float64(843), + "pool_hash_rate": float64(1.141e+08), + "pool_pps_rate": float64(7.655e-09), + "pool_pps_ratio": float64(1.04), + "user_blocks_found": float64(0), + "user_expected_24h_rewards": float64(0), + "user_hash_rate": float64(0), + "user_paid_rewards": float64(0), + "user_past_24h_rewards": float64(0), + "user_total_rewards": float64(0.000595109232), + "user_unpaid_rewards": float64(0.000595109232), + "workers_brminer.1_hash_rate": float64(0), + "workers_brminer.1_hash_rate_24h": float64(0), + "workers_brminer.1_reset_time": float64(1.45540995e+09), + "workers_brminer.1_rewards": float64(4.5506464e-05), + "workers_brminer.1_rewards_24h": float64(0), + "workers_brminer.2_hash_rate": float64(0), + "workers_brminer.2_hash_rate_24h": float64(0), + "workers_brminer.2_reset_time": float64(1.455936726e+09), + "workers_brminer.2_rewards": float64(0), + "workers_brminer.2_rewards_24h": float64(0), + "workers_brminer.3_hash_rate": float64(0), + "workers_brminer.3_hash_rate_24h": float64(0), + "workers_brminer.3_reset_time": float64(1.455936733e+09), + "workers_brminer.3_rewards": float64(0), + "workers_brminer.3_rewards_24h": float64(0), + } + + acc.AssertContainsFields(t, "httpjson", fields) +} + // Test response to HTTP 500 func TestHttpJson500(t *testing.T) { httpjson := genMockHttpJson(validJSON, 500) From 83c27cc7b133dd3e6cb9fd9e9efd3ecf8c1f003d Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 23 Feb 2016 11:10:28 -0700 Subject: [PATCH 073/287] dns query: Don't use mjasion.pl for unit tests, check errs --- CHANGELOG.md | 1 + plugins/inputs/dns_query/dns_query_test.go | 28 ++++++++++++++-------- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0f5b72e28..05fc7b09d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ ### Bugfixes - [#701](https://github.com/influxdata/telegraf/pull/701): output write count shouldnt print in quiet mode. +- [#746](https://github.com/influxdata/telegraf/pull/746): httpjson plugin: Fix HTTP GET parameters. ## v0.10.3 [2016-02-18] diff --git a/plugins/inputs/dns_query/dns_query_test.go b/plugins/inputs/dns_query/dns_query_test.go index ff923dae3..076db5fab 100644 --- a/plugins/inputs/dns_query/dns_query_test.go +++ b/plugins/inputs/dns_query/dns_query_test.go @@ -9,7 +9,7 @@ import ( ) var servers = []string{"8.8.8.8"} -var domains = []string{"mjasion.pl"} +var domains = []string{"google.com"} func TestGathering(t *testing.T) { var dnsConfig = DnsQuery{ @@ -18,8 +18,10 @@ func TestGathering(t *testing.T) { } var acc testutil.Accumulator - dnsConfig.Gather(&acc) - metric, _ := acc.Get("dns_query") + err := dnsConfig.Gather(&acc) + assert.NoError(t, err) + metric, ok := acc.Get("dns_query") + assert.True(t, ok) queryTime, _ := metric.Fields["query_time_ms"].(float64) assert.NotEqual(t, 0, queryTime) @@ -33,8 +35,10 @@ func TestGatheringMxRecord(t *testing.T) { var acc testutil.Accumulator dnsConfig.RecordType = "MX" - dnsConfig.Gather(&acc) - metric, _ := acc.Get("dns_query") + err := dnsConfig.Gather(&acc) + assert.NoError(t, err) + metric, ok := acc.Get("dns_query") + assert.True(t, ok) queryTime, _ := metric.Fields["query_time_ms"].(float64) assert.NotEqual(t, 0, queryTime) @@ -54,8 +58,10 @@ func TestGatheringRootDomain(t *testing.T) { } fields := map[string]interface{}{} - dnsConfig.Gather(&acc) - metric, _ := acc.Get("dns_query") + err := dnsConfig.Gather(&acc) + assert.NoError(t, err) + metric, ok := acc.Get("dns_query") + assert.True(t, ok) queryTime, _ := metric.Fields["query_time_ms"].(float64) fields["query_time_ms"] = queryTime @@ -70,13 +76,15 @@ func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) { var acc testutil.Accumulator tags := map[string]string{ "server": "8.8.8.8", - "domain": "mjasion.pl", + "domain": "google.com", "record_type": "NS", } fields := map[string]interface{}{} - dnsConfig.Gather(&acc) - metric, _ := acc.Get("dns_query") + err := dnsConfig.Gather(&acc) + assert.NoError(t, err) + metric, ok := acc.Get("dns_query") + assert.True(t, ok) queryTime, _ := metric.Fields["query_time_ms"].(float64) fields["query_time_ms"] = queryTime From 1847ce3f3d304e27eb92e8a68e60308defe7ddbc Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 22 Feb 2016 15:16:46 -0700 Subject: [PATCH 074/287] Experimental windows build process changes --- Godeps_windows | 8 +-- etc/telegraf_windows.conf | 120 ++++++++++++++++++++++++++++++++++++++ scripts/build.py | 101 +++++++++++++++++++------------- scripts/circle-test.sh | 1 + 4 files changed, 185 insertions(+), 45 deletions(-) create mode 100644 etc/telegraf_windows.conf diff --git a/Godeps_windows b/Godeps_windows index 034fb4fec..dd46184ec 100644 --- a/Godeps_windows +++ b/Godeps_windows @@ -1,4 +1,4 @@ -git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git dbd8d5c40a582eb9adacde36b47932b3a3ad0034 +git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git 617c801af238c3af2d9e72c5d4a0f02edad03ce5 github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef github.com/Sirupsen/logrus f7f79f729e0fbe2fcc061db48a9ba0263f588252 github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5 @@ -21,18 +21,18 @@ github.com/gorilla/context 1c83b3eabd45b6d76072b66b746c20815fb2872d github.com/gorilla/mux 26a6070f849969ba72b72256e9f14cf519751690 github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478 github.com/influxdata/config bae7cb98197d842374d3b8403905924094930f24 -github.com/influxdata/influxdb a9552fdd91361819a792f337e5d9998859732a67 -github.com/influxdb/influxdb a9552fdd91361819a792f337e5d9998859732a67 +github.com/influxdata/influxdb ef571fc104dc24b77cd3710c156cd95e5cfd7aa5 github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264 github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38 github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1 github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453 +github.com/miekg/dns e0d84d97e59bcb6561eae269c4e94d25b66822cb github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504 github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9 +github.com/nats-io/nats 6a83f1a633cfbfd90aa648ac99fb38c06a8b40df github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f -github.com/pborman/uuid dee7705ef7b324f27ceb85a121c61f2c2e8ce988 github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2 github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf new file mode 100644 index 000000000..df35261d7 --- /dev/null +++ b/etc/telegraf_windows.conf @@ -0,0 +1,120 @@ +# Telegraf configuration + +# Telegraf is entirely plugin driven. All metrics are gathered from the +# declared inputs, and sent to the declared outputs. + +# Plugins must be declared in here to be active. +# To deactivate a plugin, comment out the name and any variables. + +# Use 'telegraf -config telegraf.conf -test' to see what metrics a config +# file would generate. + +# Global tags can be specified here in key="value" format. +[global_tags] + # dc = "us-east-1" # will tag all metrics with dc=us-east-1 + # rack = "1a" + +# Configuration for telegraf agent +[agent] + ## Default data collection interval for all inputs + interval = "10s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. + round_interval = true + + ## Telegraf will cache metric_buffer_limit metrics for each output, and will + ## flush this buffer on a successful write. + metric_buffer_limit = 10000 + ## Flush the buffer whenever full, regardless of flush_interval. + flush_buffer_when_full = true + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Default flushing interval for all outputs. You shouldn't set this below + ## interval. Maximum flush_interval will be flush_interval + flush_jitter + flush_interval = "10s" + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + flush_jitter = "0s" + + ## Run telegraf in debug mode + debug = false + ## Run telegraf in quiet mode + quiet = false + ## Override default hostname, if empty use os.Hostname() + hostname = "" + + +############################################################################### +# OUTPUTS # +############################################################################### + +# Configuration for influxdb server to send metrics to +[[outputs.influxdb]] + # The full HTTP or UDP endpoint URL for your InfluxDB instance. + # Multiple urls can be specified but it is assumed that they are part of the same + # cluster, this means that only ONE of the urls will be written to each interval. + # urls = ["udp://localhost:8089"] # UDP endpoint example + urls = ["http://localhost:8086"] # required + # The target database for metrics (telegraf will create it if not exists) + database = "telegraf" # required + # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". + # note: using second precision greatly helps InfluxDB compression + precision = "s" + + ## Write timeout (for the InfluxDB client), formatted as a string. + ## If not provided, will default to 5s. 0s means no timeout (not recommended). + timeout = "5s" + # username = "telegraf" + # password = "metricsmetricsmetricsmetrics" + # Set the user agent for HTTP POSTs (can be useful for log differentiation) + # user_agent = "telegraf" + # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) + # udp_payload = 512 + + +############################################################################### +# INPUTS # +############################################################################### + +# Read metrics about cpu usage +[[inputs.cpu]] + # Whether to report per-cpu stats or not + percpu = true + # Whether to report total system cpu stats or not + totalcpu = true + # Comment this line if you want the raw CPU time metrics + fielddrop = ["time_*"] + +# Read metrics about disk usage by mount point +[[inputs.disk]] + # By default, telegraf gather stats for all mountpoints. + # Setting mountpoints will restrict the stats to the specified mountpoints. + # mount_points=["/"] + + # Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually + # present on /run, /var/run, /dev/shm or /dev). + ignore_fs = ["tmpfs", "devtmpfs"] + +# Read metrics about disk IO by device +[[inputs.diskio]] + # By default, telegraf will gather stats for all devices including + # disk partitions. + # Setting devices will restrict the stats to the specified devices. + # devices = ["sda", "sdb"] + # Uncomment the following line if you do not need disk serial numbers. + # skip_serial_number = true + +# Read metrics about memory usage +[[inputs.mem]] + # no configuration + +# Read metrics about swap memory usage +[[inputs.swap]] + # no configuration + diff --git a/scripts/build.py b/scripts/build.py index 9aca120cd..1465e36f3 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -30,6 +30,7 @@ INIT_SCRIPT = "scripts/init.sh" SYSTEMD_SCRIPT = "scripts/telegraf.service" LOGROTATE_SCRIPT = "etc/logrotate.d/telegraf" DEFAULT_CONFIG = "etc/telegraf.conf" +DEFAULT_WINDOWS_CONFIG = "etc/telegraf_windows.conf" POSTINST_SCRIPT = "scripts/post-install.sh" PREINST_SCRIPT = "scripts/pre-install.sh" @@ -76,7 +77,7 @@ supported_builds = { supported_packages = { "darwin": [ "tar", "zip" ], "linux": [ "deb", "rpm", "tar", "zip" ], - "windows": [ "tar", "zip" ], + "windows": [ "zip" ], } supported_tags = { # "linux": { @@ -351,20 +352,25 @@ def create_package_fs(build_root): create_dir(os.path.join(build_root, d)) os.chmod(os.path.join(build_root, d), 0o755) -def package_scripts(build_root): +def package_scripts(build_root, windows=False): print("\t- Copying scripts and sample configuration to build directory") - shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1])) - os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644) - shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1])) - os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644) - shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf")) - os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0o644) - shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf")) - os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0o644) + if windows: + shutil.copyfile(DEFAULT_WINDOWS_CONFIG, os.path.join(build_root, "telegraf.conf")) + os.chmod(os.path.join(build_root, "telegraf.conf"), 0o644) + else: + shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1])) + os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644) + shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1])) + os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644) + shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf")) + os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0o644) + shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf")) + os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0o644) def go_get(): print("Retrieving Go dependencies...") run("go get github.com/sparrc/gdm") + run("gdm restore -f Godeps_windows") run("gdm restore") def generate_md5_from_file(path): @@ -395,15 +401,18 @@ def build_packages(build_output, version, pkg_arch, nightly=False, rc=None, iter build_root = os.path.join(tmp_build_dir, p, a) # Create directory tree to mimic file system of package create_dir(build_root) - create_package_fs(build_root) - # Copy in packaging and miscellaneous scripts - package_scripts(build_root) + if p == 'windows': + package_scripts(build_root, windows=True) + else: + create_package_fs(build_root) + # Copy in packaging and miscellaneous scripts + package_scripts(build_root) # Copy newly-built binaries to packaging directory for b in targets: if p == 'windows': b = b + '.exe' fr = os.path.join(current_location, b) - to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], b) + to = os.path.join(build_root, b) print("\t- [{}][{}] - Moving from '{}' to '{}'".format(p, a, fr, to)) copy_file(fr, to) # Package the directory structure @@ -431,34 +440,44 @@ def build_packages(build_output, version, pkg_arch, nightly=False, rc=None, iter a = pkg_arch if a == '386': a = 'i386' - fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format( - fpm_common_args, - name, - a, - package_type, - package_version, - package_iteration, - build_root, - current_location) - if pkg_arch is not None: - a = saved_a - if package_type == "rpm": - fpm_command += "--depends coreutils " - fpm_command += "--depends lsof" - out = run(fpm_command, shell=True) - matches = re.search(':path=>"(.*)"', out) - outfile = None - if matches is not None: - outfile = matches.groups()[0] - if outfile is None: - print("[ COULD NOT DETERMINE OUTPUT ]") - else: - # Strip nightly version (the unix epoch) from filename - if nightly and package_type in ['deb', 'rpm']: - outfile = rename_file(outfile, outfile.replace("{}-{}".format(version, iteration), "nightly")) - outfiles.append(os.path.join(os.getcwd(), outfile)) - # Display MD5 hash for generated package + if package_type == 'zip': + zip_command = "cd {} && zip {}.zip ./*".format( + build_root, + name) + run(zip_command, shell=True) + run("mv {}.zip {}".format(os.path.join(build_root, name), current_location), shell=True) + outfile = os.path.join(current_location, name+".zip") + outfiles.append(outfile) print("\t\tMD5 = {}".format(generate_md5_from_file(outfile))) + else: + fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format( + fpm_common_args, + name, + a, + package_type, + package_version, + package_iteration, + build_root, + current_location) + if pkg_arch is not None: + a = saved_a + if package_type == "rpm": + fpm_command += "--depends coreutils " + fpm_command += "--depends lsof" + out = run(fpm_command, shell=True) + matches = re.search(':path=>"(.*)"', out) + outfile = None + if matches is not None: + outfile = matches.groups()[0] + if outfile is None: + print("[ COULD NOT DETERMINE OUTPUT ]") + else: + # Strip nightly version (the unix epoch) from filename + if nightly and package_type in ['deb', 'rpm']: + outfile = rename_file(outfile, outfile.replace("{}-{}".format(version, iteration), "nightly")) + outfiles.append(os.path.join(os.getcwd(), outfile)) + # Display MD5 hash for generated package + print("\t\tMD5 = {}".format(generate_md5_from_file(outfile))) print("") if debug: print("[DEBUG] package outfiles: {}".format(outfiles)) diff --git a/scripts/circle-test.sh b/scripts/circle-test.sh index 863fc396f..91511b050 100755 --- a/scripts/circle-test.sh +++ b/scripts/circle-test.sh @@ -76,5 +76,6 @@ if [ $? -eq 0 ]; then tag=$(git describe --exact-match HEAD) echo $tag exit_if_fail ./scripts/build.py --package --version=$tag --platform=linux --arch=all --upload + exit_if_fail ./scripts/build.py --package --version=$tag --platform=windows --arch=all --upload mv build $CIRCLE_ARTIFACTS fi From 85c4f753ad1fbd174f7c03c494b45463772d09c2 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 23 Feb 2016 15:40:02 -0700 Subject: [PATCH 075/287] modify Windows default conf to use win perf over WMI --- etc/telegraf_windows.conf | 90 +++++++++++++++++++++++++++++---------- 1 file changed, 67 insertions(+), 23 deletions(-) diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index df35261d7..7e66cb209 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -82,39 +82,83 @@ # INPUTS # ############################################################################### +# Windows Performance Counters plugin. +# These are the recommended method of monitoring system metrics on windows, +# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI, +# which utilizes a lot of system resources. +# +# See more configuration examples at: +# https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters + +[[inputs.win_perf_counters]] + [[inputs.win_perf_counters.object]] + # Processor usage, alternative to native, reports on a per core. + ObjectName = "Processor" + Instances = ["*"] + Counters = ["% Idle Time", "% Interrupt Time", "% Privileged Time", "% User Time", "% Processor Time"] + Measurement = "win_cpu" + #IncludeTotal=false #Set to true to include _Total instance when querying for all (*). + + [[inputs.win_perf_counters.object]] + # Disk times and queues + ObjectName = "LogicalDisk" + Instances = ["*"] + Counters = ["% Idle Time", "% Disk Time","% Disk Read Time", "% Disk Write Time", "% User Time", "Current Disk Queue Length"] + Measurement = "win_disk" + #IncludeTotal=false #Set to true to include _Total instance when querying for all (*). + + [[inputs.win_perf_counters.object]] + ObjectName = "System" + Counters = ["Context Switches/sec","System Calls/sec"] + Instances = ["------"] + Measurement = "win_system" + #IncludeTotal=false #Set to true to include _Total instance when querying for all (*). + + [[inputs.win_perf_counters.object]] + # Example query where the Instance portion must be removed to get data back, such as from the Memory object. + ObjectName = "Memory" + Counters = ["Available Bytes","Cache Faults/sec","Demand Zero Faults/sec","Page Faults/sec","Pages/sec","Transition Faults/sec","Pool Nonpaged Bytes","Pool Paged Bytes"] + Instances = ["------"] # Use 6 x - to remove the Instance bit from the query. + Measurement = "win_mem" + #IncludeTotal=false #Set to true to include _Total instance when querying for all (*). + + +# Windows system plugins using WMI (disabled by default, using +# win_perf_counters over WMI is recommended) + # Read metrics about cpu usage -[[inputs.cpu]] - # Whether to report per-cpu stats or not - percpu = true - # Whether to report total system cpu stats or not - totalcpu = true - # Comment this line if you want the raw CPU time metrics - fielddrop = ["time_*"] +#[[inputs.cpu]] + ## Whether to report per-cpu stats or not + #percpu = true + ## Whether to report total system cpu stats or not + #totalcpu = true + ## Comment this line if you want the raw CPU time metrics + #fielddrop = ["time_*"] # Read metrics about disk usage by mount point -[[inputs.disk]] - # By default, telegraf gather stats for all mountpoints. - # Setting mountpoints will restrict the stats to the specified mountpoints. - # mount_points=["/"] +#[[inputs.disk]] + ## By default, telegraf gather stats for all mountpoints. + ## Setting mountpoints will restrict the stats to the specified mountpoints. + ## mount_points=["/"] - # Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually - # present on /run, /var/run, /dev/shm or /dev). - ignore_fs = ["tmpfs", "devtmpfs"] + ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually + ## present on /run, /var/run, /dev/shm or /dev). + #ignore_fs = ["tmpfs", "devtmpfs"] # Read metrics about disk IO by device -[[inputs.diskio]] - # By default, telegraf will gather stats for all devices including - # disk partitions. - # Setting devices will restrict the stats to the specified devices. - # devices = ["sda", "sdb"] - # Uncomment the following line if you do not need disk serial numbers. - # skip_serial_number = true +#[[inputs.diskio]] + ## By default, telegraf will gather stats for all devices including + ## disk partitions. + ## Setting devices will restrict the stats to the specified devices. + ## devices = ["sda", "sdb"] + ## Uncomment the following line if you do not need disk serial numbers. + ## skip_serial_number = true # Read metrics about memory usage -[[inputs.mem]] +#[[inputs.mem]] # no configuration # Read metrics about swap memory usage -[[inputs.swap]] +#[[inputs.swap]] # no configuration From e983d35c2508fd7141eb0e361d39b59bedc8fe37 Mon Sep 17 00:00:00 2001 From: Matt Heath Date: Mon, 22 Feb 2016 15:58:06 +0000 Subject: [PATCH 076/287] Add support for multiple field names for timers closes #737 --- CHANGELOG.md | 1 + plugins/inputs/statsd/statsd.go | 71 +++++++++++------- plugins/inputs/statsd/statsd_test.go | 106 ++++++++++++++++++++++++++- 3 files changed, 146 insertions(+), 32 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 05fc7b09d..e46b495ff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ - [#694](https://github.com/influxdata/telegraf/pull/694): DNS Query input, thanks @mjasion! - [#724](https://github.com/influxdata/telegraf/pull/724): username matching for procstat input, thanks @zorel! - [#736](https://github.com/influxdata/telegraf/pull/736): Ignore dummy filesystems from disk plugin. Thanks @PierreF! +- [#737](https://github.com/influxdata/telegraf/pull/737): Support multiple fields for statsd input. Thanks @mattheath! ### Bugfixes - [#701](https://github.com/influxdata/telegraf/pull/701): output write count shouldnt print in quiet mode. diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index 830e9d25c..a16e78b5c 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -17,7 +17,11 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -const UDP_PACKET_SIZE int = 1500 +const ( + UDP_PACKET_SIZE int = 1500 + + defaultFieldName = "value" +) var dropwarn = "ERROR: Message queue full. Discarding line [%s] " + "You may want to increase allowed_pending_messages in the config\n" @@ -113,9 +117,9 @@ type cachedcounter struct { } type cachedtimings struct { - name string - stats RunningStats - tags map[string]string + name string + fields map[string]RunningStats + tags map[string]string } func (_ *Statsd) Description() string { @@ -169,16 +173,26 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error { now := time.Now() for _, metric := range s.timings { + // Defining a template to parse field names for timers allows us to split + // out multiple fields per timer. In this case we prefix each stat with the + // field name and store these all in a single measurement. fields := make(map[string]interface{}) - fields["mean"] = metric.stats.Mean() - fields["stddev"] = metric.stats.Stddev() - fields["upper"] = metric.stats.Upper() - fields["lower"] = metric.stats.Lower() - fields["count"] = metric.stats.Count() - for _, percentile := range s.Percentiles { - name := fmt.Sprintf("%v_percentile", percentile) - fields[name] = metric.stats.Percentile(percentile) + for fieldName, stats := range metric.fields { + var prefix string + if fieldName != defaultFieldName { + prefix = fieldName + "_" + } + fields[prefix+"mean"] = stats.Mean() + fields[prefix+"stddev"] = stats.Stddev() + fields[prefix+"upper"] = stats.Upper() + fields[prefix+"lower"] = stats.Lower() + fields[prefix+"count"] = stats.Count() + for _, percentile := range s.Percentiles { + name := fmt.Sprintf("%s%v_percentile", prefix, percentile) + fields[name] = stats.Percentile(percentile) + } } + acc.AddFields(metric.name, fields, metric.tags, now) } if s.DeleteTimings { @@ -370,11 +384,6 @@ func (s *Statsd) parseStatsdLine(line string) error { // Parse the name & tags from bucket m.name, m.field, m.tags = s.parseName(m.bucket) - // fields are not supported for timings, so if specified combine into - // the name - if (m.mtype == "ms" || m.mtype == "h") && m.field != "value" { - m.name += "_" + m.field - } switch m.mtype { case "c": m.tags["metric_type"] = "counter" @@ -433,7 +442,7 @@ func (s *Statsd) parseName(bucket string) (string, string, map[string]string) { name = strings.Replace(name, "-", "__", -1) } if field == "" { - field = "value" + field = defaultFieldName } return name, field, tags @@ -461,26 +470,32 @@ func parseKeyValue(keyvalue string) (string, string) { func (s *Statsd) aggregate(m metric) { switch m.mtype { case "ms", "h": + // Check if the measurement exists cached, ok := s.timings[m.hash] if !ok { cached = cachedtimings{ - name: m.name, - tags: m.tags, - stats: RunningStats{ - PercLimit: s.PercentileLimit, - }, + name: m.name, + fields: make(map[string]RunningStats), + tags: m.tags, + } + } + // Check if the field exists. If we've not enabled multiple fields per timer + // this will be the default field name, eg. "value" + field, ok := cached.fields[m.field] + if !ok { + field = RunningStats{ + PercLimit: s.PercentileLimit, } } - if m.samplerate > 0 { for i := 0; i < int(1.0/m.samplerate); i++ { - cached.stats.AddValue(m.floatvalue) + field.AddValue(m.floatvalue) } - s.timings[m.hash] = cached } else { - cached.stats.AddValue(m.floatvalue) - s.timings[m.hash] = cached + field.AddValue(m.floatvalue) } + cached.fields[m.field] = field + s.timings[m.hash] = cached case "c": // check if the measurement exists _, ok := s.counters[m.hash] diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index a285467b0..3a87f00aa 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -561,12 +561,12 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) { // A 0 with invalid samplerate will add a single 0, // plus the last bit of value 1 // which adds up to 12 individual datapoints to be cached - if cachedtiming.stats.n != 12 { - t.Errorf("Expected 11 additions, got %d", cachedtiming.stats.n) + if cachedtiming.fields[defaultFieldName].n != 12 { + t.Errorf("Expected 11 additions, got %d", cachedtiming.fields[defaultFieldName].n) } - if cachedtiming.stats.upper != 1 { - t.Errorf("Expected max input to be 1, got %f", cachedtiming.stats.upper) + if cachedtiming.fields[defaultFieldName].upper != 1 { + t.Errorf("Expected max input to be 1, got %f", cachedtiming.fields[defaultFieldName].upper) } } @@ -842,7 +842,105 @@ func TestParse_Timings(t *testing.T) { } acc.AssertContainsFields(t, "test_timing", valid) +} +// Tests low-level functionality of timings when multiple fields is enabled +// and a measurement template has been defined which can parse field names +func TestParse_Timings_MultipleFieldsWithTemplate(t *testing.T) { + s := NewStatsd() + s.Templates = []string{"measurement.field"} + s.Percentiles = []int{90} + acc := &testutil.Accumulator{} + + validLines := []string{ + "test_timing.success:1|ms", + "test_timing.success:11|ms", + "test_timing.success:1|ms", + "test_timing.success:1|ms", + "test_timing.success:1|ms", + "test_timing.error:2|ms", + "test_timing.error:22|ms", + "test_timing.error:2|ms", + "test_timing.error:2|ms", + "test_timing.error:2|ms", + } + + for _, line := range validLines { + err := s.parseStatsdLine(line) + if err != nil { + t.Errorf("Parsing line %s should not have resulted in an error\n", line) + } + } + s.Gather(acc) + + valid := map[string]interface{}{ + "success_90_percentile": float64(11), + "success_count": int64(5), + "success_lower": float64(1), + "success_mean": float64(3), + "success_stddev": float64(4), + "success_upper": float64(11), + + "error_90_percentile": float64(22), + "error_count": int64(5), + "error_lower": float64(2), + "error_mean": float64(6), + "error_stddev": float64(8), + "error_upper": float64(22), + } + + acc.AssertContainsFields(t, "test_timing", valid) +} + +// Tests low-level functionality of timings when multiple fields is enabled +// but a measurement template hasn't been defined so we can't parse field names +// In this case the behaviour should be the same as normal behaviour +func TestParse_Timings_MultipleFieldsWithoutTemplate(t *testing.T) { + s := NewStatsd() + s.Templates = []string{} + s.Percentiles = []int{90} + acc := &testutil.Accumulator{} + + validLines := []string{ + "test_timing.success:1|ms", + "test_timing.success:11|ms", + "test_timing.success:1|ms", + "test_timing.success:1|ms", + "test_timing.success:1|ms", + "test_timing.error:2|ms", + "test_timing.error:22|ms", + "test_timing.error:2|ms", + "test_timing.error:2|ms", + "test_timing.error:2|ms", + } + + for _, line := range validLines { + err := s.parseStatsdLine(line) + if err != nil { + t.Errorf("Parsing line %s should not have resulted in an error\n", line) + } + } + s.Gather(acc) + + expectedSuccess := map[string]interface{}{ + "90_percentile": float64(11), + "count": int64(5), + "lower": float64(1), + "mean": float64(3), + "stddev": float64(4), + "upper": float64(11), + } + expectedError := map[string]interface{}{ + "90_percentile": float64(22), + "count": int64(5), + "lower": float64(2), + "mean": float64(6), + "stddev": float64(8), + "upper": float64(22), + } + + acc.AssertContainsFields(t, "test_timing_success", expectedSuccess) + acc.AssertContainsFields(t, "test_timing_error", expectedError) } func TestParse_Timings_Delete(t *testing.T) { From 11482a75a1b5f1a5f0e0188bcb229692b9980cc2 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 23 Feb 2016 15:47:31 -0700 Subject: [PATCH 077/287] Changelog update, field and name drop and pass params --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e46b495ff..f0f87e499 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ ## v0.10.4 [unreleased] +### Release Notes +- The pass/drop parameters have been renamed to fielddrop/fieldpass parameters, +to more accurately indicate their purpose. +- There are also now namedrop/namepass parameters for passing/dropping based +on the metric _name_. + ### Features - [#727](https://github.com/influxdata/telegraf/pull/727): riak input, thanks @jcoene! - [#694](https://github.com/influxdata/telegraf/pull/694): DNS Query input, thanks @mjasion! From f1fa915985dde95fe879c2d68361885d72c48690 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 23 Feb 2016 15:59:02 -0700 Subject: [PATCH 078/287] Release 0.10.4 w/ windows builds --- CHANGELOG.md | 11 ++++++++++- README.md | 24 +++++++++++++++--------- 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f0f87e499..dc83441b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,19 @@ -## v0.10.4 [unreleased] +## v0.10.5 [unreleased] + +### Release Notes + +### Features + +### Bugfixes + +## v0.10.4 [2016-02-24] ### Release Notes - The pass/drop parameters have been renamed to fielddrop/fieldpass parameters, to more accurately indicate their purpose. - There are also now namedrop/namepass parameters for passing/dropping based on the metric _name_. +- Experimental windows builds now available. ### Features - [#727](https://github.com/influxdata/telegraf/pull/727): riak input, thanks @jcoene! diff --git a/README.md b/README.md index 9f3716789..0f3f81ebf 100644 --- a/README.md +++ b/README.md @@ -27,12 +27,12 @@ the [release blog post](https://influxdata.com/blog/announcing-telegraf-0-10-0/) ### Linux deb and rpm Packages: Latest: -* http://get.influxdb.org/telegraf/telegraf_0.10.3-1_amd64.deb -* http://get.influxdb.org/telegraf/telegraf-0.10.3-1.x86_64.rpm +* http://get.influxdb.org/telegraf/telegraf_0.10.4-1_amd64.deb +* http://get.influxdb.org/telegraf/telegraf-0.10.4-1.x86_64.rpm Latest (arm): -* http://get.influxdb.org/telegraf/telegraf_0.10.3-1_arm.deb -* http://get.influxdb.org/telegraf/telegraf-0.10.3-1.arm.rpm +* http://get.influxdb.org/telegraf/telegraf_0.10.4-1_arm.deb +* http://get.influxdb.org/telegraf/telegraf-0.10.4-1.arm.rpm 0.2.x: * http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb @@ -56,9 +56,9 @@ for instructions, replacing the `influxdb` package name with `telegraf`. ### Linux tarballs: Latest: -* http://get.influxdb.org/telegraf/telegraf-0.10.3-1_linux_amd64.tar.gz -* http://get.influxdb.org/telegraf/telegraf-0.10.3-1_linux_i386.tar.gz -* http://get.influxdb.org/telegraf/telegraf-0.10.3-1_linux_arm.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_linux_amd64.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_linux_i386.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_linux_arm.tar.gz 0.2.x: * http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz @@ -70,13 +70,13 @@ Latest: To install the full directory structure with config file, run: ``` -sudo tar -C / -zxvf ./telegraf-0.10.3-1_linux_amd64.tar.gz +sudo tar -C / -zxvf ./telegraf-0.10.4-1_linux_amd64.tar.gz ``` To extract only the binary, run: ``` -tar -zxvf telegraf-0.10.3-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf +tar -zxvf telegraf-0.10.4-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf ``` ### Ansible Role: @@ -90,6 +90,12 @@ brew update brew install telegraf ``` +### Windows Binaries (EXPERIMENTAL) + +Latest: +* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_windows_amd64.zip +* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_windows_i386.zip + ### From Source: Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm), From fc4cb1654cc443b7e960463a0d34ba8eea405b81 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 24 Feb 2016 09:10:47 -0700 Subject: [PATCH 079/287] Fix deb and rpm packages closes #752 closes #750 --- scripts/build.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/build.py b/scripts/build.py index 1465e36f3..df8fbb979 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -411,8 +411,10 @@ def build_packages(build_output, version, pkg_arch, nightly=False, rc=None, iter for b in targets: if p == 'windows': b = b + '.exe' + to = os.path.join(build_root, b) + else: + to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], b) fr = os.path.join(current_location, b) - to = os.path.join(build_root, b) print("\t- [{}][{}] - Moving from '{}' to '{}'".format(p, a, fr, to)) copy_file(fr, to) # Package the directory structure From 664816383ad2601f3a8511b991634fa45cd4dc60 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 24 Feb 2016 10:05:10 -0700 Subject: [PATCH 080/287] Update readme to 0.10.4.1 --- CHANGELOG.md | 9 +++++++++ README.md | 22 +++++++++++----------- 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dc83441b2..b8ae58e74 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,15 @@ ### Bugfixes +## v0.10.4.1 + +### Release Notes +- Bug in the build script broke deb and rpm packages. + +### Bugfixes +- [#750](https://github.com/influxdata/telegraf/issues/750): deb package broken +- [#752](https://github.com/influxdata/telegraf/issues/752): rpm package broken + ## v0.10.4 [2016-02-24] ### Release Notes diff --git a/README.md b/README.md index 0f3f81ebf..b4e661735 100644 --- a/README.md +++ b/README.md @@ -27,12 +27,12 @@ the [release blog post](https://influxdata.com/blog/announcing-telegraf-0-10-0/) ### Linux deb and rpm Packages: Latest: -* http://get.influxdb.org/telegraf/telegraf_0.10.4-1_amd64.deb -* http://get.influxdb.org/telegraf/telegraf-0.10.4-1.x86_64.rpm +* http://get.influxdb.org/telegraf/telegraf_0.10.4.1-1_amd64.deb +* http://get.influxdb.org/telegraf/telegraf-0.10.4.1-1.x86_64.rpm Latest (arm): -* http://get.influxdb.org/telegraf/telegraf_0.10.4-1_arm.deb -* http://get.influxdb.org/telegraf/telegraf-0.10.4-1.arm.rpm +* http://get.influxdb.org/telegraf/telegraf_0.10.4.1-1_arm.deb +* http://get.influxdb.org/telegraf/telegraf-0.10.4.1-1.arm.rpm 0.2.x: * http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb @@ -56,9 +56,9 @@ for instructions, replacing the `influxdb` package name with `telegraf`. ### Linux tarballs: Latest: -* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_linux_amd64.tar.gz -* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_linux_i386.tar.gz -* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_linux_arm.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.10.4.1-1_linux_amd64.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.10.4.1-1_linux_i386.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.10.4.1-1_linux_arm.tar.gz 0.2.x: * http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz @@ -70,13 +70,13 @@ Latest: To install the full directory structure with config file, run: ``` -sudo tar -C / -zxvf ./telegraf-0.10.4-1_linux_amd64.tar.gz +sudo tar -C / -zxvf ./telegraf-0.10.4.1-1_linux_amd64.tar.gz ``` To extract only the binary, run: ``` -tar -zxvf telegraf-0.10.4-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf +tar -zxvf telegraf-0.10.4.1-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf ``` ### Ansible Role: @@ -93,8 +93,8 @@ brew install telegraf ### Windows Binaries (EXPERIMENTAL) Latest: -* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_windows_amd64.zip -* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_windows_i386.zip +* http://get.influxdb.org/telegraf/telegraf-0.10.4.1-1_windows_amd64.zip +* http://get.influxdb.org/telegraf/telegraf-0.10.4.1-1_windows_i386.zip ### From Source: From a97c93abe4bab79da4d4faf52e95918e97080d7c Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 26 Feb 2016 15:12:37 +0000 Subject: [PATCH 081/287] add usage_percent into docker readme closes #726 --- plugins/inputs/docker/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index fa662ca80..6086c89e8 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -74,6 +74,7 @@ on the availability of per-cpu stats on your system. - usage_in_usermode - usage_system - usage_total + - usage_percent - docker_net - rx_dropped - rx_bytes From 6284e2011c3f038c239ac12143cadd1f8d235988 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 26 Feb 2016 15:10:12 +0000 Subject: [PATCH 082/287] Fix sensor plugin, was splitting on ":" incorrectly closes #748 --- CHANGELOG.md | 1 + plugins/inputs/sensors/sensors.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b8ae58e74..3320e1dc8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ ### Features ### Bugfixes +- [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" ## v0.10.4.1 diff --git a/plugins/inputs/sensors/sensors.go b/plugins/inputs/sensors/sensors.go index b2c2919cc..dbb304b71 100644 --- a/plugins/inputs/sensors/sensors.go +++ b/plugins/inputs/sensors/sensors.go @@ -49,7 +49,7 @@ func (s *Sensors) Gather(acc telegraf.Accumulator) error { var found bool for _, sensor := range s.Sensors { - parts := strings.SplitN(":", sensor, 2) + parts := strings.SplitN(sensor, ":", 2) if parts[0] == chipName { if parts[1] == "*" || parts[1] == featureLabel { From 04a8e5b888e5b084683baaedd495324759ed926f Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 26 Feb 2016 16:26:43 +0000 Subject: [PATCH 083/287] influxdb output: try to connect on write if there are no conns --- plugins/outputs/influxdb/influxdb.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 60d235511..db9926bbc 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -129,6 +129,7 @@ func (i *InfluxDB) Connect() error { if e != nil { log.Println("Database creation failed: " + e.Error()) + continue } conns = append(conns, c) @@ -156,6 +157,12 @@ func (i *InfluxDB) Description() string { // Choose a random server in the cluster to write to until a successful write // occurs, logging each unsuccessful. If all servers fail, return error. func (i *InfluxDB) Write(metrics []telegraf.Metric) error { + if len(i.conns) == 0 { + err := i.Connect() + if err != nil { + return err + } + } bp, err := client.NewBatchPoints(client.BatchPointsConfig{ Database: i.Database, Precision: i.Precision, From 0fab573c9828a3913fb49e22715caab233a77796 Mon Sep 17 00:00:00 2001 From: arthtux Date: Sun, 28 Feb 2016 15:38:46 -0500 Subject: [PATCH 084/287] add nginx description --- plugins/inputs/nginx/README.md | 50 ++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 plugins/inputs/nginx/README.md diff --git a/plugins/inputs/nginx/README.md b/plugins/inputs/nginx/README.md new file mode 100644 index 000000000..a3d425586 --- /dev/null +++ b/plugins/inputs/nginx/README.md @@ -0,0 +1,50 @@ +# Telegraf Plugin: Nginx + +The example plugin gathers metrics about example things + +### Configuration: + +``` +# Read Nginx's basic status information (ngx_http_stub_status_module) +[[inputs.nginx]] + ## An array of Nginx stub_status URI to gather stats. + urls = ["http://localhost/server_status"] +``` + +### Measurements & Fields: + +- measurement + - port + - server + - accepts + - active + - handled + - reading + - requests + - waiting + +### Tags: + +- All measurements have the following tags: + - port + - server + +### Example Output: + +Using this configuration: +``` +[[inputs.nginx]] + ## An array of Nginx stub_status URI to gather stats. + urls = ["http://localhost/status"] +``` + +When run with: +``` +./telegraf -config telegraf.conf -input-filter nginx -test +``` + +It produces: +``` +* Plugin: nginx, Collection 1 +> nginx,port=80,server=localhost accepts=605i,active=2i,handled=605i,reading=0i,requests=12132i,waiting=1i,writing=1i 1456690994701784331 +``` From 1677960caa2f89ac4aa65b16940ed36b506fd8e7 Mon Sep 17 00:00:00 2001 From: arthtux Date: Sun, 28 Feb 2016 15:41:16 -0500 Subject: [PATCH 085/287] correct nginx README --- plugins/inputs/nginx/README.md | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/plugins/inputs/nginx/README.md b/plugins/inputs/nginx/README.md index a3d425586..8c64f6311 100644 --- a/plugins/inputs/nginx/README.md +++ b/plugins/inputs/nginx/README.md @@ -1,7 +1,5 @@ # Telegraf Plugin: Nginx -The example plugin gathers metrics about example things - ### Configuration: ``` @@ -13,9 +11,7 @@ The example plugin gathers metrics about example things ### Measurements & Fields: -- measurement - - port - - server +- Measurement - accepts - active - handled From baa38d6266e0fe43632fdb902ae6bd377fbe1d08 Mon Sep 17 00:00:00 2001 From: bastard Date: Fri, 26 Feb 2016 20:06:56 +0000 Subject: [PATCH 086/287] Fixing Librato plugin closes #722 --- CHANGELOG.md | 1 + plugins/outputs/librato/librato.go | 62 ++++++++++++++++--- plugins/outputs/librato/librato_test.go | 34 ++++++---- plugins/serializers/graphite/graphite.go | 50 +++++++++------ plugins/serializers/graphite/graphite_test.go | 59 ++++++++++++++++++ 5 files changed, 164 insertions(+), 42 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3320e1dc8..1b98e8bcc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ ### Bugfixes - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" +- [#722](https://github.com/influxdata/telegraf/pull/722): Librato output plugin fixes. Thanks @chrusty! ## v0.10.4.1 diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index 3897e0b4f..ed15350fc 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -4,19 +4,24 @@ import ( "bytes" "encoding/json" "fmt" + "io/ioutil" "log" "net/http" + "strings" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/serializers/graphite" ) type Librato struct { - ApiUser string - ApiToken string - SourceTag string - Timeout internal.Duration + ApiUser string + ApiToken string + Debug bool + NameFromTags bool + SourceTag string + Timeout internal.Duration apiUrl string client *http.Client @@ -32,9 +37,12 @@ var sampleConfig = ` ## Librato API token api_token = "my-secret-token" # required. - ## Tag Field to populate source attribute (optional) - ## This is typically the _hostname_ from which the metric was obtained. - source_tag = "hostname" + ### Debug + # debug = false + + ### Tag Field to populate source attribute (optional) + ### This is typically the _hostname_ from which the metric was obtained. + source_tag = "host" ## Connection timeout. # timeout = "5s" @@ -82,17 +90,27 @@ func (l *Librato) Write(metrics []telegraf.Metric) error { for _, gauge := range gauges { tempGauges = append(tempGauges, gauge) metricCounter++ + if l.Debug { + log.Printf("[DEBUG] Got a gauge: %v\n", gauge) + } } } else { log.Printf("unable to build Gauge for %s, skipping\n", m.Name()) + if l.Debug { + log.Printf("[DEBUG] Couldn't build gauge: %v\n", err) + } } } lmetrics.Gauges = make([]*Gauge, metricCounter) copy(lmetrics.Gauges, tempGauges[0:]) - metricsBytes, err := json.Marshal(metrics) + metricsBytes, err := json.Marshal(lmetrics) if err != nil { return fmt.Errorf("unable to marshal Metrics, %s\n", err.Error()) + } else { + if l.Debug { + log.Printf("[DEBUG] Librato request: %v\n", string(metricsBytes)) + } } req, err := http.NewRequest("POST", l.apiUrl, bytes.NewBuffer(metricsBytes)) if err != nil { @@ -103,8 +121,21 @@ func (l *Librato) Write(metrics []telegraf.Metric) error { resp, err := l.client.Do(req) if err != nil { + if l.Debug { + log.Printf("[DEBUG] Error POSTing metrics: %v\n", err.Error()) + } return fmt.Errorf("error POSTing metrics, %s\n", err.Error()) + } else { + if l.Debug { + htmlData, err := ioutil.ReadAll(resp.Body) + if err != nil { + log.Printf("[DEBUG] Couldn't get response! (%v)\n", err) + } else { + log.Printf("[DEBUG] Librato response: %v\n", string(htmlData)) + } + } } + defer resp.Body.Close() if resp.StatusCode != 200 { @@ -122,11 +153,20 @@ func (l *Librato) Description() string { return "Configuration for Librato API to send metrics to." } +func (l *Librato) buildGaugeName(m telegraf.Metric, fieldName string) string { + // Use the GraphiteSerializer + graphiteSerializer := graphite.GraphiteSerializer{} + serializedMetric := graphiteSerializer.SerializeBucketName(m, fieldName) + + // Deal with slash characters: + return strings.Replace(serializedMetric, "/", "-", -1) +} + func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) { gauges := []*Gauge{} for fieldName, value := range m.Fields() { gauge := &Gauge{ - Name: m.Name() + "_" + fieldName, + Name: l.buildGaugeName(m, fieldName), MeasureTime: m.Time().Unix(), } if err := gauge.setValue(value); err != nil { @@ -142,6 +182,10 @@ func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) { l.SourceTag) } } + gauges = append(gauges, gauge) + } + if l.Debug { + fmt.Printf("[DEBUG] Built gauges: %v\n", gauges) } return gauges, nil } diff --git a/plugins/outputs/librato/librato_test.go b/plugins/outputs/librato/librato_test.go index c0b6ba021..ae08793e0 100644 --- a/plugins/outputs/librato/librato_test.go +++ b/plugins/outputs/librato/librato_test.go @@ -9,9 +9,9 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" - "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/serializers/graphite" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -28,6 +28,14 @@ func fakeLibrato() *Librato { return l } +func BuildTags(t *testing.T) { + testMetric := testutil.TestMetric(0.0, "test1") + graphiteSerializer := graphite.GraphiteSerializer{} + tags, err := graphiteSerializer.Serialize(testMetric) + fmt.Printf("Tags: %v", tags) + require.NoError(t, err) +} + func TestUriOverride(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) @@ -78,7 +86,7 @@ func TestBuildGauge(t *testing.T) { { testutil.TestMetric(0.0, "test1"), &Gauge{ - Name: "test1", + Name: "value1.test1.value", MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), Value: 0.0, }, @@ -87,7 +95,7 @@ func TestBuildGauge(t *testing.T) { { testutil.TestMetric(1.0, "test2"), &Gauge{ - Name: "test2", + Name: "value1.test2.value", MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), Value: 1.0, }, @@ -96,7 +104,7 @@ func TestBuildGauge(t *testing.T) { { testutil.TestMetric(10, "test3"), &Gauge{ - Name: "test3", + Name: "value1.test3.value", MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), Value: 10.0, }, @@ -105,7 +113,7 @@ func TestBuildGauge(t *testing.T) { { testutil.TestMetric(int32(112345), "test4"), &Gauge{ - Name: "test4", + Name: "value1.test4.value", MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), Value: 112345.0, }, @@ -114,7 +122,7 @@ func TestBuildGauge(t *testing.T) { { testutil.TestMetric(int64(112345), "test5"), &Gauge{ - Name: "test5", + Name: "value1.test5.value", MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), Value: 112345.0, }, @@ -123,7 +131,7 @@ func TestBuildGauge(t *testing.T) { { testutil.TestMetric(float32(11234.5), "test6"), &Gauge{ - Name: "test6", + Name: "value1.test6.value", MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), Value: 11234.5, }, @@ -132,7 +140,7 @@ func TestBuildGauge(t *testing.T) { { testutil.TestMetric("11234.5", "test7"), &Gauge{ - Name: "test7", + Name: "value1.test7.value", MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), Value: 11234.5, }, @@ -163,13 +171,13 @@ func TestBuildGauge(t *testing.T) { func TestBuildGaugeWithSource(t *testing.T) { pt1, _ := telegraf.NewMetric( "test1", - map[string]string{"hostname": "192.168.0.1"}, + map[string]string{"hostname": "192.168.0.1", "tag1": "value1"}, map[string]interface{}{"value": 0.0}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) pt2, _ := telegraf.NewMetric( "test2", - map[string]string{"hostnam": "192.168.0.1"}, + map[string]string{"hostnam": "192.168.0.1", "tag1": "value1"}, map[string]interface{}{"value": 1.0}, time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC), ) @@ -182,7 +190,7 @@ func TestBuildGaugeWithSource(t *testing.T) { { pt1, &Gauge{ - Name: "test1", + Name: "192_168_0_1.value1.test1.value", MeasureTime: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), Value: 0.0, Source: "192.168.0.1", @@ -192,7 +200,7 @@ func TestBuildGaugeWithSource(t *testing.T) { { pt2, &Gauge{ - Name: "test2", + Name: "192_168_0_1.value1.test1.value", MeasureTime: time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC).Unix(), Value: 1.0, }, diff --git a/plugins/serializers/graphite/graphite.go b/plugins/serializers/graphite/graphite.go index d04f756c1..908dce8fa 100644 --- a/plugins/serializers/graphite/graphite.go +++ b/plugins/serializers/graphite/graphite.go @@ -14,39 +14,49 @@ type GraphiteSerializer struct { func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]string, error) { out := []string{} - // Get name - name := metric.Name() + // Convert UnixNano to Unix timestamps timestamp := metric.UnixNano() / 1000000000 - tag_str := buildTags(metric) for field_name, value := range metric.Fields() { // Convert value value_str := fmt.Sprintf("%#v", value) // Write graphite metric var graphitePoint string - if name == field_name { - graphitePoint = fmt.Sprintf("%s.%s %s %d", - tag_str, - strings.Replace(name, ".", "_", -1), - value_str, - timestamp) - } else { - graphitePoint = fmt.Sprintf("%s.%s.%s %s %d", - tag_str, - strings.Replace(name, ".", "_", -1), - strings.Replace(field_name, ".", "_", -1), - value_str, - timestamp) - } - if s.Prefix != "" { - graphitePoint = fmt.Sprintf("%s.%s", s.Prefix, graphitePoint) - } + graphitePoint = fmt.Sprintf("%s %s %d", + s.SerializeBucketName(metric, field_name), + value_str, + timestamp) out = append(out, graphitePoint) } return out, nil } +func (s *GraphiteSerializer) SerializeBucketName(metric telegraf.Metric, field_name string) string { + // Get the metric name + name := metric.Name() + + // Convert UnixNano to Unix timestamps + tag_str := buildTags(metric) + + // Write graphite metric + var serializedBucketName string + if name == field_name { + serializedBucketName = fmt.Sprintf("%s.%s", + tag_str, + strings.Replace(name, ".", "_", -1)) + } else { + serializedBucketName = fmt.Sprintf("%s.%s.%s", + tag_str, + strings.Replace(name, ".", "_", -1), + strings.Replace(field_name, ".", "_", -1)) + } + if s.Prefix != "" { + serializedBucketName = fmt.Sprintf("%s.%s", s.Prefix, serializedBucketName) + } + return serializedBucketName +} + func buildTags(metric telegraf.Metric) string { var keys []string tags := metric.Tags() diff --git a/plugins/serializers/graphite/graphite_test.go b/plugins/serializers/graphite/graphite_test.go index 72b203b7a..8d25bf937 100644 --- a/plugins/serializers/graphite/graphite_test.go +++ b/plugins/serializers/graphite/graphite_test.go @@ -119,3 +119,62 @@ func TestSerializeMetricPrefix(t *testing.T) { sort.Strings(expS) assert.Equal(t, expS, mS) } + +func TestSerializeBucketNameNoHost(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + "datacenter": "us-west-2", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + m, err := telegraf.NewMetric("cpu", tags, fields, now) + assert.NoError(t, err) + + s := GraphiteSerializer{} + mS := s.SerializeBucketName(m, "usage_idle") + + expS := fmt.Sprintf("cpu0.us-west-2.cpu.usage_idle") + assert.Equal(t, expS, mS) +} + +func TestSerializeBucketNameHost(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "host": "localhost", + "cpu": "cpu0", + "datacenter": "us-west-2", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + m, err := telegraf.NewMetric("cpu", tags, fields, now) + assert.NoError(t, err) + + s := GraphiteSerializer{} + mS := s.SerializeBucketName(m, "usage_idle") + + expS := fmt.Sprintf("localhost.cpu0.us-west-2.cpu.usage_idle") + assert.Equal(t, expS, mS) +} + +func TestSerializeBucketNamePrefix(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "host": "localhost", + "cpu": "cpu0", + "datacenter": "us-west-2", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + m, err := telegraf.NewMetric("cpu", tags, fields, now) + assert.NoError(t, err) + + s := GraphiteSerializer{Prefix: "prefix"} + mS := s.SerializeBucketName(m, "usage_idle") + + expS := fmt.Sprintf("prefix.localhost.cpu0.us-west-2.cpu.usage_idle") + assert.Equal(t, expS, mS) +} From 3568fb9f9384fdffe24a6807d51ccca04fa12fcf Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 29 Feb 2016 16:13:00 +0000 Subject: [PATCH 087/287] Support specifying influxdb retention policy closes #692 --- CHANGELOG.md | 1 + etc/telegraf.conf | 25 +++++++++++++++-------- plugins/outputs/influxdb/influxdb.go | 30 ++++++++++++++++------------ 3 files changed, 35 insertions(+), 21 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b98e8bcc..72300f6da 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ### Release Notes ### Features +- [#692](https://github.com/influxdata/telegraf/pull/770): Support InfluxDB retention policies ### Bugfixes - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" diff --git a/etc/telegraf.conf b/etc/telegraf.conf index d8a295442..f5a2b34dd 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -56,15 +56,17 @@ # Configuration for influxdb server to send metrics to [[outputs.influxdb]] - # The full HTTP or UDP endpoint URL for your InfluxDB instance. - # Multiple urls can be specified but it is assumed that they are part of the same - # cluster, this means that only ONE of the urls will be written to each interval. + ## The full HTTP or UDP endpoint URL for your InfluxDB instance. + ## Multiple urls can be specified as part of the same cluster, + ## this means that only ONE of the urls will be written to each interval. # urls = ["udp://localhost:8089"] # UDP endpoint example urls = ["http://localhost:8086"] # required - # The target database for metrics (telegraf will create it if not exists) + ## The target database for metrics (telegraf will create it if not exists). database = "telegraf" # required - # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". - # note: using second precision greatly helps InfluxDB compression + ## Retention policy to write to. + retention_policy = "default" + ## Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". + ## note: using "s" precision greatly improves InfluxDB compression. precision = "s" ## Write timeout (for the InfluxDB client), formatted as a string. @@ -72,11 +74,18 @@ timeout = "5s" # username = "telegraf" # password = "metricsmetricsmetricsmetrics" - # Set the user agent for HTTP POSTs (can be useful for log differentiation) + ## Set the user agent for HTTP POSTs (can be useful for log differentiation) # user_agent = "telegraf" - # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) + ## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) # udp_payload = 512 + ## Optional SSL Config + # ssl_ca = "/etc/telegraf/ca.pem" + # ssl_cert = "/etc/telegraf/cert.pem" + # ssl_key = "/etc/telegraf/key.pem" + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false + ############################################################################### # INPUTS # diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index db9926bbc..5eef553a2 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -18,15 +18,16 @@ import ( type InfluxDB struct { // URL is only for backwards compatability - URL string - URLs []string `toml:"urls"` - Username string - Password string - Database string - UserAgent string - Precision string - Timeout internal.Duration - UDPPayload int `toml:"udp_payload"` + URL string + URLs []string `toml:"urls"` + Username string + Password string + Database string + UserAgent string + Precision string + RetentionPolicy string + Timeout internal.Duration + UDPPayload int `toml:"udp_payload"` // Path to CA file SSLCA string `toml:"ssl_ca"` @@ -46,10 +47,12 @@ var sampleConfig = ` ## this means that only ONE of the urls will be written to each interval. # urls = ["udp://localhost:8089"] # UDP endpoint example urls = ["http://localhost:8086"] # required - ## The target database for metrics (telegraf will create it if not exists) + ## The target database for metrics (telegraf will create it if not exists). database = "telegraf" # required + ## Retention policy to write to. + retention_policy = "default" ## Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". - ## note: using "s" precision greatly improves InfluxDB compression + ## note: using "s" precision greatly improves InfluxDB compression. precision = "s" ## Write timeout (for the InfluxDB client), formatted as a string. @@ -164,8 +167,9 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { } } bp, err := client.NewBatchPoints(client.BatchPointsConfig{ - Database: i.Database, - Precision: i.Precision, + Database: i.Database, + Precision: i.Precision, + RetentionPolicy: i.RetentionPolicy, }) if err != nil { return err From ea7cbc781e5937767d808e4b6e9d7d5fe47e2554 Mon Sep 17 00:00:00 2001 From: Dirk Pahl Date: Mon, 29 Feb 2016 15:19:44 +0100 Subject: [PATCH 088/287] Create a FreeBSD build closes #766 --- scripts/build.py | 4 +++- scripts/circle-test.sh | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/scripts/build.py b/scripts/build.py index df8fbb979..0f3007cfa 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -72,12 +72,14 @@ targets = { supported_builds = { 'darwin': [ "amd64", "i386" ], 'windows': [ "amd64", "i386" ], - 'linux': [ "amd64", "i386", "arm" ] + 'linux': [ "amd64", "i386", "arm" ], + 'freebsd': [ "amd64" ] } supported_packages = { "darwin": [ "tar", "zip" ], "linux": [ "deb", "rpm", "tar", "zip" ], "windows": [ "zip" ], + 'freebsd': [ "tar" ] } supported_tags = { # "linux": { diff --git a/scripts/circle-test.sh b/scripts/circle-test.sh index 91511b050..9a3e0e678 100755 --- a/scripts/circle-test.sh +++ b/scripts/circle-test.sh @@ -77,5 +77,6 @@ if [ $? -eq 0 ]; then echo $tag exit_if_fail ./scripts/build.py --package --version=$tag --platform=linux --arch=all --upload exit_if_fail ./scripts/build.py --package --version=$tag --platform=windows --arch=all --upload + exit_if_fail ./scripts/build.py --package --version=$tag --platform=freebsd --arch=all --upload mv build $CIRCLE_ARTIFACTS fi From 7416d6ea71e8afbbaafcc37eaa727793acf474fc Mon Sep 17 00:00:00 2001 From: Pierre Fersing Date: Mon, 29 Feb 2016 17:52:58 +0100 Subject: [PATCH 089/287] Improve timeout in input plugins --- plugins/inputs/apache/apache.go | 5 ++- plugins/inputs/couchdb/couchdb.go | 12 ++++++- plugins/inputs/disque/disque.go | 8 ++++- plugins/inputs/dovecot/dovecot.go | 7 +++- plugins/inputs/elasticsearch/elasticsearch.go | 7 +++- plugins/inputs/haproxy/haproxy.go | 7 ++-- plugins/inputs/httpjson/httpjson.go | 7 +++- plugins/inputs/influxdb/influxdb.go | 12 ++++++- plugins/inputs/jolokia/jolokia.go | 8 ++++- plugins/inputs/mailchimp/chimp_api.go | 6 +++- plugins/inputs/mesos/mesos.go | 12 ++++++- plugins/inputs/mysql/mysql.go | 23 ++++++++++++ plugins/inputs/mysql/mysql_test.go | 35 +++++++++++++++++++ plugins/inputs/nginx/nginx.go | 5 ++- plugins/inputs/nsq/nsq.go | 5 ++- plugins/inputs/prometheus/prometheus.go | 12 ++++++- plugins/inputs/rabbitmq/rabbitmq.go | 6 +++- plugins/inputs/raindrops/raindrops.go | 9 +++-- plugins/inputs/redis/redis.go | 8 ++++- plugins/inputs/riak/riak.go | 8 ++++- plugins/inputs/zookeeper/zookeeper.go | 3 ++ 21 files changed, 184 insertions(+), 21 deletions(-) diff --git a/plugins/inputs/apache/apache.go b/plugins/inputs/apache/apache.go index b6e3e50f1..eba5a1188 100644 --- a/plugins/inputs/apache/apache.go +++ b/plugins/inputs/apache/apache.go @@ -58,7 +58,10 @@ var tr = &http.Transport{ ResponseHeaderTimeout: time.Duration(3 * time.Second), } -var client = &http.Client{Transport: tr} +var client = &http.Client{ + Transport: tr, + Timeout: time.Duration(4 * time.Second), +} func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { resp, err := client.Get(addr.String()) diff --git a/plugins/inputs/couchdb/couchdb.go b/plugins/inputs/couchdb/couchdb.go index ba64e4a6d..bf241649a 100644 --- a/plugins/inputs/couchdb/couchdb.go +++ b/plugins/inputs/couchdb/couchdb.go @@ -10,6 +10,7 @@ import ( "reflect" "strings" "sync" + "time" ) // Schema: @@ -112,9 +113,18 @@ func (c *CouchDB) Gather(accumulator telegraf.Accumulator) error { } +var tr = &http.Transport{ + ResponseHeaderTimeout: time.Duration(3 * time.Second), +} + +var client = &http.Client{ + Transport: tr, + Timeout: time.Duration(4 * time.Second), +} + func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host string) error { - response, error := http.Get(host) + response, error := client.Get(host) if error != nil { return error } diff --git a/plugins/inputs/disque/disque.go b/plugins/inputs/disque/disque.go index a311b6739..822e5924f 100644 --- a/plugins/inputs/disque/disque.go +++ b/plugins/inputs/disque/disque.go @@ -9,6 +9,7 @@ import ( "strconv" "strings" "sync" + "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" @@ -30,6 +31,8 @@ var sampleConfig = ` servers = ["localhost"] ` +var defaultTimeout = 5 * time.Second + func (r *Disque) SampleConfig() string { return sampleConfig } @@ -107,7 +110,7 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { addr.Host = addr.Host + ":" + defaultPort } - c, err := net.Dial("tcp", addr.Host) + c, err := net.DialTimeout("tcp", addr.Host, defaultTimeout) if err != nil { return fmt.Errorf("Unable to connect to disque server '%s': %s", addr.Host, err) } @@ -132,6 +135,9 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { g.c = c } + // Extend connection + g.c.SetDeadline(time.Now().Add(defaultTimeout)) + g.c.Write([]byte("info\r\n")) r := bufio.NewReader(g.c) diff --git a/plugins/inputs/dovecot/dovecot.go b/plugins/inputs/dovecot/dovecot.go index 75829f595..3a6607da9 100644 --- a/plugins/inputs/dovecot/dovecot.go +++ b/plugins/inputs/dovecot/dovecot.go @@ -34,6 +34,8 @@ var sampleConfig = ` domains = [] ` +var defaultTimeout = time.Second * time.Duration(5) + func (d *Dovecot) SampleConfig() string { return sampleConfig } const defaultPort = "24242" @@ -74,12 +76,15 @@ func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, doms map[s return fmt.Errorf("Error: %s on url %s\n", err, addr) } - c, err := net.Dial("tcp", addr) + c, err := net.DialTimeout("tcp", addr, defaultTimeout) if err != nil { return fmt.Errorf("Unable to connect to dovecot server '%s': %s", addr, err) } defer c.Close() + // Extend connection + c.SetDeadline(time.Now().Add(defaultTimeout)) + c.Write([]byte("EXPORT\tdomain\n\n")) var buf bytes.Buffer io.Copy(&buf, c) diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index aae97f4d7..32bd58516 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -81,7 +81,12 @@ type Elasticsearch struct { // NewElasticsearch return a new instance of Elasticsearch func NewElasticsearch() *Elasticsearch { - return &Elasticsearch{client: http.DefaultClient} + tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)} + client := &http.Client{ + Transport: tr, + Timeout: time.Duration(4 * time.Second), + } + return &Elasticsearch{client: client} } // SampleConfig returns sample configuration for this plugin. diff --git a/plugins/inputs/haproxy/haproxy.go b/plugins/inputs/haproxy/haproxy.go index 233cd8481..b1402d8ec 100644 --- a/plugins/inputs/haproxy/haproxy.go +++ b/plugins/inputs/haproxy/haproxy.go @@ -129,8 +129,11 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error { func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error { if g.client == nil { - - client := &http.Client{} + tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)} + client := &http.Client{ + Transport: tr, + Timeout: time.Duration(4 * time.Second), + } g.client = client } diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index c055f66de..c07a9602a 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -244,6 +244,11 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) { func init() { inputs.Add("httpjson", func() telegraf.Input { - return &HttpJson{client: RealHTTPClient{client: &http.Client{}}} + tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)} + client := &http.Client{ + Transport: tr, + Timeout: time.Duration(4 * time.Second), + } + return &HttpJson{client: RealHTTPClient{client: client}} }) } diff --git a/plugins/inputs/influxdb/influxdb.go b/plugins/inputs/influxdb/influxdb.go index 63a3c1854..5af9a0731 100644 --- a/plugins/inputs/influxdb/influxdb.go +++ b/plugins/inputs/influxdb/influxdb.go @@ -7,6 +7,7 @@ import ( "net/http" "strings" "sync" + "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" @@ -70,6 +71,15 @@ type point struct { Values map[string]interface{} `json:"values"` } +var tr = &http.Transport{ + ResponseHeaderTimeout: time.Duration(3 * time.Second), +} + +var client = &http.Client{ + Transport: tr, + Timeout: time.Duration(4 * time.Second), +} + // Gathers data from a particular URL // Parameters: // acc : The telegraf Accumulator to use @@ -81,7 +91,7 @@ func (i *InfluxDB) gatherURL( acc telegraf.Accumulator, url string, ) error { - resp, err := http.Get(url) + resp, err := client.Get(url) if err != nil { return err } diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go index 2e0bba6d5..a65f5ff8f 100644 --- a/plugins/inputs/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -7,6 +7,7 @@ import ( "io/ioutil" "net/http" "net/url" + "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" @@ -160,6 +161,11 @@ func (j *Jolokia) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("jolokia", func() telegraf.Input { - return &Jolokia{jClient: &JolokiaClientImpl{client: &http.Client{}}} + tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)} + client := &http.Client{ + Transport: tr, + Timeout: time.Duration(4 * time.Second), + } + return &Jolokia{jClient: &JolokiaClientImpl{client: client}} }) } diff --git a/plugins/inputs/mailchimp/chimp_api.go b/plugins/inputs/mailchimp/chimp_api.go index fe2c56d0c..75c9a30d7 100644 --- a/plugins/inputs/mailchimp/chimp_api.go +++ b/plugins/inputs/mailchimp/chimp_api.go @@ -10,6 +10,7 @@ import ( "net/url" "regexp" "sync" + "time" ) const ( @@ -120,7 +121,10 @@ func (a *ChimpAPI) GetReport(campaignID string) (Report, error) { } func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { - client := &http.Client{Transport: api.Transport} + client := &http.Client{ + Transport: api.Transport, + Timeout: time.Duration(4 * time.Second), + } var b bytes.Buffer req, err := http.NewRequest("GET", api.url.String(), &b) diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 5bcda7970..ccb76daae 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -10,6 +10,7 @@ import ( "strconv" "strings" "sync" + "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" @@ -261,6 +262,15 @@ func (m *Mesos) removeGroup(j *map[string]interface{}) { } } +var tr = &http.Transport{ + ResponseHeaderTimeout: time.Duration(3 * time.Second), +} + +var client = &http.Client{ + Transport: tr, + Timeout: time.Duration(4 * time.Second), +} + // This should not belong to the object func (m *Mesos) gatherMetrics(a string, acc telegraf.Accumulator) error { var jsonOut map[string]interface{} @@ -282,7 +292,7 @@ func (m *Mesos) gatherMetrics(a string, acc telegraf.Accumulator) error { ts := strconv.Itoa(m.Timeout) + "ms" - resp, err := http.Get("http://" + a + "/metrics/snapshot?timeout=" + ts) + resp, err := client.Get("http://" + a + "/metrics/snapshot?timeout=" + ts) if err != nil { return err diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index b2e2729a9..cd9e7ae28 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -2,8 +2,10 @@ package mysql import ( "database/sql" + "net/url" "strconv" "strings" + "time" _ "github.com/go-sql-driver/mysql" "github.com/influxdata/telegraf" @@ -26,6 +28,8 @@ var sampleConfig = ` servers = ["tcp(127.0.0.1:3306)/"] ` +var defaultTimeout = time.Second * time.Duration(5) + func (m *Mysql) SampleConfig() string { return sampleConfig } @@ -122,6 +126,10 @@ func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error { serv = "" } + serv, err := dsnAddTimeout(serv) + if err != nil { + return err + } db, err := sql.Open("mysql", serv) if err != nil { return err @@ -207,6 +215,21 @@ func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error { return nil } +func dsnAddTimeout(dsn string) (string, error) { + u, err := url.Parse(dsn) + if err != nil { + return "", err + } + v := u.Query() + + // Only override timeout if not already defined + if _, ok := v["timeout"]; ok == false { + v.Add("timeout", defaultTimeout.String()) + u.RawQuery = v.Encode() + } + return u.String(), nil +} + func init() { inputs.Add("mysql", func() telegraf.Input { return &Mysql{} diff --git a/plugins/inputs/mysql/mysql_test.go b/plugins/inputs/mysql/mysql_test.go index 855e8ba52..dffc328fa 100644 --- a/plugins/inputs/mysql/mysql_test.go +++ b/plugins/inputs/mysql/mysql_test.go @@ -84,3 +84,38 @@ func TestMysqlParseDSN(t *testing.T) { } } } + +func TestMysqlDNSAddTimeout(t *testing.T) { + tests := []struct { + input string + output string + }{ + { + "", + "?timeout=5s", + }, + { + "127.0.0.1", + "127.0.0.1?timeout=5s", + }, + { + "tcp(192.168.1.1:3306)/", + "tcp(192.168.1.1:3306)/?timeout=5s", + }, + { + "root:passwd@tcp(192.168.1.1:3306)/?tls=false", + "root:passwd@tcp(192.168.1.1:3306)/?timeout=5s&tls=false", + }, + { + "root:passwd@tcp(192.168.1.1:3306)/?tls=false&timeout=10s", + "root:passwd@tcp(192.168.1.1:3306)/?tls=false&timeout=10s", + }, + } + + for _, test := range tests { + output, _ := parseDSN(test.input) + if output != test.output { + t.Errorf("Expected %s, got %s\n", test.output, output) + } + } +} diff --git a/plugins/inputs/nginx/nginx.go b/plugins/inputs/nginx/nginx.go index 3b008fbf3..c13ba39f3 100644 --- a/plugins/inputs/nginx/nginx.go +++ b/plugins/inputs/nginx/nginx.go @@ -58,7 +58,10 @@ var tr = &http.Transport{ ResponseHeaderTimeout: time.Duration(3 * time.Second), } -var client = &http.Client{Transport: tr} +var client = &http.Client{ + Transport: tr, + Timeout: time.Duration(4 * time.Second), +} func (n *Nginx) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { resp, err := client.Get(addr.String()) diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go index 6b3be66f2..35ba76866 100644 --- a/plugins/inputs/nsq/nsq.go +++ b/plugins/inputs/nsq/nsq.go @@ -84,7 +84,10 @@ var tr = &http.Transport{ ResponseHeaderTimeout: time.Duration(3 * time.Second), } -var client = &http.Client{Transport: tr} +var client = &http.Client{ + Transport: tr, + Timeout: time.Duration(4 * time.Second), +} func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error { u, err := buildURL(e) diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 188e6b914..5873b27cc 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -10,6 +10,7 @@ import ( "io" "net/http" "sync" + "time" ) type Prometheus struct { @@ -51,8 +52,17 @@ func (g *Prometheus) Gather(acc telegraf.Accumulator) error { return outerr } +var tr = &http.Transport{ + ResponseHeaderTimeout: time.Duration(3 * time.Second), +} + +var client = &http.Client{ + Transport: tr, + Timeout: time.Duration(4 * time.Second), +} + func (g *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error { - resp, err := http.Get(url) + resp, err := client.Get(url) if err != nil { return fmt.Errorf("error making HTTP request to %s: %s", url, err) } diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index e51d65e15..4d119282d 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -122,7 +122,11 @@ func (r *RabbitMQ) Description() string { func (r *RabbitMQ) Gather(acc telegraf.Accumulator) error { if r.Client == nil { - r.Client = &http.Client{} + tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)} + r.Client = &http.Client{ + Transport: tr, + Timeout: time.Duration(4 * time.Second), + } } var errChan = make(chan error, len(gatherFunctions)) diff --git a/plugins/inputs/raindrops/raindrops.go b/plugins/inputs/raindrops/raindrops.go index fed22b693..6851f5d93 100644 --- a/plugins/inputs/raindrops/raindrops.go +++ b/plugins/inputs/raindrops/raindrops.go @@ -177,8 +177,11 @@ func (r *Raindrops) getTags(addr *url.URL) map[string]string { func init() { inputs.Add("raindrops", func() telegraf.Input { - return &Raindrops{http_client: &http.Client{Transport: &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), - }}} + return &Raindrops{http_client: &http.Client{ + Transport: &http.Transport{ + ResponseHeaderTimeout: time.Duration(3 * time.Second), + }, + Timeout: time.Duration(4 * time.Second), + }} }) } diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index b8862f6bc..859b23a22 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -9,6 +9,7 @@ import ( "strconv" "strings" "sync" + "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" @@ -30,6 +31,8 @@ var sampleConfig = ` servers = ["tcp://localhost:6379"] ` +var defaultTimeout = 5 * time.Second + func (r *Redis) SampleConfig() string { return sampleConfig } @@ -120,12 +123,15 @@ func (r *Redis) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { addr.Host = addr.Host + ":" + defaultPort } - c, err := net.Dial("tcp", addr.Host) + c, err := net.DialTimeout("tcp", addr.Host, defaultTimeout) if err != nil { return fmt.Errorf("Unable to connect to redis server '%s': %s", addr.Host, err) } defer c.Close() + // Extend connection + c.SetDeadline(time.Now().Add(defaultTimeout)) + if addr.User != nil { pwd, set := addr.User.Password() if set && pwd != "" { diff --git a/plugins/inputs/riak/riak.go b/plugins/inputs/riak/riak.go index 6750c75a0..56231176b 100644 --- a/plugins/inputs/riak/riak.go +++ b/plugins/inputs/riak/riak.go @@ -5,6 +5,7 @@ import ( "fmt" "net/http" "net/url" + "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" @@ -20,7 +21,12 @@ type Riak struct { // NewRiak return a new instance of Riak with a default http client func NewRiak() *Riak { - return &Riak{client: http.DefaultClient} + tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)} + client := &http.Client{ + Transport: tr, + Timeout: time.Duration(4 * time.Second), + } + return &Riak{client: client} } // Type riakStats represents the data that is received from Riak diff --git a/plugins/inputs/zookeeper/zookeeper.go b/plugins/inputs/zookeeper/zookeeper.go index 0f2b2e06f..54defc56f 100644 --- a/plugins/inputs/zookeeper/zookeeper.go +++ b/plugins/inputs/zookeeper/zookeeper.go @@ -67,6 +67,9 @@ func (z *Zookeeper) gatherServer(address string, acc telegraf.Accumulator) error } defer c.Close() + // Extend connection + c.SetDeadline(time.Now().Add(defaultTimeout)) + fmt.Fprintf(c, "%s\n", "mntr") rdr := bufio.NewReader(c) scanner := bufio.NewScanner(rdr) From f0747e76dade818e4150f13a06f214766e0afdb7 Mon Sep 17 00:00:00 2001 From: Pierre Fersing Date: Mon, 29 Feb 2016 19:04:56 +0100 Subject: [PATCH 090/287] Fix newly added test --- plugins/inputs/mysql/mysql_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/mysql/mysql_test.go b/plugins/inputs/mysql/mysql_test.go index dffc328fa..0b25fe2be 100644 --- a/plugins/inputs/mysql/mysql_test.go +++ b/plugins/inputs/mysql/mysql_test.go @@ -113,7 +113,7 @@ func TestMysqlDNSAddTimeout(t *testing.T) { } for _, test := range tests { - output, _ := parseDSN(test.input) + output, _ := dsnAddTimeout(test.input) if output != test.output { t.Errorf("Expected %s, got %s\n", test.output, output) } From ca3a80fbe113f2c74f128d29cd8f561e65d9a428 Mon Sep 17 00:00:00 2001 From: Pierre Fersing Date: Mon, 29 Feb 2016 19:39:22 +0100 Subject: [PATCH 091/287] Fix invalid DSN after dsnAddTimeout and "" DSN --- plugins/inputs/mysql/mysql.go | 6 ++++++ plugins/inputs/mysql/mysql_test.go | 6 +----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index cd9e7ae28..474067716 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -216,6 +216,12 @@ func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error { } func dsnAddTimeout(dsn string) (string, error) { + + // DSN "?timeout=5s" is not valid, but "/?timeout=5s" is valid ("" and "/" + // are the same DSN) + if dsn == "" { + dsn = "/" + } u, err := url.Parse(dsn) if err != nil { return "", err diff --git a/plugins/inputs/mysql/mysql_test.go b/plugins/inputs/mysql/mysql_test.go index 0b25fe2be..50c1ed7b7 100644 --- a/plugins/inputs/mysql/mysql_test.go +++ b/plugins/inputs/mysql/mysql_test.go @@ -92,11 +92,7 @@ func TestMysqlDNSAddTimeout(t *testing.T) { }{ { "", - "?timeout=5s", - }, - { - "127.0.0.1", - "127.0.0.1?timeout=5s", + "/?timeout=5", }, { "tcp(192.168.1.1:3306)/", From fe43fb47e19d5cf0f1d097a17d96f0fe3cdfe810 Mon Sep 17 00:00:00 2001 From: Pierre Fersing Date: Mon, 29 Feb 2016 22:02:33 +0100 Subject: [PATCH 092/287] Fix test closes #771 --- CHANGELOG.md | 1 + plugins/inputs/elasticsearch/elasticsearch_test.go | 3 +++ plugins/inputs/mysql/mysql_test.go | 2 +- 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 72300f6da..74e211fae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ ### Features - [#692](https://github.com/influxdata/telegraf/pull/770): Support InfluxDB retention policies +- [#771](https://github.com/influxdata/telegraf/pull/771): Default timeouts for input plugns. Thanks @PierreF! ### Bugfixes - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go index f94d3f9ac..f29857507 100644 --- a/plugins/inputs/elasticsearch/elasticsearch_test.go +++ b/plugins/inputs/elasticsearch/elasticsearch_test.go @@ -34,6 +34,9 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) { return res, nil } +func (t *transportMock) CancelRequest(_ *http.Request) { +} + func TestElasticsearch(t *testing.T) { es := NewElasticsearch() es.Servers = []string{"http://example.com:9200"} diff --git a/plugins/inputs/mysql/mysql_test.go b/plugins/inputs/mysql/mysql_test.go index 50c1ed7b7..9e4073432 100644 --- a/plugins/inputs/mysql/mysql_test.go +++ b/plugins/inputs/mysql/mysql_test.go @@ -92,7 +92,7 @@ func TestMysqlDNSAddTimeout(t *testing.T) { }{ { "", - "/?timeout=5", + "/?timeout=5s", }, { "tcp(192.168.1.1:3306)/", From 9af8d6912a42e95813943258df86a3edc4f2f1b0 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 1 Mar 2016 10:12:28 +0000 Subject: [PATCH 093/287] Remove naoina/toml dependency, use influxdata/toml closes #745 --- CHANGELOG.md | 1 + Godeps | 6 +++--- internal/config/config.go | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 74e211fae..b39e639f7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ ### Bugfixes - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" - [#722](https://github.com/influxdata/telegraf/pull/722): Librato output plugin fixes. Thanks @chrusty! +- [#745](https://github.com/influxdata/telegraf/issues/745): Fix Telegraf toml parse panic on large config files. ## v0.10.4.1 diff --git a/Godeps b/Godeps index d2ac1857f..7389b1cb8 100644 --- a/Godeps +++ b/Godeps @@ -18,15 +18,16 @@ github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2 github.com/gorilla/context 1c83b3eabd45b6d76072b66b746c20815fb2872d github.com/gorilla/mux 26a6070f849969ba72b72256e9f14cf519751690 github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478 -github.com/influxdata/config bae7cb98197d842374d3b8403905924094930f24 +github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da github.com/influxdata/influxdb ef571fc104dc24b77cd3710c156cd95e5cfd7aa5 +github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0 github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264 github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38 github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453 +github.com/miekg/dns e0d84d97e59bcb6561eae269c4e94d25b66822cb github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504 github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b -github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9 github.com/nats-io/nats 6a83f1a633cfbfd90aa648ac99fb38c06a8b40df github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2 @@ -50,4 +51,3 @@ gopkg.in/dancannon/gorethink.v1 6f088135ff288deb9d5546f4c71919207f891a70 gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715 gopkg.in/mgo.v2 03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64 gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4 -github.com/miekg/dns e0d84d97e59bcb6561eae269c4e94d25b66822cb \ No newline at end of file diff --git a/internal/config/config.go b/internal/config/config.go index b5b73e06e..a7a9eaab4 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -19,7 +19,7 @@ import ( "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/config" - "github.com/naoina/toml/ast" + "github.com/influxdata/toml/ast" ) // Config specifies the URL/user/password for the database that telegraf From 8d9111562356bbeea948da7b4c9685042fc6c065 Mon Sep 17 00:00:00 2001 From: Aleksei Magusev and Andrea Leopardi Date: Thu, 25 Feb 2016 16:57:28 +0100 Subject: [PATCH 094/287] Add generic UDP listener service input --- plugins/inputs/all/all.go | 1 + plugins/inputs/udp_listener/udp_listener.go | 154 ++++++++++++++++++++ 2 files changed, 155 insertions(+) create mode 100644 plugins/inputs/udp_listener/udp_listener.go diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 5af18fcff..262de37ac 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -49,6 +49,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/system" _ "github.com/influxdata/telegraf/plugins/inputs/trig" _ "github.com/influxdata/telegraf/plugins/inputs/twemproxy" + _ "github.com/influxdata/telegraf/plugins/inputs/udp_listener" _ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters" _ "github.com/influxdata/telegraf/plugins/inputs/zfs" _ "github.com/influxdata/telegraf/plugins/inputs/zookeeper" diff --git a/plugins/inputs/udp_listener/udp_listener.go b/plugins/inputs/udp_listener/udp_listener.go new file mode 100644 index 000000000..c6d483d48 --- /dev/null +++ b/plugins/inputs/udp_listener/udp_listener.go @@ -0,0 +1,154 @@ +package udp_listener + +import ( + "log" + "net" + "sync" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers" +) + +type UdpListener struct { + ServiceAddress string + UDPPacketSize int `toml:"udp_packet_size"` + AllowedPendingMessages int + sync.Mutex + + in chan []byte + done chan struct{} + + parser parsers.Parser + + // Keep the accumulator in this struct + acc telegraf.Accumulator +} + +const UDP_PACKET_SIZE int = 1500 + +var dropwarn = "ERROR: Message queue full. Discarding line [%s] " + + "You may want to increase allowed_pending_messages in the config\n" + +const sampleConfig = ` + ## Address and port to host UDP listener on + service_address = ":8125" + + ## Number of UDP messages allowed to queue up. Once filled, the + ## UDP listener will start dropping packets. + allowed_pending_messages = 10000 + + ## UDP packet size for the server to listen for. This will depend + ## on the size of the packets that the client is sending, which is + ## usually 1500 bytes. + udp_packet_size = 1500 + + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +` + +func (u *UdpListener) SampleConfig() string { + return sampleConfig +} + +func (u *UdpListener) Description() string { + return "Generic UDP listener" +} + +// All the work is done in the Start() function, so this is just a dummy +// function. +func (u *UdpListener) Gather(_ telegraf.Accumulator) error { + return nil +} + +func (u *UdpListener) SetParser(parser parsers.Parser) { + u.parser = parser +} + +func (u *UdpListener) Start(acc telegraf.Accumulator) error { + u.Lock() + defer u.Unlock() + + u.acc = acc + u.in = make(chan []byte, u.AllowedPendingMessages) + u.done = make(chan struct{}) + + go u.udpListen() + go u.udpParser() + + log.Printf("Started UDP listener service on %s\n", u.ServiceAddress) + return nil +} + +func (u *UdpListener) Stop() { + u.Lock() + defer u.Unlock() + close(u.done) + close(u.in) + log.Println("Stopped UDP listener service on ", u.ServiceAddress) +} + +func (u *UdpListener) udpListen() error { + address, _ := net.ResolveUDPAddr("udp", u.ServiceAddress) + listener, err := net.ListenUDP("udp", address) + if err != nil { + log.Fatalf("ERROR: ListenUDP - %s", err) + } + defer listener.Close() + log.Println("UDP server listening on: ", listener.LocalAddr().String()) + + for { + select { + case <-u.done: + return nil + default: + buf := make([]byte, u.UDPPacketSize) + n, _, err := listener.ReadFromUDP(buf) + if err != nil { + log.Printf("ERROR: %s\n", err.Error()) + } + + select { + case u.in <- buf[:n]: + default: + log.Printf(dropwarn, string(buf[:n])) + } + } + } +} + +func (u *UdpListener) udpParser() error { + for { + select { + case <-u.done: + return nil + case packet := <-u.in: + metrics, err := u.parser.Parse(packet) + if err == nil { + u.storeMetrics(metrics) + } else { + log.Printf("Malformed packet: [%s], Error: %s\n", packet, err) + } + } + } +} + +func (u *UdpListener) storeMetrics(metrics []telegraf.Metric) error { + u.Lock() + defer u.Unlock() + for _, m := range metrics { + u.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) + } + return nil +} + +func init() { + inputs.Add("udp_listener", func() telegraf.Input { + return &UdpListener{ + UDPPacketSize: UDP_PACKET_SIZE, + } + }) +} From a92e73231d8cb777e542e4524bb42bc7f16546d3 Mon Sep 17 00:00:00 2001 From: Andrea Leopardi Date: Mon, 29 Feb 2016 12:08:33 +0100 Subject: [PATCH 095/287] Add tests for the udp_listener input plugin --- .../inputs/udp_listener/udp_listener_test.go | 112 ++++++++++++++++++ 1 file changed, 112 insertions(+) create mode 100644 plugins/inputs/udp_listener/udp_listener_test.go diff --git a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go new file mode 100644 index 000000000..2f0f6fae5 --- /dev/null +++ b/plugins/inputs/udp_listener/udp_listener_test.go @@ -0,0 +1,112 @@ +package udp_listener + +import ( + "io/ioutil" + "log" + "testing" + "time" + + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" +) + +func newTestUdpListener() (*UdpListener, chan []byte) { + in := make(chan []byte, 1500) + listener := &UdpListener{ + ServiceAddress: ":8125", + UDPPacketSize: 1500, + AllowedPendingMessages: 10000, + in: in, + done: make(chan struct{}), + } + return listener, in +} + +func TestRunParser(t *testing.T) { + log.SetOutput(ioutil.Discard) + var testmsg = []byte("cpu_load_short,host=server01 value=12.0 1422568543702900257") + + listener, in := newTestUdpListener() + acc := testutil.Accumulator{} + listener.acc = &acc + defer close(listener.done) + + listener.parser, _ = parsers.NewInfluxParser() + go listener.udpParser() + + in <- testmsg + time.Sleep(time.Millisecond * 25) + listener.Gather(&acc) + + if a := acc.NFields(); a != 1 { + t.Errorf("got %v, expected %v", a, 1) + } + + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01"}, + ) +} + +func TestRunParserInvalidMsg(t *testing.T) { + log.SetOutput(ioutil.Discard) + var testmsg = []byte("cpu_load_short") + + listener, in := newTestUdpListener() + acc := testutil.Accumulator{} + listener.acc = &acc + defer close(listener.done) + + listener.parser, _ = parsers.NewInfluxParser() + go listener.udpParser() + + in <- testmsg + time.Sleep(time.Millisecond * 25) + + if a := acc.NFields(); a != 0 { + t.Errorf("got %v, expected %v", a, 0) + } +} + +func TestRunParserGraphiteMsg(t *testing.T) { + log.SetOutput(ioutil.Discard) + var testmsg = []byte("cpu.load.graphite 12 1454780029") + + listener, in := newTestUdpListener() + acc := testutil.Accumulator{} + listener.acc = &acc + defer close(listener.done) + + listener.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) + go listener.udpParser() + + in <- testmsg + time.Sleep(time.Millisecond * 25) + listener.Gather(&acc) + + acc.AssertContainsFields(t, "cpu_load_graphite", + map[string]interface{}{"value": float64(12)}) +} + +func TestRunParserJSONMsg(t *testing.T) { + log.SetOutput(ioutil.Discard) + var testmsg = []byte("{\"a\": 5, \"b\": {\"c\": 6}}\n") + + listener, in := newTestUdpListener() + acc := testutil.Accumulator{} + listener.acc = &acc + defer close(listener.done) + + listener.parser, _ = parsers.NewJSONParser("udp_json_test", []string{}, nil) + go listener.udpParser() + + in <- testmsg + time.Sleep(time.Millisecond * 25) + listener.Gather(&acc) + + acc.AssertContainsFields(t, "udp_json_test", + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }) +} From e1f30aeff949495ceb3a77f02d8fe02eb0daf371 Mon Sep 17 00:00:00 2001 From: Andrea Leopardi Date: Mon, 29 Feb 2016 12:14:44 +0100 Subject: [PATCH 096/287] Add a README for the UDP listener input plugin --- plugins/inputs/udp_listener/README.md | 31 +++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 plugins/inputs/udp_listener/README.md diff --git a/plugins/inputs/udp_listener/README.md b/plugins/inputs/udp_listener/README.md new file mode 100644 index 000000000..e2fe846f9 --- /dev/null +++ b/plugins/inputs/udp_listener/README.md @@ -0,0 +1,31 @@ +# UDP listener service input plugin + +The UDP listener is a service input plugin that listens for messages on a UDP +socket and adds those messages to InfluxDB. +The plugin expects messages in the +[Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). + +### Configuration: + +This is a sample configuration for the plugin. + +```toml +[[inputs.udp_listener]] + ## Address and port to host UDP listener on + service_address = ":8125" + + ## Number of UDP messages allowed to queue up. Once filled, the + ## UDP listener will start dropping packets. + allowed_pending_messages = 10000 + + ## UDP packet size for the server to listen for. This will depend + ## on the size of the packets that the client is sending, which is + ## usually 1500 bytes. + udp_packet_size = 1500 + + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +``` From 3b496ab3d88553a8bffad5a48c50ab23005e1be9 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 1 Mar 2016 14:53:55 +0000 Subject: [PATCH 097/287] udp listener: add os buffer size notes & change default port - using 8092 as the default port because it's similar to the rest of the TICK stack (InfluxDB, for example, uses 8083, 8086, 8088, etc.). didn't want to use 8125 because that conflicts with statsd. closes #758 --- plugins/inputs/udp_listener/README.md | 62 ++++++++++++++++++++- plugins/inputs/udp_listener/udp_listener.go | 4 +- 2 files changed, 63 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/udp_listener/README.md b/plugins/inputs/udp_listener/README.md index e2fe846f9..724ae43ae 100644 --- a/plugins/inputs/udp_listener/README.md +++ b/plugins/inputs/udp_listener/README.md @@ -12,7 +12,7 @@ This is a sample configuration for the plugin. ```toml [[inputs.udp_listener]] ## Address and port to host UDP listener on - service_address = ":8125" + service_address = ":8092" ## Number of UDP messages allowed to queue up. Once filled, the ## UDP listener will start dropping packets. @@ -29,3 +29,63 @@ This is a sample configuration for the plugin. ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ``` + +## A Note on UDP OS Buffer Sizes + +Some OSes (most notably, Linux) place very restricive limits on the performance +of UDP protocols. It is _highly_ recommended that you increase these OS limits to +at least 8MB before trying to run large amounts of UDP traffic to your instance. +8MB is just a recommendation, and can be adjusted higher. + +### Linux +Check the current UDP/IP receive buffer limit & default by typing the following +commands: + +``` +sysctl net.core.rmem_max +sysctl net.core.rmem_default +``` + +If the values are less than 8388608 bytes you should add the following lines to +the /etc/sysctl.conf file: + +``` +net.core.rmem_max=8388608 +net.core.rmem_default=8388608 +``` + +Changes to /etc/sysctl.conf do not take effect until reboot. +To update the values immediately, type the following commands as root: + +``` +sysctl -w net.core.rmem_max=8388608 +sysctl -w net.core.rmem_default=8388608 +``` + +### BSD/Darwin + +On BSD/Darwin systems you need to add about a 15% padding to the kernel limit +socket buffer. Meaning if you want an 8MB buffer (8388608 bytes) you need to set +the kernel limit to `8388608*1.15 = 9646900`. This is not documented anywhere but +happens +[in the kernel here.](https://github.com/freebsd/freebsd/blob/master/sys/kern/uipc_sockbuf.c#L63-L64) + +Check the current UDP/IP buffer limit by typing the following command: + +``` +sysctl kern.ipc.maxsockbuf +``` + +If the value is less than 9646900 bytes you should add the following lines +to the /etc/sysctl.conf file (create it if necessary): + +``` +kern.ipc.maxsockbuf=9646900 +``` + +Changes to /etc/sysctl.conf do not take effect until reboot. +To update the values immediately, type the following commands as root: + +``` +sysctl -w kern.ipc.maxsockbuf=9646900 +``` diff --git a/plugins/inputs/udp_listener/udp_listener.go b/plugins/inputs/udp_listener/udp_listener.go index c6d483d48..7aac3160c 100644 --- a/plugins/inputs/udp_listener/udp_listener.go +++ b/plugins/inputs/udp_listener/udp_listener.go @@ -32,7 +32,7 @@ var dropwarn = "ERROR: Message queue full. Discarding line [%s] " + const sampleConfig = ` ## Address and port to host UDP listener on - service_address = ":8125" + service_address = ":8092" ## Number of UDP messages allowed to queue up. Once filled, the ## UDP listener will start dropping packets. @@ -40,7 +40,7 @@ const sampleConfig = ` ## UDP packet size for the server to listen for. This will depend ## on the size of the packets that the client is sending, which is - ## usually 1500 bytes. + ## usually 1500 bytes, but can be as large as 65,535 bytes. udp_packet_size = 1500 ## Data format to consume. This can be "json", "influx" or "graphite" From 2945f9daa9d16303c1c304e11478e13ef1032532 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 1 Mar 2016 15:11:37 +0000 Subject: [PATCH 098/287] Changelog update --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b39e639f7..c390add59 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,11 +5,12 @@ ### Features - [#692](https://github.com/influxdata/telegraf/pull/770): Support InfluxDB retention policies - [#771](https://github.com/influxdata/telegraf/pull/771): Default timeouts for input plugns. Thanks @PierreF! +- [#758](https://github.com/influxdata/telegraf/pull/758): UDP Listener input plugin, thanks @whatyouhide! ### Bugfixes - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" - [#722](https://github.com/influxdata/telegraf/pull/722): Librato output plugin fixes. Thanks @chrusty! -- [#745](https://github.com/influxdata/telegraf/issues/745): Fix Telegraf toml parse panic on large config files. +- [#745](https://github.com/influxdata/telegraf/issues/745): Fix Telegraf toml parse panic on large config files. Thanks @titilambert! ## v0.10.4.1 From 74aaf4f75b75da743526192fbed3892630e71161 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 1 Mar 2016 15:46:29 +0000 Subject: [PATCH 099/287] Add udp listener to readme list of plugins --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index b4e661735..e9c20996a 100644 --- a/README.md +++ b/README.md @@ -218,6 +218,7 @@ Currently implemented sources: Telegraf can also collect metrics via the following service plugins: * statsd +* udp listener * mqtt_consumer * kafka_consumer * nats_consumer From b2a4d4a01898bfc30f78cf771893da059dd303cd Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 1 Mar 2016 17:13:26 +0000 Subject: [PATCH 100/287] Allow ssl option specification for httpjson plugin closes #769 --- CHANGELOG.md | 1 + plugins/inputs/httpjson/httpjson.go | 52 +++++++++++++++++++++--- plugins/inputs/httpjson/httpjson_test.go | 7 ++++ 3 files changed, 54 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c390add59..d2181787a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ - [#692](https://github.com/influxdata/telegraf/pull/770): Support InfluxDB retention policies - [#771](https://github.com/influxdata/telegraf/pull/771): Default timeouts for input plugns. Thanks @PierreF! - [#758](https://github.com/influxdata/telegraf/pull/758): UDP Listener input plugin, thanks @whatyouhide! +- [#769](https://github.com/influxdata/telegraf/issues/769): httpjson plugin: allow specifying SSL configuration. ### Bugfixes - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index c07a9602a..061995892 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -11,6 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" ) @@ -23,6 +24,15 @@ type HttpJson struct { Parameters map[string]string Headers map[string]string + // Path to CA file + SSLCA string `toml:"ssl_ca"` + // Path to host cert file + SSLCert string `toml:"ssl_cert"` + // Path to cert key file + SSLKey string `toml:"ssl_key"` + // Use SSL but skip chain & host verification + InsecureSkipVerify bool + client HTTPClient } @@ -36,6 +46,9 @@ type HTTPClient interface { // http.Response: HTTP respons object // error : Any error that may have occurred MakeRequest(req *http.Request) (*http.Response, error) + + SetHTTPClient(client *http.Client) + HTTPClient() *http.Client } type RealHTTPClient struct { @@ -46,6 +59,14 @@ func (c RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) { return c.client.Do(req) } +func (c RealHTTPClient) SetHTTPClient(client *http.Client) { + c.client = client +} + +func (c RealHTTPClient) HTTPClient() *http.Client { + return c.client +} + var sampleConfig = ` ## NOTE This plugin only reads numerical measurements, strings and booleans ## will be ignored. @@ -77,6 +98,13 @@ var sampleConfig = ` # [inputs.httpjson.headers] # X-Auth-Token = "my-xauth-token" # apiVersion = "v1" + + ## Optional SSL Config + # ssl_ca = "/etc/telegraf/ca.pem" + # ssl_cert = "/etc/telegraf/cert.pem" + # ssl_key = "/etc/telegraf/key.pem" + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false ` func (h *HttpJson) SampleConfig() string { @@ -91,6 +119,23 @@ func (h *HttpJson) Description() string { func (h *HttpJson) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup + if h.client.HTTPClient() == nil { + tlsCfg, err := internal.GetTLSConfig( + h.SSLCert, h.SSLKey, h.SSLCA, h.InsecureSkipVerify) + if err != nil { + return err + } + tr := &http.Transport{ + ResponseHeaderTimeout: time.Duration(3 * time.Second), + TLSClientConfig: tlsCfg, + } + client := &http.Client{ + Transport: tr, + Timeout: time.Duration(4 * time.Second), + } + h.client.SetHTTPClient(client) + } + errorChannel := make(chan error, len(h.Servers)) for _, server := range h.Servers { @@ -244,11 +289,6 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) { func init() { inputs.Add("httpjson", func() telegraf.Input { - tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)} - client := &http.Client{ - Transport: tr, - Timeout: time.Duration(4 * time.Second), - } - return &HttpJson{client: RealHTTPClient{client: client}} + return &HttpJson{client: RealHTTPClient{}} }) } diff --git a/plugins/inputs/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go index b6b57a167..1a1187d44 100644 --- a/plugins/inputs/httpjson/httpjson_test.go +++ b/plugins/inputs/httpjson/httpjson_test.go @@ -147,6 +147,13 @@ func (c mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) { return &resp, nil } +func (c mockHTTPClient) SetHTTPClient(_ *http.Client) { +} + +func (c mockHTTPClient) HTTPClient() *http.Client { + return nil +} + // Generates a pointer to an HttpJson object that uses a mock HTTP client. // Parameters: // response : Body of the response that the mock HTTP client should return From ed9937bbd86afebc451379deeefc76535169fe32 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 2 Mar 2016 10:23:01 +0000 Subject: [PATCH 101/287] Update all dependency hashes --- Godeps | 61 +++++++++++++++++++++++++++++----------------------------- 1 file changed, 31 insertions(+), 30 deletions(-) diff --git a/Godeps b/Godeps index 7389b1cb8..089860ed5 100644 --- a/Godeps +++ b/Godeps @@ -1,53 +1,54 @@ git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git 617c801af238c3af2d9e72c5d4a0f02edad03ce5 -github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef -github.com/Sirupsen/logrus f7f79f729e0fbe2fcc061db48a9ba0263f588252 -github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339 -github.com/aws/aws-sdk-go 87b1e60a50b09e4812dee560b33a238f67305804 -github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d +github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9 +github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc +github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687 +github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857 +github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4 github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99 -github.com/dancannon/gorethink 6f088135ff288deb9d5546f4c71919207f891a70 +github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3 github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367 -github.com/fsouza/go-dockerclient 7b651349f9479f5114913eefbfd3c4eeddd79ab4 -github.com/go-ini/ini afbd495e5aaea13597b5e14fe514ddeaa4d76fc3 -github.com/go-sql-driver/mysql 7c7f556282622f94213bc028b4d0a7b6151ba239 -github.com/golang/protobuf 6aaa8d47701fa6cf07e914ec01fde3d4a1fe79c3 -github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a +github.com/fsouza/go-dockerclient a49c8269a6899cae30da1f8a4b82e0ce945f9967 +github.com/go-ini/ini 776aa739ce9373377cd16f526cdf06cb4c89b40f +github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee +github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032 +github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7 github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2 -github.com/gorilla/context 1c83b3eabd45b6d76072b66b746c20815fb2872d -github.com/gorilla/mux 26a6070f849969ba72b72256e9f14cf519751690 +github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a +github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478 github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da -github.com/influxdata/influxdb ef571fc104dc24b77cd3710c156cd95e5cfd7aa5 +github.com/influxdata/influxdb e3fef5593c21644f2b43af55d6e17e70910b0e48 github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0 -github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264 -github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38 -github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f +github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74 +github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720 +github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36 github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453 -github.com/miekg/dns e0d84d97e59bcb6561eae269c4e94d25b66822cb +github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504 github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b -github.com/nats-io/nats 6a83f1a633cfbfd90aa648ac99fb38c06a8b40df -github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f +github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3 +github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa +github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980 github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2 -github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f +github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831 github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 -github.com/prometheus/common 14ca1097bbe21584194c15e391a9dab95ad42a59 +github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37 github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8 github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f -github.com/shirou/gopsutil e77438504d45b9985c99a75730fe65220ceea00e +github.com/shirou/gopsutil 1de1357e7737a536c7f4ff6be7bd27977db4d2cb github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744 github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94 -github.com/stretchr/testify f390dcf405f7b83c997eac1b06768bb9f44dec18 +github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3 github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8 github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363 -golang.org/x/crypto 1f22c0103821b9390939b6776727195525381532 -golang.org/x/net 04b9de9b512f58addf28c9853d50ebef61c3953e -golang.org/x/text 6d3c22c4525a4da167968fa2479be5524d2e8bd0 -gopkg.in/dancannon/gorethink.v1 6f088135ff288deb9d5546f4c71919207f891a70 +golang.org/x/crypto 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3 +golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172 +golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34 +gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715 -gopkg.in/mgo.v2 03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64 -gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4 +gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886 +gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4 From 8464be691ecdaddb73c8f4f87c1b1ab7347a1254 Mon Sep 17 00:00:00 2001 From: Pascal Larin Date: Wed, 2 Mar 2016 12:36:23 -0500 Subject: [PATCH 102/287] Username not set for mqtt_consumer plugin Username parameter for the mqtt_consumer plugin was not pass to the client because an incorrect empty check. closes #781 --- CHANGELOG.md | 1 + plugins/inputs/mqtt_consumer/mqtt_consumer.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d2181787a..02ddbc647 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" - [#722](https://github.com/influxdata/telegraf/pull/722): Librato output plugin fixes. Thanks @chrusty! - [#745](https://github.com/influxdata/telegraf/issues/745): Fix Telegraf toml parse panic on large config files. Thanks @titilambert! +- [#781](https://github.com/influxdata/telegraf/pull/781): Fix mqtt_consumer username not being set. Thanks @chaton78! ## v0.10.4.1 diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 2d0fbef06..42cadfd60 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -181,7 +181,7 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { } user := m.Username - if user == "" { + if user != "" { opts.SetUsername(user) } password := m.Password From e5482a57255bc1edfa4aa6c5d0660d99b7995afb Mon Sep 17 00:00:00 2001 From: Manuel Sangoi Date: Thu, 3 Mar 2016 14:14:50 +0100 Subject: [PATCH 103/287] Do not ignore username option for mqtt output --- CHANGELOG.md | 1 + plugins/outputs/mqtt/mqtt.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 02ddbc647..47bd2e256 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ - [#722](https://github.com/influxdata/telegraf/pull/722): Librato output plugin fixes. Thanks @chrusty! - [#745](https://github.com/influxdata/telegraf/issues/745): Fix Telegraf toml parse panic on large config files. Thanks @titilambert! - [#781](https://github.com/influxdata/telegraf/pull/781): Fix mqtt_consumer username not being set. Thanks @chaton78! +- [#786](https://github.com/influxdata/telegraf/pull/786): Fix mqtt output username not being set. Thanks @msangoi! ## v0.10.4.1 diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index 6f8abe954..10c1b1a9e 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -172,7 +172,7 @@ func (m *MQTT) createOpts() (*paho.ClientOptions, error) { } user := m.Username - if user == "" { + if user != "" { opts.SetUsername(user) } password := m.Password From e81278b8006fc5615360a4d832e27ae1b0ff1970 Mon Sep 17 00:00:00 2001 From: Auke Willem Oosterhoff Date: Thu, 3 Mar 2016 14:14:09 +0100 Subject: [PATCH 104/287] Fix bug in sample code. closes #784 closes #785 --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index afbfbf088..68c9da478 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -80,7 +80,7 @@ func (s *Simple) SampleConfig() string { return "ok = true # indicate if everything is fine" } -func (s *Simple) Gather(acc inputs.Accumulator) error { +func (s *Simple) Gather(acc telegraf.Accumulator) error { if s.Ok { acc.Add("state", "pretty good", nil) } else { From 29d1cbb6736d4a7894a80057ae9dfef78c34771a Mon Sep 17 00:00:00 2001 From: Pierre Fersing Date: Wed, 2 Mar 2016 15:38:08 +0100 Subject: [PATCH 105/287] Reduce metric_buffer_limit to 1000 closes #780 --- etc/telegraf.conf | 2 +- etc/telegraf_windows.conf | 2 +- internal/config/config.go | 2 +- internal/models/running_output.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index f5a2b34dd..a6057ecd2 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -24,7 +24,7 @@ ## Telegraf will cache metric_buffer_limit metrics for each output, and will ## flush this buffer on a successful write. - metric_buffer_limit = 10000 + metric_buffer_limit = 1000 ## Flush the buffer whenever full, regardless of flush_interval. flush_buffer_when_full = true diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index 7e66cb209..9ce067c39 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -24,7 +24,7 @@ ## Telegraf will cache metric_buffer_limit metrics for each output, and will ## flush this buffer on a successful write. - metric_buffer_limit = 10000 + metric_buffer_limit = 1000 ## Flush the buffer whenever full, regardless of flush_interval. flush_buffer_when_full = true diff --git a/internal/config/config.go b/internal/config/config.go index a7a9eaab4..f64e0a56a 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -159,7 +159,7 @@ var header = `# Telegraf Configuration ## Telegraf will cache metric_buffer_limit metrics for each output, and will ## flush this buffer on a successful write. - metric_buffer_limit = 10000 + metric_buffer_limit = 1000 ## Flush the buffer whenever full, regardless of flush_interval. flush_buffer_when_full = true diff --git a/internal/models/running_output.go b/internal/models/running_output.go index 37b479dfb..9d111c757 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -10,7 +10,7 @@ import ( const ( // Default number of metrics kept between flushes. - DEFAULT_METRIC_BUFFER_LIMIT = 10000 + DEFAULT_METRIC_BUFFER_LIMIT = 1000 // Limit how many full metric buffers are kept due to failed writes. FULL_METRIC_BUFFERS_LIMIT = 100 From 03d37725a9a0662cf6cb70b7b35dd69c50eabe3f Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 3 Mar 2016 15:44:16 +0000 Subject: [PATCH 106/287] dns_query unit tests, require that field exists --- plugins/inputs/dns_query/dns_query_test.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/dns_query/dns_query_test.go b/plugins/inputs/dns_query/dns_query_test.go index 076db5fab..d7d267a59 100644 --- a/plugins/inputs/dns_query/dns_query_test.go +++ b/plugins/inputs/dns_query/dns_query_test.go @@ -1,11 +1,14 @@ package dns_query import ( - "github.com/influxdata/telegraf/testutil" - "github.com/miekg/dns" - "github.com/stretchr/testify/assert" "testing" "time" + + "github.com/influxdata/telegraf/testutil" + + "github.com/miekg/dns" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var servers = []string{"8.8.8.8"} @@ -21,7 +24,7 @@ func TestGathering(t *testing.T) { err := dnsConfig.Gather(&acc) assert.NoError(t, err) metric, ok := acc.Get("dns_query") - assert.True(t, ok) + require.True(t, ok) queryTime, _ := metric.Fields["query_time_ms"].(float64) assert.NotEqual(t, 0, queryTime) @@ -38,7 +41,7 @@ func TestGatheringMxRecord(t *testing.T) { err := dnsConfig.Gather(&acc) assert.NoError(t, err) metric, ok := acc.Get("dns_query") - assert.True(t, ok) + require.True(t, ok) queryTime, _ := metric.Fields["query_time_ms"].(float64) assert.NotEqual(t, 0, queryTime) @@ -61,7 +64,7 @@ func TestGatheringRootDomain(t *testing.T) { err := dnsConfig.Gather(&acc) assert.NoError(t, err) metric, ok := acc.Get("dns_query") - assert.True(t, ok) + require.True(t, ok) queryTime, _ := metric.Fields["query_time_ms"].(float64) fields["query_time_ms"] = queryTime @@ -84,7 +87,7 @@ func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) { err := dnsConfig.Gather(&acc) assert.NoError(t, err) metric, ok := acc.Get("dns_query") - assert.True(t, ok) + require.True(t, ok) queryTime, _ := metric.Fields["query_time_ms"].(float64) fields["query_time_ms"] = queryTime From ee7b225272655f70eb9b1ece606a9e5d61ec5b79 Mon Sep 17 00:00:00 2001 From: Thibault Cohen Date: Wed, 17 Feb 2016 00:08:20 -0500 Subject: [PATCH 107/287] Add snmp table feature #540 --- plugins/inputs/snmp/snmp.go | 322 +++++++++++++++++++++++++++++++++++- 1 file changed, 318 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 371bc2ad9..557753110 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -20,6 +20,8 @@ type Snmp struct { Host []Host Get []Data Bulk []Data + Table []Table + Subtable []Subtable SnmptranslateFile string } @@ -36,9 +38,51 @@ type Host struct { Collect []string // easy get oids GetOids []string + // Table + Table []HostTable // Oids getOids []Data bulkOids []Data + tables []HostTable +} + +type Table struct { + // name = "iftable" + Name string + // oid = ".1.3.6.1.2.1.31.1.1.1" + Oid string + //if empty get all instances + //mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" + MappingTable string + // if empty get all subtables + // sub_tables could be not "real subtables" + //sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] + SubTables []string +} + +type HostTable struct { + // name = "iftable" + Name string + // Includes only these instances + // include_instances = ["eth0", "eth1"] + IncludeInstances []string + // Excludes only these instances + // exclude_instances = ["eth20", "eth21"] + ExcludeInstances []string + // From Table struct + oid string + mappingTable string + subTables []string +} + +// TODO find better names +type Subtable struct { + //name = "bytes_send" + Name string + //oid = ".1.3.6.1.2.1.31.1.1.1.10" + Oid string + //unit = "octets" + Unit string } type Data struct { @@ -69,8 +113,12 @@ var initNode = Node{ subnodes: make(map[string]Node), } +var SubTableMap = make(map[string]Subtable) + var NameToOid = make(map[string]string) +var OidInstanceMapping = make(map[string]map[string]string) + var sampleConfig = ` ## Use 'oids.txt' file to translate oids to names ## To generate 'oids.txt' you need to run: @@ -113,7 +161,7 @@ var sampleConfig = ` [[inputs.snmp.get]] name = "interface_speed" oid = "ifSpeed" - instance = 0 + instance = "0" [[inputs.snmp.get]] name = "sysuptime" @@ -129,6 +177,52 @@ var sampleConfig = ` name = "ifoutoctets" max_repetition = 127 oid = "ifOutOctets" + + + [[inputs.snmp.host]] + address = "192.168.2.13:161" + #address = "127.0.0.1:161" + community = "public" + version = 2 + timeout = 2.0 + retries = 2 + #collect = ["mybulk", "sysservices", "sysdescr", "systype"] + collect = ["sysuptime" ] + [[inputs.snmp.host.table]] + name = "iftable3" + include_instances = ["enp5s0", "eth1"] + + # SNMP TABLEs + # table without mapping neither subtables + [[inputs.snmp.table]] + name = "iftable1" + oid = ".1.3.6.1.2.1.31.1.1.1" + + # table without mapping but with subtables + [[inputs.snmp.table]] + name = "iftable2" + oid = ".1.3.6.1.2.1.31.1.1.1" + sub_tables = [".1.3.6.1.2.1.2.2.1.13"] + + # table with mapping but without subtables + [[inputs.snmp.table]] + name = "iftable3" + oid = ".1.3.6.1.2.1.31.1.1.1" + # if empty. get all instances + mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" + # if empty, get all subtables + + # table with both mapping and subtables + [[inputs.snmp.table]] + name = "iftable4" + oid = ".1.3.6.1.2.1.31.1.1.1" + # if empty get all instances + mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" + # if empty get all subtables + # sub_tables could be not "real subtables" + sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] + + ` // SampleConfig returns sample configuration message @@ -189,6 +283,12 @@ func findnodename(node Node, ids []string) (string, string) { } func (s *Snmp) Gather(acc telegraf.Accumulator) error { + // Create subtables mapping + if len(SubTableMap) == 0 { + for _, sb := range s.Subtable { + SubTableMap[sb.Name] = sb + } + } // Create oid tree if s.SnmptranslateFile != "" && len(initNode.subnodes) == 0 { data, err := ioutil.ReadFile(s.SnmptranslateFile) @@ -273,6 +373,27 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { } } } + // Table + for _, hostTable := range host.Table { + for _, snmpTable := range s.Table { + if hostTable.Name == snmpTable.Name { + table := hostTable + table.oid = snmpTable.Oid + table.mappingTable = snmpTable.MappingTable + table.subTables = snmpTable.SubTables + host.tables = append(host.tables, table) + } + } + } + // Launch Mapping + // TODO save mapping and computed oids + // to do it only the first time + // only if len(OidInstanceMapping) == 0 + if len(OidInstanceMapping) >= 0 { + if err := host.SNMPMap(acc); err != nil { + return err + } + } // Launch Get requests if err := host.SNMPGet(acc); err != nil { return err @@ -284,6 +405,183 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { return nil } +func (h *Host) SNMPMap(acc telegraf.Accumulator) error { + // Get snmp client + snmpClient, err := h.GetSNMPClient() + if err != nil { + return err + } + // Deconnection + defer snmpClient.Conn.Close() + // Prepare OIDs + for _, table := range h.tables { + // We don't have mapping + if table.mappingTable == "" { + if len(table.subTables) == 0 { + // If We don't have mapping table + // neither subtables list + // This is just a bulk request + oid := Data{} + oid.Oid = table.oid + if val, ok := NameToOid[oid.Oid]; ok { + oid.rawOid = "." + val + } else { + oid.rawOid = oid.Oid + } + h.bulkOids = append(h.bulkOids, oid) + } else { + // If We don't have mapping table + // but we have subtables + // This is a bunch of bulk requests + // For each subtable ... + for _, sb := range table.subTables { + // ... we create a new Data (oid) object + oid := Data{} + // Looking for more information about this subtable + ssb, exists := SubTableMap[sb] + if exists { + // We found a subtable section in config files + oid.Oid = ssb.Oid + oid.rawOid = ssb.Oid + oid.Unit = ssb.Unit + } else { + // We did NOT find a subtable section in config files + oid.Oid = sb + oid.rawOid = sb + } + // TODO check oid validity + + // Add the new oid to getOids list + h.bulkOids = append(h.bulkOids, oid) + } + } + } else { + // We have a mapping table + // We need to query this table + // To get mapping between instance id + // and instance name + oid_asked := table.mappingTable + need_more_requests := true + // Set max repetition + maxRepetition := uint8(32) + // Launch requests + for need_more_requests { + // Launch request + result, err3 := snmpClient.GetBulk([]string{oid_asked}, 0, maxRepetition) + if err3 != nil { + return err3 + } + + lastOid := "" + for _, variable := range result.Variables { + lastOid = variable.Name + if strings.HasPrefix(variable.Name, oid_asked) { + switch variable.Type { + // handle instance names + case gosnmp.OctetString: + // Check if instance is in includes instances + getInstances := true + if len(table.IncludeInstances) > 0 { + getInstances = false + for _, instance := range table.IncludeInstances { + if instance == string(variable.Value.([]byte)) { + getInstances = true + } + } + } + // Check if instance is in excludes instances + if len(table.ExcludeInstances) > 0 { + getInstances = true + for _, instance := range table.ExcludeInstances { + if instance == string(variable.Value.([]byte)) { + getInstances = false + } + } + } + // We don't want this instance + if !getInstances { + continue + } + + // remove oid table from the complete oid + // in order to get the current instance id + key := strings.Replace(variable.Name, oid_asked, "", 1) + + if len(table.subTables) == 0 { + // We have a mapping table + // but no subtables + // This is just a bulk request + + // Building mapping table + mapping := map[string]string{strings.Trim(key, "."): string(variable.Value.([]byte))} + _, exists := OidInstanceMapping[table.oid] + if exists { + OidInstanceMapping[table.oid][strings.Trim(key, ".")] = string(variable.Value.([]byte)) + } else { + OidInstanceMapping[table.oid] = mapping + } + + // Add table oid in bulk oid list + oid := Data{} + oid.Oid = table.oid + if val, ok := NameToOid[oid.Oid]; ok { + oid.rawOid = "." + val + } else { + oid.rawOid = oid.Oid + } + h.bulkOids = append(h.bulkOids, oid) + } else { + // We have a mapping table + // and some subtables + // This is a bunch of get requests + // This is the best case :) + + // For each subtable ... + for _, sb := range table.subTables { + // ... we create a new Data (oid) object + oid := Data{} + // Looking for more information about this subtable + ssb, exists := SubTableMap[sb] + if exists { + // We found a subtable section in config files + oid.Oid = ssb.Oid + key + oid.rawOid = ssb.Oid + key + oid.Unit = ssb.Unit + oid.Instance = string(variable.Value.([]byte)) + } else { + // We did NOT find a subtable section in config files + oid.Oid = sb + key + oid.rawOid = sb + key + oid.Instance = string(variable.Value.([]byte)) + } + // TODO check oid validity + + // Add the new oid to getOids list + h.getOids = append(h.getOids, oid) + } + } + default: + } + } else { + break + } + } + // Determine if we need more requests + if strings.HasPrefix(lastOid, oid_asked) { + need_more_requests = true + } else { + need_more_requests = false + } + } + } + } + // Mapping finished + + // Create newoids based on mapping + + return nil +} + func (h *Host) SNMPGet(acc telegraf.Accumulator) error { // Get snmp client snmpClient, err := h.GetSNMPClient() @@ -431,11 +729,27 @@ func (h *Host) HandleResponse(oids map[string]Data, result *gosnmp.SnmpPacket, a // Get name and instance var oid_name string var instance string - // Get oidname and instannce from translate file + // Get oidname and instance from translate file oid_name, instance = findnodename(initNode, strings.Split(string(variable.Name[1:]), ".")) - - if instance != "" { + // Set instance tag + // From mapping table + mapping, inMappingNoSubTable := OidInstanceMapping[oid_key] + if inMappingNoSubTable { + // filter if the instance in not in + // OidInstanceMapping mapping map + if instance_name, exists := mapping[instance]; exists { + tags["instance"] = instance_name + } else { + continue + } + } else if oid.Instance != "" { + // From config files + tags["instance"] = oid.Instance + } else if instance != "" { + // Using last id of the current oid, ie: + // with .1.3.6.1.2.1.31.1.1.1.10.3 + // instance is 3 tags["instance"] = instance } From e5503c56ad754e5b23f60a1f783dc7cd18a5124f Mon Sep 17 00:00:00 2001 From: Thibault Cohen Date: Tue, 1 Mar 2016 19:45:01 -0500 Subject: [PATCH 108/287] Fix #773 --- plugins/inputs/snmp/snmp.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 557753110..3d4827fc1 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -44,6 +44,9 @@ type Host struct { getOids []Data bulkOids []Data tables []HostTable + // array of processed oids + // to skip oid duplication + processedOids []string } type Table struct { @@ -714,8 +717,15 @@ func (h *Host) HandleResponse(oids map[string]Data, result *gosnmp.SnmpPacket, a var lastOid string for _, variable := range result.Variables { lastOid = variable.Name - // Remove unwanted oid + nextresult: + // Get only oid wanted for oid_key, oid := range oids { + // Skip oids already processed + for _, processedOid := range h.processedOids { + if variable.Name == processedOid { + break nextresult + } + } if strings.HasPrefix(variable.Name, oid_key) { switch variable.Type { // handle Metrics @@ -767,6 +777,7 @@ func (h *Host) HandleResponse(oids map[string]Data, result *gosnmp.SnmpPacket, a fields := make(map[string]interface{}) fields[string(field_name)] = variable.Value + h.processedOids = append(h.processedOids, variable.Name) acc.AddFields(field_name, fields, tags) case gosnmp.NoSuchObject, gosnmp.NoSuchInstance: // Oid not found From 72027b5b3cd1dad23d6f3fe3508f93cf8711b041 Mon Sep 17 00:00:00 2001 From: Thibault Cohen Date: Tue, 1 Mar 2016 19:45:40 -0500 Subject: [PATCH 109/287] Add README.md for snmp input plugin closes #735 closes #773 closes #540 --- CHANGELOG.md | 2 + plugins/inputs/snmp/README.md | 549 ++++++++++++++++++++++++++++++++++ 2 files changed, 551 insertions(+) create mode 100644 plugins/inputs/snmp/README.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 47bd2e256..79cc0a235 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ - [#771](https://github.com/influxdata/telegraf/pull/771): Default timeouts for input plugns. Thanks @PierreF! - [#758](https://github.com/influxdata/telegraf/pull/758): UDP Listener input plugin, thanks @whatyouhide! - [#769](https://github.com/influxdata/telegraf/issues/769): httpjson plugin: allow specifying SSL configuration. +- [#735](https://github.com/influxdata/telegraf/pull/735): SNMP Table feature. Thanks @titilambert! ### Bugfixes - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" @@ -14,6 +15,7 @@ - [#745](https://github.com/influxdata/telegraf/issues/745): Fix Telegraf toml parse panic on large config files. Thanks @titilambert! - [#781](https://github.com/influxdata/telegraf/pull/781): Fix mqtt_consumer username not being set. Thanks @chaton78! - [#786](https://github.com/influxdata/telegraf/pull/786): Fix mqtt output username not being set. Thanks @msangoi! +- [#773](https://github.com/influxdata/telegraf/issues/773): Fix duplicate measurements in snmp plugin. Thanks @titilambert! ## v0.10.4.1 diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md new file mode 100644 index 000000000..ee6d17857 --- /dev/null +++ b/plugins/inputs/snmp/README.md @@ -0,0 +1,549 @@ +# SNMP Input Plugin + +The SNMP input plugin gathers metrics from SNMP agents + +### Configuration: + + +#### Very simple example + +In this example, the plugin will gather value of OIDS: + + - `.1.3.6.1.2.1.2.2.1.4.1` + +```toml +# Very Simple Example +[[inputs.snmp]] + + [[inputs.snmp.host]] + address = "127.0.0.1:161" + # SNMP community + community = "public" # default public + # SNMP version (1, 2 or 3) + # Version 3 not supported yet + version = 2 # default 2 + # Simple list of OIDs to get, in addition to "collect" + get_oids = [".1.3.6.1.2.1.2.2.1.4.1"] +``` + + +#### Simple example + +In this example, Telegraf gathers value of OIDS: + + - named **ifnumber** + - named **interface_speed** + +With **inputs.snmp.get** section the plugin gets the oid number: + + - **ifnumber** => `.1.3.6.1.2.1.2.1.0` + - **interface_speed** => *ifSpeed* + +As you can see *ifSpeed* is not a valid OID. In order to get +the valid OID, the plugin uses `snmptranslate_file` to match the OID: + + - **ifnumber** => `.1.3.6.1.2.1.2.1.0` + - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5` + +Also as the plugin will append `instance` to the corresponding OID: + + - **ifnumber** => `.1.3.6.1.2.1.2.1.0` + - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1` + +In this example, the plugin will gather value of OIDS: + +- `.1.3.6.1.2.1.2.1.0` +- `.1.3.6.1.2.1.2.2.1.5.1` + + +```toml +# Simple example +[[inputs.snmp]] + ## Use 'oids.txt' file to translate oids to names + ## To generate 'oids.txt' you need to run: + ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt + ## Or if you have an other MIB folder with custom MIBs + ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt + snmptranslate_file = "/tmp/oids.txt" + [[inputs.snmp.host]] + address = "127.0.0.1:161" + # SNMP community + community = "public" # default public + # SNMP version (1, 2 or 3) + # Version 3 not supported yet + version = 2 # default 2 + # Which get/bulk do you want to collect for this host + collect = ["ifnumber", "interface_speed"] + + [[inputs.snmp.get]] + name = "ifnumber" + oid = ".1.3.6.1.2.1.2.1.0" + + [[inputs.snmp.get]] + name = "interface_speed" + oid = "ifSpeed" + instance = "1" + +``` + + +#### Simple bulk example + +In this example, Telegraf gathers value of OIDS: + + - named **ifnumber** + - named **interface_speed** + - named **if_out_octets** + +With **inputs.snmp.get** section the plugin gets oid number: + + - **ifnumber** => `.1.3.6.1.2.1.2.1.0` + - **interface_speed** => *ifSpeed* + +With **inputs.snmp.bulk** section the plugin gets the oid number: + + - **if_out_octets** => *ifOutOctets* + +As you can see *ifSpeed* and *ifOutOctets* are not a valid OID. +In order to get the valid OID, the plugin uses `snmptranslate_file` +to match the OID: + + - **ifnumber** => `.1.3.6.1.2.1.2.1.0` + - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5` + - **if_out_octets** => *ifOutOctets* => `.1.3.6.1.2.1.2.2.1.16` + +Also, the plugin will append `instance` to the corresponding OID: + + - **ifnumber** => `.1.3.6.1.2.1.2.1.0` + - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1` + +And **if_out_octets** is a bulk request, the plugin will gathers all +OIDS in the table. + +- `.1.3.6.1.2.1.2.2.1.16.1` +- `.1.3.6.1.2.1.2.2.1.16.2` +- `.1.3.6.1.2.1.2.2.1.16.3` +- `.1.3.6.1.2.1.2.2.1.16.4` +- `.1.3.6.1.2.1.2.2.1.16.5` +- `...` + +In this example, the plugin will gather value of OIDS: + +- `.1.3.6.1.2.1.2.1.0` +- `.1.3.6.1.2.1.2.2.1.5.1` +- `.1.3.6.1.2.1.2.2.1.16.1` +- `.1.3.6.1.2.1.2.2.1.16.2` +- `.1.3.6.1.2.1.2.2.1.16.3` +- `.1.3.6.1.2.1.2.2.1.16.4` +- `.1.3.6.1.2.1.2.2.1.16.5` +- `...` + + +```toml +# Simple bulk example +[[inputs.snmp]] + ## Use 'oids.txt' file to translate oids to names + ## To generate 'oids.txt' you need to run: + ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt + ## Or if you have an other MIB folder with custom MIBs + ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt + snmptranslate_file = "/tmp/oids.txt" + [[inputs.snmp.host]] + address = "127.0.0.1:161" + # SNMP community + community = "public" # default public + # SNMP version (1, 2 or 3) + # Version 3 not supported yet + version = 2 # default 2 + # Which get/bulk do you want to collect for this host + collect = ["interface_speed", "if_number", "if_out_octets"] + + [[inputs.snmp.get]] + name = "interface_speed" + oid = "ifSpeed" + instance = "1" + + [[inputs.snmp.get]] + name = "if_number" + oid = "ifNumber" + + [[inputs.snmp.bulk]] + name = "if_out_octets" + oid = "ifOutOctets" +``` + + +#### Table example + +In this example, we remove collect attribute to the host section, +but you can still use it in combination of the following part. + +Note: This example is like a bulk request a but using an +other configuration + +Telegraf gathers value of OIDS of the table: + + - named **iftable1** + +With **inputs.snmp.table** section the plugin gets oid number: + + - **iftable1** => `.1.3.6.1.2.1.31.1.1.1` + +Also **iftable1** is a table, the plugin will gathers all +OIDS in the table and in the subtables + +- `.1.3.6.1.2.1.31.1.1.1.1` +- `.1.3.6.1.2.1.31.1.1.1.1.1` +- `.1.3.6.1.2.1.31.1.1.1.1.2` +- `.1.3.6.1.2.1.31.1.1.1.1.3` +- `.1.3.6.1.2.1.31.1.1.1.1.4` +- `.1.3.6.1.2.1.31.1.1.1.1....` +- `.1.3.6.1.2.1.31.1.1.1.2` +- `.1.3.6.1.2.1.31.1.1.1.2....` +- `.1.3.6.1.2.1.31.1.1.1.3` +- `.1.3.6.1.2.1.31.1.1.1.3....` +- `.1.3.6.1.2.1.31.1.1.1.4` +- `.1.3.6.1.2.1.31.1.1.1.4....` +- `.1.3.6.1.2.1.31.1.1.1.5` +- `.1.3.6.1.2.1.31.1.1.1.5....` +- `.1.3.6.1.2.1.31.1.1.1.6....` +- `...` + +```toml +# Table example +[[inputs.snmp]] + ## Use 'oids.txt' file to translate oids to names + ## To generate 'oids.txt' you need to run: + ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt + ## Or if you have an other MIB folder with custom MIBs + ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt + snmptranslate_file = "/tmp/oids.txt" + [[inputs.snmp.host]] + address = "127.0.0.1:161" + # SNMP community + community = "public" # default public + # SNMP version (1, 2 or 3) + # Version 3 not supported yet + version = 2 # default 2 + # Which get/bulk do you want to collect for this host + # Which table do you want to collect + [[inputs.snmp.host.table]] + name = "iftable1" + + # table without mapping neither subtables + # This is like bulk request + [[inputs.snmp.table]] + name = "iftable1" + oid = ".1.3.6.1.2.1.31.1.1.1" +``` + + +#### Table with subtable example + +In this example, we remove collect attribute to the host section, +but you can still use it in combination of the following part. + +Note: This example is like a bulk request a but using an +other configuration + +Telegraf gathers value of OIDS of the table: + + - named **iftable2** + +With **inputs.snmp.table** section *AND* **sub_tables** attribute, +the plugin will get OIDS from subtables: + + - **iftable2** => `.1.3.6.1.2.1.2.2.1.13` + +Also **iftable2** is a table, the plugin will gathers all +OIDS in subtables: + +- `.1.3.6.1.2.1.2.2.1.13.1` +- `.1.3.6.1.2.1.2.2.1.13.2` +- `.1.3.6.1.2.1.2.2.1.13.3` +- `.1.3.6.1.2.1.2.2.1.13.4` +- `.1.3.6.1.2.1.2.2.1.13....` + + +```toml +# Table with subtable example +[[inputs.snmp]] + ## Use 'oids.txt' file to translate oids to names + ## To generate 'oids.txt' you need to run: + ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt + ## Or if you have an other MIB folder with custom MIBs + ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt + snmptranslate_file = "/tmp/oids.txt" + [[inputs.snmp.host]] + address = "127.0.0.1:161" + # SNMP community + community = "public" # default public + # SNMP version (1, 2 or 3) + # Version 3 not supported yet + version = 2 # default 2 + # Which table do you want to collect + [[inputs.snmp.host.table]] + name = "iftable2" + + # table without mapping but with subtables + [[inputs.snmp.table]] + name = "iftable2" + sub_tables = [".1.3.6.1.2.1.2.2.1.13"] + # note + # oid attribute is useless +``` + + +#### Table with mapping example + +In this example, we remove collect attribute to the host section, +but you can still use it in combination of the following part. + +Telegraf gathers value of OIDS of the table: + + - named **iftable3** + +With **inputs.snmp.table** section the plugin gets oid number: + + - **iftable3** => `.1.3.6.1.2.1.31.1.1.1` + +Also **iftable2** is a table, the plugin will gathers all +OIDS in the table and in the subtables + +- `.1.3.6.1.2.1.31.1.1.1.1` +- `.1.3.6.1.2.1.31.1.1.1.1.1` +- `.1.3.6.1.2.1.31.1.1.1.1.2` +- `.1.3.6.1.2.1.31.1.1.1.1.3` +- `.1.3.6.1.2.1.31.1.1.1.1.4` +- `.1.3.6.1.2.1.31.1.1.1.1....` +- `.1.3.6.1.2.1.31.1.1.1.2` +- `.1.3.6.1.2.1.31.1.1.1.2....` +- `.1.3.6.1.2.1.31.1.1.1.3` +- `.1.3.6.1.2.1.31.1.1.1.3....` +- `.1.3.6.1.2.1.31.1.1.1.4` +- `.1.3.6.1.2.1.31.1.1.1.4....` +- `.1.3.6.1.2.1.31.1.1.1.5` +- `.1.3.6.1.2.1.31.1.1.1.5....` +- `.1.3.6.1.2.1.31.1.1.1.6....` +- `...` + +But the **include_instances** attribute will filter which OIDS +will be gathered; As you see, there is an other attribute, `mapping_table`. +`include_instances` and `mapping_table` permit to build a hash table +to filter only OIDS you want. +Let's say, we have the following data on SNMP server: + - OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has as value: `enp5s0` + - OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has as value: `enp5s1` + - OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has as value: `enp5s2` + - OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has as value: `eth0` + - OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has as value: `eth1` + +The plugin will build the following hash table: + +| instance name | instance id | +|---------------|-------------| +| `enp5s0` | `1` | +| `enp5s1` | `2` | +| `enp5s2` | `3` | +| `eth0` | `4` | +| `eth1` | `5` | + +With the **include_instances** attribute, the plugin will gather +the following OIDS: + +- `.1.3.6.1.2.1.31.1.1.1.1.1` +- `.1.3.6.1.2.1.31.1.1.1.1.5` +- `.1.3.6.1.2.1.31.1.1.1.2.1` +- `.1.3.6.1.2.1.31.1.1.1.2.5` +- `.1.3.6.1.2.1.31.1.1.1.3.1` +- `.1.3.6.1.2.1.31.1.1.1.3.5` +- `.1.3.6.1.2.1.31.1.1.1.4.1` +- `.1.3.6.1.2.1.31.1.1.1.4.5` +- `.1.3.6.1.2.1.31.1.1.1.5.1` +- `.1.3.6.1.2.1.31.1.1.1.5.5` +- `.1.3.6.1.2.1.31.1.1.1.6.1` +- `.1.3.6.1.2.1.31.1.1.1.6.5` +- `...` + +Note: the plugin will add instance name as tag *instance* + +```toml +# Simple table with mapping example +[[inputs.snmp]] + ## Use 'oids.txt' file to translate oids to names + ## To generate 'oids.txt' you need to run: + ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt + ## Or if you have an other MIB folder with custom MIBs + ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt + snmptranslate_file = "/tmp/oids.txt" + [[inputs.snmp.host]] + address = "127.0.0.1:161" + # SNMP community + community = "public" # default public + # SNMP version (1, 2 or 3) + # Version 3 not supported yet + version = 2 # default 2 + # Which table do you want to collect + [[inputs.snmp.host.table]] + name = "iftable3" + include_instances = ["enp5s0", "eth1"] + + # table with mapping but without subtables + [[inputs.snmp.table]] + name = "iftable3" + oid = ".1.3.6.1.2.1.31.1.1.1" + # if empty. get all instances + mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" + # if empty, get all subtables +``` + + +#### Table with both mapping and subtable example + +In this example, we remove collect attribute to the host section, +but you can still use it in combination of the following part. + +Telegraf gathers value of OIDS of the table: + + - named **iftable4** + +With **inputs.snmp.table** section *AND* **sub_tables** attribute, +the plugin will get OIDS from subtables: + + - **iftable4** => `.1.3.6.1.2.1.31.1.1.1` + +Also **iftable2** is a table, the plugin will gathers all +OIDS in the table and in the subtables + +- `.1.3.6.1.2.1.31.1.1.1.6.1 +- `.1.3.6.1.2.1.31.1.1.1.6.2` +- `.1.3.6.1.2.1.31.1.1.1.6.3` +- `.1.3.6.1.2.1.31.1.1.1.6.4` +- `.1.3.6.1.2.1.31.1.1.1.6....` +- `.1.3.6.1.2.1.31.1.1.1.10.1` +- `.1.3.6.1.2.1.31.1.1.1.10.2` +- `.1.3.6.1.2.1.31.1.1.1.10.3` +- `.1.3.6.1.2.1.31.1.1.1.10.4` +- `.1.3.6.1.2.1.31.1.1.1.10....` + +But the **include_instances** attribute will filter which OIDS +will be gathered; As you see, there is an other attribute, `mapping_table`. +`include_instances` and `mapping_table` permit to build a hash table +to filter only OIDS you want. +Let's say, we have the following data on SNMP server: + - OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has as value: `enp5s0` + - OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has as value: `enp5s1` + - OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has as value: `enp5s2` + - OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has as value: `eth0` + - OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has as value: `eth1` + +The plugin will build the following hash table: + +| instance name | instance id | +|---------------|-------------| +| `enp5s0` | `1` | +| `enp5s1` | `2` | +| `enp5s2` | `3` | +| `eth0` | `4` | +| `eth1` | `5` | + +With the **include_instances** attribute, the plugin will gather +the following OIDS: + +- `.1.3.6.1.2.1.31.1.1.1.6.1` +- `.1.3.6.1.2.1.31.1.1.1.6.5` +- `.1.3.6.1.2.1.31.1.1.1.10.1` +- `.1.3.6.1.2.1.31.1.1.1.10.5` + +Note: the plugin will add instance name as tag *instance* + + + +```toml +# Table with both mapping and subtable example +[[inputs.snmp]] + ## Use 'oids.txt' file to translate oids to names + ## To generate 'oids.txt' you need to run: + ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt + ## Or if you have an other MIB folder with custom MIBs + ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt + snmptranslate_file = "/tmp/oids.txt" + [[inputs.snmp.host]] + address = "127.0.0.1:161" + # SNMP community + community = "public" # default public + # SNMP version (1, 2 or 3) + # Version 3 not supported yet + version = 2 # default 2 + # Which table do you want to collect + [[inputs.snmp.host.table]] + name = "iftable4" + include_instances = ["enp5s0", "eth1"] + + # table with both mapping and subtables + [[inputs.snmp.table]] + name = "iftable4" + # if empty get all instances + mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" + # if empty get all subtables + # sub_tables could be not "real subtables" + sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] + # note + # oid attribute is useless + + # SNMP SUBTABLES + [[plugins.snmp.subtable]] + name = "bytes_recv" + oid = ".1.3.6.1.2.1.31.1.1.1.6" + unit = "octets" + + [[plugins.snmp.subtable]] + name = "bytes_send" + oid = ".1.3.6.1.2.1.31.1.1.1.10" + unit = "octets" +``` + +#### Configuration notes + +- In **plugins.snmp.table** section, the `oid` attribute is useless if + the `sub_tables` attributes is defined + +- In **plugins.snmp.subtable** section, you can put a name from `snmptranslate_file` + as `oid` attribute instead of a valid OID + +### Measurements & Fields: + +With the last example (Table with both mapping and subtable example): + +- ifHCOutOctets + - ifHCOutOctets +- ifInDiscards + - ifInDiscards +- ifHCInOctets + - ifHCInOctets + +### Tags: + +With the last example (Table with both mapping and subtable example): + +- ifHCOutOctets + - host + - instance + - unit +- ifInDiscards + - host + - instance +- ifHCInOctets + - host + - instance + - unit + +### Example Output: + +With the last example (Table with both mapping and subtable example): + +``` +ifHCOutOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCOutOctets=10565628i 1456878706044462901 +ifInDiscards,host=127.0.0.1,instance=enp5s0 ifInDiscards=0i 1456878706044510264 +ifHCInOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCInOctets=76351777i 1456878706044531312 +``` From a77bfecb0261dcd740acd3df6b149f7317bffb74 Mon Sep 17 00:00:00 2001 From: Ross McDonald Date: Wed, 2 Mar 2016 09:17:15 -0600 Subject: [PATCH 110/287] Updates to build script to improve ARM builds and other functionality. --- scripts/build.py | 624 +++++++++++++++++++++++++++++------------------ 1 file changed, 382 insertions(+), 242 deletions(-) diff --git a/scripts/build.py b/scripts/build.py index 0f3007cfa..6c9302158 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -1,11 +1,4 @@ -#!/usr/bin/env python -# -# This is the Telegraf build script. -# -# Current caveats: -# - Does not checkout the correct commit/branch (for now, you will need to do so manually) -# - Has external dependencies for packaging (fpm) and uploading (boto) -# +#!/usr/bin/python -u import sys import os @@ -19,7 +12,12 @@ import re debug = False -# PACKAGING VARIABLES +################ +#### Telegraf Variables +################ + +# Packaging variables +PACKAGE_NAME = "telegraf" INSTALL_ROOT_DIR = "/usr/bin" LOG_DIR = "/var/log/telegraf" SCRIPT_DIR = "/usr/lib/telegraf/scripts" @@ -34,6 +32,14 @@ DEFAULT_WINDOWS_CONFIG = "etc/telegraf_windows.conf" POSTINST_SCRIPT = "scripts/post-install.sh" PREINST_SCRIPT = "scripts/pre-install.sh" +# Default AWS S3 bucket for uploads +DEFAULT_BUCKET = "get.influxdb.org/telegraf" + +CONFIGURATION_FILES = [ + CONFIG_DIR + '/telegraf.conf', + LOGROTATE_DIR + '/telegraf', +] + # META-PACKAGE VARIABLES PACKAGE_LICENSE = "MIT" PACKAGE_URL = "https://github.com/influxdata/telegraf" @@ -43,7 +49,8 @@ DESCRIPTION = "Plugin-driven server agent for reporting metrics into InfluxDB." # SCRIPT START prereqs = [ 'git', 'go' ] -optional_prereqs = [ 'fpm', 'rpmbuild' ] +go_vet_command = "go tool vet -composites=true ./" +optional_prereqs = [ 'gvm', 'fpm', 'rpmbuild' ] fpm_common_args = "-f -s dir --log error \ --vendor {} \ @@ -66,30 +73,79 @@ fpm_common_args = "-f -s dir --log error \ DESCRIPTION) targets = { - 'telegraf' : './cmd/telegraf/telegraf.go', + 'telegraf' : './cmd/telegraf', } supported_builds = { - 'darwin': [ "amd64", "i386" ], - 'windows': [ "amd64", "i386" ], - 'linux': [ "amd64", "i386", "arm" ], - 'freebsd': [ "amd64" ] + "darwin": [ "amd64", "i386" ], + "windows": [ "amd64", "i386" ], + "linux": [ "amd64", "i386", "armhf", "armel", "arm64" ], + "freebsd": [ "amd64" ] } + supported_packages = { - "darwin": [ "tar", "zip" ], - "linux": [ "deb", "rpm", "tar", "zip" ], + "darwin": [ "tar" ], + "linux": [ "deb", "rpm", "tar" ], "windows": [ "zip" ], - 'freebsd': [ "tar" ] + "freebsd": [ "tar" ] } + supported_tags = { # "linux": { # "amd64": ["sensors"] # } } + prereq_cmds = { # "linux": "sudo apt-get install lm-sensors libsensors4-dev" } +################ +#### Telegraf Functions +################ + +def create_package_fs(build_root): + print("Creating a filesystem hierarchy from directory: {}".format(build_root)) + # Using [1:] for the path names due to them being absolute + # (will overwrite previous paths, per 'os.path.join' documentation) + dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:] ] + for d in dirs: + create_dir(os.path.join(build_root, d)) + os.chmod(os.path.join(build_root, d), 0o755) + +def package_scripts(build_root, windows=False): + print("Copying scripts and sample configuration to build directory") + if windows: + shutil.copyfile(DEFAULT_WINDOWS_CONFIG, os.path.join(build_root, "telegraf.conf")) + os.chmod(os.path.join(build_root, "telegraf.conf"), 0o644) + else: + shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1])) + os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644) + shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1])) + os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644) + shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf")) + os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0o644) + shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf")) + os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0o644) + +def run_generate(): + # NOOP for Telegraf + return True + +def go_get(branch, update=False, no_stash=False): + if not check_path_for("gdm"): + print("Downloading `gdm`...") + get_command = "go get github.com/sparrc/gdm" + run(get_command) + print("Retrieving dependencies with `gdm`...") + run("{}/bin/gdm restore -f Godeps_windows".format(os.environ.get("GOPATH"))) + run("{}/bin/gdm restore".format(os.environ.get("GOPATH"))) + return True + +################ +#### All Telegraf-specific content above this line +################ + def run(command, allow_failure=False, shell=False): out = None if debug: @@ -100,6 +156,8 @@ def run(command, allow_failure=False, shell=False): else: out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT) out = out.decode("utf8") + if debug: + print("[DEBUG] command output: {}".format(out)) except subprocess.CalledProcessError as e: print("") print("") @@ -129,16 +187,32 @@ def run(command, allow_failure=False, shell=False): else: return out -def create_temp_dir(prefix=None): +def create_temp_dir(prefix = None): if prefix is None: - return tempfile.mkdtemp(prefix="telegraf-build.") + return tempfile.mkdtemp(prefix="{}-build.".format(PACKAGE_NAME)) else: return tempfile.mkdtemp(prefix=prefix) +def get_current_version_tag(): + version = run("git describe --always --tags --abbrev=0").strip() + return version + def get_current_version(): - command = "git describe --always --tags --abbrev=0" - out = run(command) - return out.strip() + version_tag = get_current_version_tag() + if version_tag[0] == 'v': + # Remove leading 'v' and possible '-rc\d+' + version = re.sub(r'-rc\d+', '', version_tag[1:]) + else: + version = re.sub(r'-rc\d+', '', version_tag) + return version + +def get_current_rc(): + rc = None + version_tag = get_current_version_tag() + matches = re.match(r'.*-rc(\d+)', version_tag) + if matches: + rc, = matches.groups(1) + return rc def get_current_commit(short=False): command = None @@ -183,56 +257,61 @@ def check_path_for(b): if os.path.isfile(full_path) and os.access(full_path, os.X_OK): return full_path -def check_environ(build_dir = None): - print("\nChecking environment:") +def check_environ(build_dir=None): + print("") + print("Checking environment:") for v in [ "GOPATH", "GOBIN", "GOROOT" ]: - print("\t- {} -> {}".format(v, os.environ.get(v))) + print("- {} -> {}".format(v, os.environ.get(v))) cwd = os.getcwd() - if build_dir == None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd: - print("\n!! WARNING: Your current directory is not under your GOPATH. This may lead to build failures.") + if build_dir is None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd: + print("!! WARNING: Your current directory is not under your GOPATH. This may lead to build failures.") def check_prereqs(): - print("\nChecking for dependencies:") + print("") + print("Checking for dependencies:") for req in prereqs: path = check_path_for(req) - if path is None: - path = '?' - print("\t- {} -> {}".format(req, path)) + if path: + print("- {} -> {}".format(req, path)) + else: + print("- {} -> ?".format(req)) for req in optional_prereqs: path = check_path_for(req) - if path is None: - path = '?' - print("\t- {} (optional) -> {}".format(req, path)) + if path: + print("- {} (optional) -> {}".format(req, path)) + else: + print("- {} (optional) -> ?".format(req)) print("") + return True def upload_packages(packages, bucket_name=None, nightly=False): if debug: - print("[DEBUG] upload_packags: {}".format(packages)) + print("[DEBUG] upload_packages: {}".format(packages)) try: import boto from boto.s3.key import Key except ImportError: - print "!! Cannot upload packages without the 'boto' python library." + print("!! Cannot upload packages without the 'boto' Python library.") return 1 - print("Uploading packages to S3...") - print("") + print("Connecting to S3...".format(bucket_name)) c = boto.connect_s3() if bucket_name is None: - bucket_name = 'get.influxdb.org/telegraf' + bucket_name = DEFAULT_BUCKET bucket = c.get_bucket(bucket_name.split('/')[0]) - print("\t - Using bucket: {}".format(bucket_name)) + print("Using bucket: {}".format(bucket_name)) for p in packages: if '/' in bucket_name: # Allow for nested paths within the bucket name (ex: - # bucket/telegraf). Assuming forward-slashes as path + # bucket/folder). Assuming forward-slashes as path # delimiter. name = os.path.join('/'.join(bucket_name.split('/')[1:]), os.path.basename(p)) else: name = os.path.basename(p) if bucket.get_key(name) is None or nightly: - print("\t - Uploading {} to {}...".format(name, bucket_name)) + print("Uploading {}...".format(name)) + sys.stdout.flush() k = Key(bucket) k.key = name if nightly: @@ -241,8 +320,54 @@ def upload_packages(packages, bucket_name=None, nightly=False): n = k.set_contents_from_filename(p, replace=False) k.make_public() else: - print("\t - Not uploading {}, already exists.".format(p)) + print("!! Not uploading package {}, as it already exists.".format(p)) print("") + return 0 + +def run_tests(race, parallel, timeout, no_vet): + print("Downloading vet tool...") + sys.stdout.flush() + run("go get golang.org/x/tools/cmd/vet") + print("Running tests:") + print("\tRace: {}".format(race)) + if parallel is not None: + print("\tParallel: {}".format(parallel)) + if timeout is not None: + print("\tTimeout: {}".format(timeout)) + sys.stdout.flush() + p = subprocess.Popen(["go", "fmt", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() + if len(out) > 0 or len(err) > 0: + print("Code not formatted. Please use 'go fmt ./...' to fix formatting errors.") + print(out) + print(err) + return False + if not no_vet: + p = subprocess.Popen(go_vet_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() + if len(out) > 0 or len(err) > 0: + print("Go vet failed. Please run 'go vet ./...' and fix any errors.") + print(out) + print(err) + return False + else: + print("Skipping go vet ...") + sys.stdout.flush() + test_command = "go test -v" + if race: + test_command += " -race" + if parallel is not None: + test_command += " -parallel {}".format(parallel) + if timeout is not None: + test_command += " -timeout {}".format(timeout) + test_command += " ./..." + code = os.system(test_command) + if code != 0: + print("Tests Failed") + return False + else: + print("Tests Passed") + return True def build(version=None, branch=None, @@ -253,22 +378,18 @@ def build(version=None, rc=None, race=False, clean=False, - outdir=".", - goarm_version="6"): - print("-------------------------") - print("") - print("Build plan:") - print("\t- version: {}".format(version)) + outdir="."): + print("\n-------------------------\n") + print("Build Plan:") + print("- version: {}".format(version)) if rc: - print("\t- release candidate: {}".format(rc)) - print("\t- commit: {}".format(commit)) - print("\t- branch: {}".format(branch)) - print("\t- platform: {}".format(platform)) - print("\t- arch: {}".format(arch)) - if arch == 'arm' and goarm_version: - print("\t- ARM version: {}".format(goarm_version)) - print("\t- nightly? {}".format(str(nightly).lower())) - print("\t- race enabled? {}".format(str(race).lower())) + print("- release candidate: {}".format(rc)) + print("- commit: {}".format(get_current_commit(short=True))) + print("- branch: {}".format(get_current_branch())) + print("- platform: {}".format(platform)) + print("- arch: {}".format(arch)) + print("- nightly? {}".format(str(nightly).lower())) + print("- race enabled? {}".format(str(race).lower())) print("") if not os.path.exists(outdir): @@ -282,45 +403,49 @@ def build(version=None, # If a release candidate, update the version information accordingly version = "{}rc{}".format(version, rc) - # Set architecture to something that Go expects - if arch == 'i386': - arch = '386' - elif arch == 'x86_64': - arch = 'amd64' - print("Starting build...") + tmp_build_dir = create_temp_dir() for b, c in targets.items(): - if platform == 'windows': - b = b + '.exe' - print("\t- Building '{}'...".format(os.path.join(outdir, b))) + print("Building '{}'...".format(os.path.join(outdir, b))) build_command = "" - build_command += "GOOS={} GOARCH={} ".format(platform, arch) - if arch == "arm" and goarm_version: - if goarm_version not in ["5", "6", "7", "arm64"]: - print("!! Invalid ARM build version: {}".format(goarm_version)) - build_command += "GOARM={} ".format(goarm_version) - build_command += "go build -o {} ".format(os.path.join(outdir, b)) + if "arm" in arch: + build_command += "GOOS={} GOARCH={} ".format(platform, "arm") + else: + if arch == 'i386': + arch = '386' + elif arch == 'x86_64': + arch = 'amd64' + build_command += "GOOS={} GOARCH={} ".format(platform, arch) + if "arm" in arch: + if arch == "armel": + build_command += "GOARM=5 " + elif arch == "armhf" or arch == "arm": + build_command += "GOARM=6 " + elif arch == "arm64": + build_command += "GOARM=7 " + else: + print("!! Invalid ARM architecture specifed: {}".format(arch)) + print("Please specify either 'armel', 'armhf', or 'arm64'") + return 1 + if platform == 'windows': + build_command += "go build -o {} ".format(os.path.join(outdir, b + '.exe')) + else: + build_command += "go build -o {} ".format(os.path.join(outdir, b)) if race: build_command += "-race " - if platform in supported_tags: - if arch in supported_tags[platform]: - build_tags = supported_tags[platform][arch] - for build_tag in build_tags: - build_command += "-tags "+build_tag+" " go_version = get_go_version() if "1.4" in go_version: - build_command += "-ldflags=\"-X main.buildTime '{}' ".format(datetime.datetime.utcnow().isoformat()) - build_command += "-X main.Version {} ".format(version) - build_command += "-X main.Branch {} ".format(get_current_branch()) - build_command += "-X main.Commit {}\" ".format(get_current_commit()) + build_command += "-ldflags=\"-X main.Version {} -X main.Branch {} -X main.Commit {}\" ".format(version, + get_current_branch(), + get_current_commit()) else: - build_command += "-ldflags=\"-X main.buildTime='{}' ".format(datetime.datetime.utcnow().isoformat()) - build_command += "-X main.Version={} ".format(version) - build_command += "-X main.Branch={} ".format(get_current_branch()) - build_command += "-X main.Commit={}\" ".format(get_current_commit()) + # With Go 1.5, the linker flag arguments changed to 'name=value' from 'name value' + build_command += "-ldflags=\"-X main.Version={} -X main.Branch={} -X main.Commit={}\" ".format(version, + get_current_branch(), + get_current_commit()) build_command += c run(build_command, shell=True) - print("") + return 0 def create_dir(path): try: @@ -345,35 +470,12 @@ def copy_file(fr, to): except OSError as e: print(e) -def create_package_fs(build_root): - print("\t- Creating a filesystem hierarchy from directory: {}".format(build_root)) - # Using [1:] for the path names due to them being absolute - # (will overwrite previous paths, per 'os.path.join' documentation) - dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:] ] - for d in dirs: - create_dir(os.path.join(build_root, d)) - os.chmod(os.path.join(build_root, d), 0o755) - -def package_scripts(build_root, windows=False): - print("\t- Copying scripts and sample configuration to build directory") - if windows: - shutil.copyfile(DEFAULT_WINDOWS_CONFIG, os.path.join(build_root, "telegraf.conf")) - os.chmod(os.path.join(build_root, "telegraf.conf"), 0o644) - else: - shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1])) - os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644) - shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1])) - os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644) - shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf")) - os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0o644) - shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf")) - os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0o644) - -def go_get(): - print("Retrieving Go dependencies...") - run("go get github.com/sparrc/gdm") - run("gdm restore -f Godeps_windows") - run("gdm restore") +def generate_md5_from_file(path): + m = hashlib.md5() + with open(path, 'rb') as f: + for chunk in iter(lambda: f.read(4096), b""): + m.update(chunk) + return m.hexdigest() def generate_md5_from_file(path): m = hashlib.md5() @@ -385,103 +487,111 @@ def generate_md5_from_file(path): m.update(data) return m.hexdigest() -def build_packages(build_output, version, pkg_arch, nightly=False, rc=None, iteration=1): +def build_packages(build_output, version, nightly=False, rc=None, iteration=1): outfiles = [] tmp_build_dir = create_temp_dir() if debug: print("[DEBUG] build_output = {}".format(build_output)) try: - print("-------------------------") - print("") + print("-------------------------\n") print("Packaging...") - for p in build_output: + for platform in build_output: # Create top-level folder displaying which platform (linux, etc) - create_dir(os.path.join(tmp_build_dir, p)) - for a in build_output[p]: - current_location = build_output[p][a] - # Create second-level directory displaying the architecture (amd64, etc)p - build_root = os.path.join(tmp_build_dir, p, a) + create_dir(os.path.join(tmp_build_dir, platform)) + for arch in build_output[platform]: + # Create second-level directory displaying the architecture (amd64, etc) + current_location = build_output[platform][arch] + # Create directory tree to mimic file system of package + build_root = os.path.join(tmp_build_dir, + platform, + arch, + '{}-{}-{}'.format(PACKAGE_NAME, version, iteration)) create_dir(build_root) - if p == 'windows': - package_scripts(build_root, windows=True) - else: - create_package_fs(build_root) - # Copy in packaging and miscellaneous scripts - package_scripts(build_root) - # Copy newly-built binaries to packaging directory - for b in targets: - if p == 'windows': - b = b + '.exe' - to = os.path.join(build_root, b) - else: - to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], b) - fr = os.path.join(current_location, b) - print("\t- [{}][{}] - Moving from '{}' to '{}'".format(p, a, fr, to)) - copy_file(fr, to) - # Package the directory structure - for package_type in supported_packages[p]: - print("\t- Packaging directory '{}' as '{}'...".format(build_root, package_type)) - name = "telegraf" + create_package_fs(build_root) + + # Copy packaging scripts to build directory + package_scripts(build_root) + + for binary in targets: + # Copy newly-built binaries to packaging directory + if platform == 'windows': + binary = binary + '.exe' + # Where the binary currently is located + fr = os.path.join(current_location, binary) + # Where the binary should go in the package filesystem + to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary) + if debug: + print("[{}][{}] - Moving from '{}' to '{}'".format(platform, + arch, + fr, + to)) + copy_file(fr, to) + + for package_type in supported_packages[platform]: + # Package the directory structure for each package type for the platform + print("Packaging directory '{}' as '{}'...".format(build_root, package_type)) + name = PACKAGE_NAME # Reset version, iteration, and current location on each run # since they may be modified below. package_version = version package_iteration = iteration - current_location = build_output[p][a] + package_build_root = build_root + current_location = build_output[platform][arch] if package_type in ['zip', 'tar']: + # For tars and zips, start the packaging one folder above + # the build root (to include the package name) + package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1])) if nightly: - name = '{}-nightly_{}_{}'.format(name, p, a) + name = '{}-nightly_{}_{}'.format(name, + platform, + arch) else: - name = '{}-{}-{}_{}_{}'.format(name, package_version, package_iteration, p, a) + name = '{}-{}-{}_{}_{}'.format(name, + package_version, + package_iteration, + platform, + arch) + if package_type == 'tar': - # Add `tar.gz` to path to reduce package size + # Add `tar.gz` to path to compress package output current_location = os.path.join(current_location, name + '.tar.gz') + elif package_type == 'zip': + current_location = os.path.join(current_location, name + '.zip') + if rc is not None: + # Set iteration to 0 since it's a release candidate package_iteration = "0.rc{}".format(rc) - saved_a = a - if pkg_arch is not None: - a = pkg_arch - if a == '386': - a = 'i386' - if package_type == 'zip': - zip_command = "cd {} && zip {}.zip ./*".format( - build_root, - name) - run(zip_command, shell=True) - run("mv {}.zip {}".format(os.path.join(build_root, name), current_location), shell=True) - outfile = os.path.join(current_location, name+".zip") - outfiles.append(outfile) - print("\t\tMD5 = {}".format(generate_md5_from_file(outfile))) + + fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format( + fpm_common_args, + name, + arch, + package_type, + package_version, + package_iteration, + package_build_root, + current_location) + if debug: + fpm_command += "--verbose " + if package_type == "rpm": + fpm_command += "--depends coreutils " + fpm_command += "--depends lsof " + out = run(fpm_command, shell=True) + matches = re.search(':path=>"(.*)"', out) + outfile = None + if matches is not None: + outfile = matches.groups()[0] + if outfile is None: + print("!! Could not determine output from packaging command.") else: - fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format( - fpm_common_args, - name, - a, - package_type, - package_version, - package_iteration, - build_root, - current_location) - if pkg_arch is not None: - a = saved_a - if package_type == "rpm": - fpm_command += "--depends coreutils " - fpm_command += "--depends lsof" - out = run(fpm_command, shell=True) - matches = re.search(':path=>"(.*)"', out) - outfile = None - if matches is not None: - outfile = matches.groups()[0] - if outfile is None: - print("[ COULD NOT DETERMINE OUTPUT ]") - else: - # Strip nightly version (the unix epoch) from filename - if nightly and package_type in ['deb', 'rpm']: - outfile = rename_file(outfile, outfile.replace("{}-{}".format(version, iteration), "nightly")) - outfiles.append(os.path.join(os.getcwd(), outfile)) - # Display MD5 hash for generated package - print("\t\tMD5 = {}".format(generate_md5_from_file(outfile))) + # Strip nightly version (the unix epoch) from filename + if nightly and package_type in [ 'deb', 'rpm' ]: + outfile = rename_file(outfile, outfile.replace("{}-{}".format(version, iteration), "nightly")) + outfiles.append(os.path.join(os.getcwd(), outfile)) + # Display MD5 hash for generated package + print("MD5({}) = {}".format(outfile, generate_md5_from_file(outfile))) print("") if debug: print("[DEBUG] package outfiles: {}".format(outfiles)) @@ -495,11 +605,9 @@ def print_usage(): print("") print("Options:") print("\t --outdir= \n\t\t- Send build output to a specified path. Defaults to ./build.") - print("\t --arch= \n\t\t- Build for specified architecture. Acceptable values: x86_64|amd64, 386, arm, or all") - print("\t --goarm= \n\t\t- Build for specified ARM version (when building for ARM). Default value is: 6") + print("\t --arch= \n\t\t- Build for specified architecture. Acceptable values: x86_64|amd64, 386|i386, arm, or all") print("\t --platform= \n\t\t- Build for specified platform. Acceptable values: linux, windows, darwin, or all") print("\t --version= \n\t\t- Version information to apply to build metadata. If not specified, will be pulled from repo tag.") - print("\t --pkgarch= \n\t\t- Package architecture if different from ") print("\t --commit= \n\t\t- Use specific commit for build (currently a NOOP).") print("\t --branch= \n\t\t- Build from a specific branch (currently a NOOP).") print("\t --rc= \n\t\t- Whether or not the build is a release candidate (affects version information).") @@ -507,9 +615,13 @@ def print_usage(): print("\t --race \n\t\t- Whether the produced build should have race detection enabled.") print("\t --package \n\t\t- Whether the produced builds should be packaged for the target platform(s).") print("\t --nightly \n\t\t- Whether the produced build is a nightly (affects version information).") + print("\t --update \n\t\t- Whether dependencies should be updated prior to building.") + print("\t --test \n\t\t- Run Go tests. Will not produce a build.") print("\t --parallel \n\t\t- Run Go tests in parallel up to the count specified.") + print("\t --generate \n\t\t- Run `go generate`.") print("\t --timeout \n\t\t- Timeout for Go tests. Defaults to 480s.") print("\t --clean \n\t\t- Clean the build output directory prior to creating build.") + print("\t --no-get \n\t\t- Do not run `go get` before building.") print("\t --bucket=\n\t\t- Full path of the bucket to upload packages to (must also specify --upload).") print("\t --debug \n\t\t- Displays debug output.") print("") @@ -518,17 +630,18 @@ def print_package_summary(packages): print(packages) def main(): + global debug + # Command-line arguments outdir = "build" commit = None target_platform = None target_arch = None - package_arch = None nightly = False race = False branch = None version = get_current_version() - rc = None + rc = get_current_rc() package = False update = False clean = False @@ -538,15 +651,15 @@ def main(): timeout = None iteration = 1 no_vet = False - goarm_version = "6" run_get = True upload_bucket = None - global debug + generate = False + no_stash = False for arg in sys.argv[1:]: if '--outdir' in arg: # Output directory. If none is specified, then builds will be placed in the same directory. - output_dir = arg.split("=")[1] + outdir = arg.split("=")[1] if '--commit' in arg: # Commit to build from. If none is specified, then it will build from the most recent commit. commit = arg.split("=")[1] @@ -562,9 +675,6 @@ def main(): elif '--version' in arg: # Version to assign to this build (0.9.5, etc) version = arg.split("=")[1] - elif '--pkgarch' in arg: - # Package architecture if different from (armhf, etc) - package_arch = arg.split("=")[1] elif '--rc' in arg: # Signifies that this is a release candidate build. rc = arg.split("=")[1] @@ -574,12 +684,20 @@ def main(): elif '--package' in arg: # Signifies that packages should be built. package = True + # If packaging do not allow stashing of local changes + no_stash = True elif '--nightly' in arg: # Signifies that this is a nightly build. nightly = True + elif '--update' in arg: + # Signifies that dependencies should be updated. + update = True elif '--upload' in arg: # Signifies that the resulting packages should be uploaded to S3 upload = True + elif '--test' in arg: + # Run tests and exit + test = True elif '--parallel' in arg: # Set parallel for tests. parallel = int(arg.split("=")[1]) @@ -593,14 +711,19 @@ def main(): iteration = arg.split("=")[1] elif '--no-vet' in arg: no_vet = True - elif '--goarm' in arg: - # Signifies GOARM flag to pass to build command when compiling for ARM - goarm_version = arg.split("=")[1] + elif '--no-get' in arg: + run_get = False elif '--bucket' in arg: # The bucket to upload the packages to, relies on boto upload_bucket = arg.split("=")[1] + elif '--no-stash' in arg: + # Do not stash uncommited changes + # Fail if uncommited changes exist + no_stash = True + elif '--generate' in arg: + generate = True elif '--debug' in arg: - print "[DEBUG] Using debug output" + print("[DEBUG] Using debug output") debug = True elif '--help' in arg: print_usage() @@ -610,54 +733,69 @@ def main(): print_usage() return 1 + if nightly and rc: + print("!! Cannot be both nightly and a release candidate! Stopping.") + return 1 + if nightly: - if rc: - print("!! Cannot be both nightly and a release candidate! Stopping.") - return 1 - # In order to support nightly builds on the repository, we are adding the epoch timestamp + # In order to cleanly delineate nightly version, we are adding the epoch timestamp # to the version so that version numbers are always greater than the previous nightly. - version = "{}.n{}".format(version, int(time.time())) + version = "{}~n{}".format(version, int(time.time())) + iteration = 0 + elif rc: + iteration = 0 # Pre-build checks check_environ() - check_prereqs() + if not check_prereqs(): + return 1 if not commit: commit = get_current_commit(short=True) if not branch: branch = get_current_branch() if not target_arch: - if 'arm' in get_system_arch(): + system_arch = get_system_arch() + if 'arm' in system_arch: # Prevent uname from reporting ARM arch (eg 'armv7l') target_arch = "arm" else: - target_arch = get_system_arch() - if not target_platform: + target_arch = system_arch + if target_arch == '386': + target_arch = 'i386' + elif target_arch == 'x86_64': + target_arch = 'amd64' + if target_platform: + if target_platform not in supported_builds and target_platform != 'all': + print("! Invalid build platform: {}".format(target_platform)) + return 1 + else: target_platform = get_system_platform() - if rc or nightly: - # If a release candidate or nightly, set iteration to 0 (instead of 1) - iteration = 0 - - if target_arch == '386': - target_arch = 'i386' - elif target_arch == 'x86_64': - target_arch = 'amd64' build_output = {} - go_get() + if generate: + if not run_generate(): + return 1 + + if run_get: + if not go_get(branch, update=update, no_stash=no_stash): + return 1 + + if test: + if not run_tests(race, parallel, timeout, no_vet): + return 1 + return 0 platforms = [] single_build = True if target_platform == 'all': - platforms = list(supported_builds.keys()) + platforms = supported_builds.keys() single_build = False else: platforms = [target_platform] for platform in platforms: - if platform in prereq_cmds: - run(prereq_cmds[platform]) build_output.update( { platform : {} } ) archs = [] if target_arch == "all": @@ -665,32 +803,34 @@ def main(): archs = supported_builds.get(platform) else: archs = [target_arch] + for arch in archs: od = outdir if not single_build: od = os.path.join(outdir, platform, arch) - build(version=version, - branch=branch, - commit=commit, - platform=platform, - arch=arch, - nightly=nightly, - rc=rc, - race=race, - clean=clean, - outdir=od, - goarm_version=goarm_version) + if build(version=version, + branch=branch, + commit=commit, + platform=platform, + arch=arch, + nightly=nightly, + rc=rc, + race=race, + clean=clean, + outdir=od): + return 1 build_output.get(platform).update( { arch : od } ) # Build packages if package: if not check_path_for("fpm"): - print("!! Cannot package without command 'fpm'. Stopping.") + print("!! Cannot package without command 'fpm'.") return 1 - packages = build_packages(build_output, version, package_arch, nightly=nightly, rc=rc, iteration=iteration) - # Optionally upload to S3 + + packages = build_packages(build_output, version, nightly=nightly, rc=rc, iteration=iteration) if upload: upload_packages(packages, bucket_name=upload_bucket, nightly=nightly) + print("Done!") return 0 if __name__ == '__main__': From 095c90ad22721ec1a71555372ace847f387bb611 Mon Sep 17 00:00:00 2001 From: Ross McDonald Date: Wed, 2 Mar 2016 10:42:58 -0600 Subject: [PATCH 111/287] Re-added zip package output format. Modified zip and tar packaging process to use the base 'tar' and 'zip' commands, instead of 'fpm'. --- scripts/build.py | 84 ++++++++++++++++++++++++++---------------------- 1 file changed, 46 insertions(+), 38 deletions(-) diff --git a/scripts/build.py b/scripts/build.py index 6c9302158..989cc7d6c 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -84,7 +84,7 @@ supported_builds = { } supported_packages = { - "darwin": [ "tar" ], + "darwin": [ "tar", "zip" ], "linux": [ "deb", "rpm", "tar" ], "windows": [ "zip" ], "freebsd": [ "tar" ] @@ -538,6 +538,9 @@ def build_packages(build_output, version, nightly=False, rc=None, iteration=1): package_iteration = iteration package_build_root = build_root current_location = build_output[platform][arch] + if rc is not None: + # Set iteration to 0 since it's a release candidate + package_iteration = "0.rc{}".format(rc) if package_type in ['zip', 'tar']: # For tars and zips, start the packaging one folder above @@ -554,44 +557,49 @@ def build_packages(build_output, version, nightly=False, rc=None, iteration=1): platform, arch) - if package_type == 'tar': - # Add `tar.gz` to path to compress package output - current_location = os.path.join(current_location, name + '.tar.gz') - elif package_type == 'zip': - current_location = os.path.join(current_location, name + '.zip') - - if rc is not None: - # Set iteration to 0 since it's a release candidate - package_iteration = "0.rc{}".format(rc) - - fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format( - fpm_common_args, - name, - arch, - package_type, - package_version, - package_iteration, - package_build_root, - current_location) - if debug: - fpm_command += "--verbose " - if package_type == "rpm": - fpm_command += "--depends coreutils " - fpm_command += "--depends lsof " - out = run(fpm_command, shell=True) - matches = re.search(':path=>"(.*)"', out) - outfile = None - if matches is not None: - outfile = matches.groups()[0] - if outfile is None: - print("!! Could not determine output from packaging command.") + current_location = os.path.join(os.getcwd(), current_location) + if package_type == 'tar': + tar_command = "cd {} && tar -cvzf {}.tar.gz ./*".format(build_root, name) + run(tar_command, shell=True) + run("mv {}.tar.gz {}".format(os.path.join(build_root, name), current_location), shell=True) + outfile = os.path.join(current_location, name + ".tar.gz") + outfiles.append(outfile) + print("MD5({}) = {}".format(outfile, generate_md5_from_file(outfile))) + elif package_type == 'zip': + zip_command = "cd {} && zip -r {}.zip ./*".format(build_root, name) + run(zip_command, shell=True) + run("mv {}.zip {}".format(os.path.join(build_root, name), current_location), shell=True) + outfile = os.path.join(current_location, name + ".zip") + outfiles.append(outfile) + print("MD5({}) = {}".format(outfile, generate_md5_from_file(outfile))) else: - # Strip nightly version (the unix epoch) from filename - if nightly and package_type in [ 'deb', 'rpm' ]: - outfile = rename_file(outfile, outfile.replace("{}-{}".format(version, iteration), "nightly")) - outfiles.append(os.path.join(os.getcwd(), outfile)) - # Display MD5 hash for generated package - print("MD5({}) = {}".format(outfile, generate_md5_from_file(outfile))) + fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(fpm_common_args, + name, + arch, + package_type, + package_version, + package_iteration, + package_build_root, + current_location) + if debug: + fpm_command += "--verbose " + if package_type == "rpm": + fpm_command += "--depends coreutils " + fpm_command += "--depends lsof " + out = run(fpm_command, shell=True) + matches = re.search(':path=>"(.*)"', out) + outfile = None + if matches is not None: + outfile = matches.groups()[0] + if outfile is None: + print("!! Could not determine output from packaging command.") + else: + # Strip nightly version (the unix epoch) from filename + if nightly: + outfile = rename_file(outfile, outfile.replace("{}-{}".format(version, iteration), "nightly")) + outfiles.append(os.path.join(os.getcwd(), outfile)) + # Display MD5 hash for generated package + print("MD5({}) = {}".format(outfile, generate_md5_from_file(outfile))) print("") if debug: print("[DEBUG] package outfiles: {}".format(outfiles)) From c3d220175f2e79366713be225748fdcf369a488e Mon Sep 17 00:00:00 2001 From: Ross McDonald Date: Wed, 2 Mar 2016 11:12:45 -0600 Subject: [PATCH 112/287] Removed i386 as a target for darwin, as it currently doesnt compile. --- scripts/build.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/build.py b/scripts/build.py index 989cc7d6c..af597d28a 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -77,7 +77,7 @@ targets = { } supported_builds = { - "darwin": [ "amd64", "i386" ], + "darwin": [ "amd64" ], "windows": [ "amd64", "i386" ], "linux": [ "amd64", "i386", "armhf", "armel", "arm64" ], "freebsd": [ "amd64" ] From 0a9accccc105a2576dab1338c2e3a849e0707d50 Mon Sep 17 00:00:00 2001 From: Ross McDonald Date: Wed, 2 Mar 2016 11:16:30 -0600 Subject: [PATCH 113/287] Added permissions check to post-install script due to issues with RPMs having the incorrect permissions on the log directory. --- scripts/post-install.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/post-install.sh b/scripts/post-install.sh index 4f11fe8f6..0982dc855 100644 --- a/scripts/post-install.sh +++ b/scripts/post-install.sh @@ -28,7 +28,9 @@ if [[ $? -ne 0 ]]; then useradd --system -U -M telegraf -s /bin/false -d /etc/telegraf fi +test -d $LOG_DIR || mkdir -p $LOG_DIR chown -R -L telegraf:telegraf $LOG_DIR +chmod 755 $LOG_DIR # Remove legacy symlink, if it exists if [[ -L /etc/init.d/telegraf ]]; then From 28eb9b4c29b8c84412708446bcc0ee33642502e3 Mon Sep 17 00:00:00 2001 From: Ross McDonald Date: Wed, 2 Mar 2016 11:55:31 -0600 Subject: [PATCH 114/287] Fixed issue where binary wasnt copied to packaging directory correctly. --- scripts/build.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/build.py b/scripts/build.py index af597d28a..983cfae1b 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -526,7 +526,7 @@ def build_packages(build_output, version, nightly=False, rc=None, iteration=1): arch, fr, to)) - copy_file(fr, to) + copy_file(fr, to) for package_type in supported_packages[platform]: # Package the directory structure for each package type for the platform From b4b186628623715785cce6c46dd126f4e6fe3717 Mon Sep 17 00:00:00 2001 From: Ross McDonald Date: Wed, 2 Mar 2016 12:06:16 -0600 Subject: [PATCH 115/287] Removed test functionality from build script. closes #708 closes #713 --- CHANGELOG.md | 2 ++ scripts/build.py | 48 ++++-------------------------------------------- 2 files changed, 6 insertions(+), 44 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 79cc0a235..cf71e79aa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,8 @@ - [#781](https://github.com/influxdata/telegraf/pull/781): Fix mqtt_consumer username not being set. Thanks @chaton78! - [#786](https://github.com/influxdata/telegraf/pull/786): Fix mqtt output username not being set. Thanks @msangoi! - [#773](https://github.com/influxdata/telegraf/issues/773): Fix duplicate measurements in snmp plugin. Thanks @titilambert! +- [#708](https://github.com/influxdata/telegraf/issues/708): packaging: build ARM package +- [#713](https://github.com/influxdata/telegraf/issues/713): packaging: insecure permissions error on log directory ## v0.10.4.1 diff --git a/scripts/build.py b/scripts/build.py index 983cfae1b..950fa40e9 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -142,6 +142,10 @@ def go_get(branch, update=False, no_stash=False): run("{}/bin/gdm restore".format(os.environ.get("GOPATH"))) return True +def run_tests(race, parallel, timeout, no_vet): + # Currently a NOOP for Telegraf + return True + ################ #### All Telegraf-specific content above this line ################ @@ -324,50 +328,6 @@ def upload_packages(packages, bucket_name=None, nightly=False): print("") return 0 -def run_tests(race, parallel, timeout, no_vet): - print("Downloading vet tool...") - sys.stdout.flush() - run("go get golang.org/x/tools/cmd/vet") - print("Running tests:") - print("\tRace: {}".format(race)) - if parallel is not None: - print("\tParallel: {}".format(parallel)) - if timeout is not None: - print("\tTimeout: {}".format(timeout)) - sys.stdout.flush() - p = subprocess.Popen(["go", "fmt", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - out, err = p.communicate() - if len(out) > 0 or len(err) > 0: - print("Code not formatted. Please use 'go fmt ./...' to fix formatting errors.") - print(out) - print(err) - return False - if not no_vet: - p = subprocess.Popen(go_vet_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - out, err = p.communicate() - if len(out) > 0 or len(err) > 0: - print("Go vet failed. Please run 'go vet ./...' and fix any errors.") - print(out) - print(err) - return False - else: - print("Skipping go vet ...") - sys.stdout.flush() - test_command = "go test -v" - if race: - test_command += " -race" - if parallel is not None: - test_command += " -parallel {}".format(parallel) - if timeout is not None: - test_command += " -timeout {}".format(timeout) - test_command += " ./..." - code = os.system(test_command) - if code != 0: - print("Tests Failed") - return False - else: - print("Tests Passed") - return True def build(version=None, branch=None, From c6706a86f138b3d9769d306aa48656448d7619ad Mon Sep 17 00:00:00 2001 From: arthtux Date: Thu, 3 Mar 2016 20:20:03 -0500 Subject: [PATCH 116/287] add README.md for redis --- plugins/inputs/redis/README.md | 86 ++++++++++++++++++++++++++++++++++ plugins/inputs/redis/redis.go | 1 + 2 files changed, 87 insertions(+) create mode 100644 plugins/inputs/redis/README.md diff --git a/plugins/inputs/redis/README.md b/plugins/inputs/redis/README.md new file mode 100644 index 000000000..d7d98ccc9 --- /dev/null +++ b/plugins/inputs/redis/README.md @@ -0,0 +1,86 @@ +# Telegraf Plugin: Redis + +### Configuration: + +``` +# Read Redis's basic status information +[[inputs.redis]] + ## specify servers via a url matching: + ## [protocol://][:password]@address[:port] + ## e.g. + ## tcp://localhost:6379 + ## tcp://:password@192.168.99.100 + ## + ## If no servers are specified, then localhost is used as the host. + ## If no port is specified, 6379 is used + servers = ["tcp://localhost:6379"] +``` + +### Measurements & Fields: + +- Measurement + - uptime_in_seconds + - connected_clients + - used_memory + - used_memory_rss + - used_memory_peak + - used_memory_lua + - rdb_changes_since_last_save + - total_connections_received + - total_commands_processed + - instantaneous_ops_per_sec + - instantaneous_input_kbps + - instantaneous_output_kbps + - sync_full + - sync_partial_ok + - sync_partial_err + - expired_keys + - evicted_keys + - keyspace_hits + - keyspace_misses + - pubsub_channels + - pubsub_patterns + - latest_fork_usec + - connected_slaves + - master_repl_offset + - repl_backlog_active + - repl_backlog_size + - repl_backlog_histlen + - mem_fragmentation_ratio + - used_cpu_sys + - used_cpu_user + - used_cpu_sys_children + - used_cpu_user_children + +### Tags: + +- All measurements have the following tags: + - port + - server + +### Example Output: + +Using this configuration: +``` +[[inputs.nginx]] + ## specify servers via a url matching: + ## [protocol://][:password]@address[:port] + ## e.g. + ## tcp://localhost:6379 + ## tcp://:password@192.168.99.100 + ## + ## If no servers are specified, then localhost is used as the host. + ## If no port is specified, 6379 is used + servers = ["tcp://localhost:6379"] +``` + +When run with: +``` +./telegraf -config telegraf.conf -input-filter redis -test +``` + +It produces: +``` +* Plugin: redis, Collection 1 +> redis,port=6379,server=localhost clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i 1457052084987848383 +``` diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index b8862f6bc..df8dfe2f2 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -19,6 +19,7 @@ type Redis struct { } var sampleConfig = ` +[[inputs.redis]] ## specify servers via a url matching: ## [protocol://][:password]@address[:port] ## e.g. From 20999979de2e31b0d56bd129ab62fdccdb7c2160 Mon Sep 17 00:00:00 2001 From: Arthur Deschamps Date: Fri, 4 Mar 2016 07:22:54 -0500 Subject: [PATCH 117/287] Update redis.go --- plugins/inputs/redis/redis.go | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index df8dfe2f2..b8862f6bc 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -19,7 +19,6 @@ type Redis struct { } var sampleConfig = ` -[[inputs.redis]] ## specify servers via a url matching: ## [protocol://][:password]@address[:port] ## e.g. From 1c76d5d096f968a077ef4228a5a3dbe16ec33cfe Mon Sep 17 00:00:00 2001 From: Thibault Cohen Date: Tue, 23 Feb 2016 23:58:14 -0500 Subject: [PATCH 118/287] Improve docker input plugin closes #754 --- CHANGELOG.md | 1 + plugins/inputs/docker/README.md | 46 ++++++- plugins/inputs/docker/docker.go | 124 +++++++++++++++++- plugins/inputs/docker/docker_test.go | 185 +++++++++++++++++++++++++++ 4 files changed, 353 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cf71e79aa..e02c3bb9c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ - [#758](https://github.com/influxdata/telegraf/pull/758): UDP Listener input plugin, thanks @whatyouhide! - [#769](https://github.com/influxdata/telegraf/issues/769): httpjson plugin: allow specifying SSL configuration. - [#735](https://github.com/influxdata/telegraf/pull/735): SNMP Table feature. Thanks @titilambert! +- [#754](https://github.com/influxdata/telegraf/pull/754): docker plugin: adding `docker info` metrics to output. Thanks @titilambert! ### Bugfixes - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index 6086c89e8..97450e2aa 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -95,18 +95,50 @@ on the availability of per-cpu stats on your system. - io_serviced_recursive_sync - io_serviced_recursive_total - io_serviced_recursive_write +- docker_ + - n_used_file_descriptors + - n_cpus + - n_containers + - n_images + - n_goroutines + - n_listener_events + - memory_total + - pool_blocksize +- docker_data + - available + - total + - used +- docker_metadata + - available + - total + - used + ### Tags: -- All stats have the following tags: +- docker (memory_total) + - unit=bytes +- docker (pool_blocksize) + - unit=bytes +- docker_data + - unit=bytes +- docker_metadata + - unit=bytes + +- docker_cpu specific: - cont_id (container ID) - cont_image (container image) - cont_name (container name) -- docker_cpu specific: - cpu - docker_net specific: + - cont_id (container ID) + - cont_image (container image) + - cont_name (container name) - network - docker_blkio specific: + - cont_id (container ID) + - cont_image (container image) + - cont_name (container name) - device ### Example Output: @@ -114,6 +146,16 @@ on the availability of per-cpu stats on your system. ``` % ./telegraf -config ~/ws/telegraf.conf -input-filter docker -test * Plugin: docker, Collection 1 +> docker n_cpus=8i 1456926671065383978 +> docker n_used_file_descriptors=15i 1456926671065383978 +> docker n_containers=7i 1456926671065383978 +> docker n_images=152i 1456926671065383978 +> docker n_goroutines=36i 1456926671065383978 +> docker n_listener_events=0i 1456926671065383978 +> docker,unit=bytes memory_total=18935443456i 1456926671065383978 +> docker,unit=bytes pool_blocksize=65540i 1456926671065383978 +> docker_data,unit=bytes available=24340000000i,total=107400000000i,used=14820000000i 1456926671065383978 +> docker_metadata,unit=bytes available=2126999999i,total=2146999999i,used=20420000i 145692667106538 > docker_mem,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\ cont_image=spotify/kafka,cont_name=kafka \ active_anon=52568064i,active_file=6926336i,cache=12038144i,fail_count=0i,\ diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 0d89979c1..cdc8ec1e5 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -1,8 +1,11 @@ package system import ( + "encoding/json" "fmt" "log" + "regexp" + "strconv" "strings" "sync" "time" @@ -17,9 +20,29 @@ type Docker struct { Endpoint string ContainerNames []string - client *docker.Client + client DockerClient } +type DockerClient interface { + // Docker Client wrapper + // Useful for test + Info() (*docker.Env, error) + ListContainers(opts docker.ListContainersOptions) ([]docker.APIContainers, error) + Stats(opts docker.StatsOptions) error +} + +const ( + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB + PB = 1000 * TB +) + +var ( + sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`) +) + var sampleConfig = ` ## Docker Endpoint ## To use TCP, set endpoint = "tcp://[ip]:[port]" @@ -58,12 +81,20 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { d.client = c } + // Get daemon info + err := d.gatherInfo(acc) + if err != nil { + fmt.Println(err.Error()) + } + + // List containers opts := docker.ListContainersOptions{} containers, err := d.client.ListContainers(opts) if err != nil { return err } + // Get container data var wg sync.WaitGroup wg.Add(len(containers)) for _, container := range containers { @@ -81,6 +112,76 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { return nil } +func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { + // Init vars + var driverStatus [][]string + dataFields := make(map[string]interface{}) + metadataFields := make(map[string]interface{}) + now := time.Now() + // Get info from docker daemon + info, err := d.client.Info() + if err != nil { + return err + } + + fields := map[string]interface{}{ + "n_cpus": info.GetInt64("NCPU"), + "n_used_file_descriptors": info.GetInt64("NFd"), + "n_containers": info.GetInt64("Containers"), + "n_images": info.GetInt64("Images"), + "n_goroutines": info.GetInt64("NGoroutines"), + "n_listener_events": info.GetInt64("NEventsListener"), + } + // Add metrics + acc.AddFields("docker", + fields, + nil, + now) + acc.AddFields("docker", + map[string]interface{}{"memory_total": info.GetInt64("MemTotal")}, + map[string]string{"unit": "bytes"}, + now) + // Get storage metrics + driverStatusRaw := []byte(info.Get("DriverStatus")) + json.Unmarshal(driverStatusRaw, &driverStatus) + for _, rawData := range driverStatus { + // Try to convert string to int (bytes) + value, err := parseSize(rawData[1]) + if err != nil { + continue + } + name := strings.ToLower(strings.Replace(rawData[0], " ", "_", -1)) + if name == "pool_blocksize" { + // pool blocksize + acc.AddFields("docker", + map[string]interface{}{"pool_blocksize": value}, + map[string]string{"unit": "bytes"}, + now) + } else if strings.HasPrefix(name, "data_space_") { + // data space + field_name := strings.TrimPrefix(name, "data_space_") + dataFields[field_name] = value + } else if strings.HasPrefix(name, "metadata_space_") { + // metadata space + field_name := strings.TrimPrefix(name, "metadata_space_") + metadataFields[field_name] = value + } + } + if len(dataFields) > 0 { + acc.AddFields("docker_data", + dataFields, + map[string]string{"unit": "bytes"}, + now) + } + if len(metadataFields) > 0 { + acc.AddFields("docker_metadata", + metadataFields, + map[string]string{"unit": "bytes"}, + now) + } + return nil +} + func (d *Docker) gatherContainer( container docker.APIContainers, acc telegraf.Accumulator, @@ -334,6 +435,27 @@ func sliceContains(in string, sl []string) bool { return false } +// Parses the human-readable size string into the amount it represents. +func parseSize(sizeStr string) (int64, error) { + matches := sizeRegex.FindStringSubmatch(sizeStr) + if len(matches) != 4 { + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + + size, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return -1, err + } + + uMap := map[string]int64{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} + unitPrefix := strings.ToLower(matches[3]) + if mul, ok := uMap[unitPrefix]; ok { + size *= float64(mul) + } + + return int64(size), nil +} + func init() { inputs.Add("docker", func() telegraf.Input { return &Docker{} diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index aebe8102e..23fd0bb34 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -1,12 +1,14 @@ package system import ( + "encoding/json" "testing" "time" "github.com/influxdata/telegraf/testutil" "github.com/fsouza/go-dockerclient" + "github.com/stretchr/testify/require" ) func TestDockerGatherContainerStats(t *testing.T) { @@ -194,3 +196,186 @@ func testStats() *docker.Stats { return stats } + +type FakeDockerClient struct { +} + +func (d FakeDockerClient) Info() (*docker.Env, error) { + env := docker.Env{"Containers=108", "OomKillDisable=false", "SystemTime=2016-02-24T00:55:09.15073105-05:00", "NEventsListener=0", "ID=5WQQ:TFWR:FDNG:OKQ3:37Y4:FJWG:QIKK:623T:R3ME:QTKB:A7F7:OLHD", "Debug=false", "LoggingDriver=json-file", "KernelVersion=4.3.0-1-amd64", "IndexServerAddress=https://index.docker.io/v1/", "MemTotal=3840757760", "Images=199", "CpuCfsQuota=true", "Name=absol", "SwapLimit=false", "IPv4Forwarding=true", "ExecutionDriver=native-0.2", "InitSha1=23a51f3c916d2b5a3bbb31caf301fd2d14edd518", "ExperimentalBuild=false", "CpuCfsPeriod=true", "RegistryConfig={\"IndexConfigs\":{\"docker.io\":{\"Mirrors\":null,\"Name\":\"docker.io\",\"Official\":true,\"Secure\":true}},\"InsecureRegistryCIDRs\":[\"127.0.0.0/8\"],\"Mirrors\":null}", "OperatingSystem=Linux Mint LMDE (containerized)", "BridgeNfIptables=true", "HttpsProxy=", "Labels=null", "MemoryLimit=false", "DriverStatus=[[\"Pool Name\",\"docker-8:1-1182287-pool\"],[\"Pool Blocksize\",\"65.54 kB\"],[\"Backing Filesystem\",\"extfs\"],[\"Data file\",\"/dev/loop0\"],[\"Metadata file\",\"/dev/loop1\"],[\"Data Space Used\",\"17.3 GB\"],[\"Data Space Total\",\"107.4 GB\"],[\"Data Space Available\",\"36.53 GB\"],[\"Metadata Space Used\",\"20.97 MB\"],[\"Metadata Space Total\",\"2.147 GB\"],[\"Metadata Space Available\",\"2.127 GB\"],[\"Udev Sync Supported\",\"true\"],[\"Deferred Removal Enabled\",\"false\"],[\"Data loop file\",\"/var/lib/docker/devicemapper/devicemapper/data\"],[\"Metadata loop file\",\"/var/lib/docker/devicemapper/devicemapper/metadata\"],[\"Library Version\",\"1.02.115 (2016-01-25)\"]]", "NFd=19", "HttpProxy=", "Driver=devicemapper", "NGoroutines=39", "InitPath=/usr/lib/docker.io/dockerinit", "NCPU=4", "DockerRootDir=/var/lib/docker", "NoProxy=", "BridgeNfIp6tables=true"} + return &env, nil +} + +func (d FakeDockerClient) ListContainers(opts docker.ListContainersOptions) ([]docker.APIContainers, error) { + container1 := docker.APIContainers{ + ID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb", + Image: "quay.io/coreos/etcd:v2.2.2", + Command: "/etcd -name etcd0 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379", + Created: 1455941930, + Status: "Up 4 hours", + Ports: []docker.APIPort{ + docker.APIPort{ + PrivatePort: 7001, + PublicPort: 0, + Type: "tcp", + }, + docker.APIPort{ + PrivatePort: 4001, + PublicPort: 0, + Type: "tcp", + }, + docker.APIPort{ + PrivatePort: 2380, + PublicPort: 0, + Type: "tcp", + }, + docker.APIPort{ + PrivatePort: 2379, + PublicPort: 2379, + Type: "tcp", + IP: "0.0.0.0", + }, + }, + SizeRw: 0, + SizeRootFs: 0, + Names: []string{"/etcd"}, + } + container2 := docker.APIContainers{ + ID: "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173", + Image: "quay.io/coreos/etcd:v2.2.2", + Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379", + Created: 1455941933, + Status: "Up 4 hours", + Ports: []docker.APIPort{ + docker.APIPort{ + PrivatePort: 7002, + PublicPort: 0, + Type: "tcp", + }, + docker.APIPort{ + PrivatePort: 4002, + PublicPort: 0, + Type: "tcp", + }, + docker.APIPort{ + PrivatePort: 2381, + PublicPort: 0, + Type: "tcp", + }, + docker.APIPort{ + PrivatePort: 2382, + PublicPort: 2382, + Type: "tcp", + IP: "0.0.0.0", + }, + }, + SizeRw: 0, + SizeRootFs: 0, + Names: []string{"/etcd2"}, + } + + containers := []docker.APIContainers{container1, container2} + return containers, nil + + //#{e6a96c84ca91a5258b7cb752579fb68826b68b49ff957487695cd4d13c343b44 titilambert/snmpsim /bin/sh -c 'snmpsimd --agent-udpv4-endpoint=0.0.0.0:31161 --process-user=root --process-group=user' 1455724831 Up 4 hours [{31161 31161 udp 0.0.0.0}] 0 0 [/snmp] map[]}]2016/02/24 01:05:01 Gathered metrics, (3s interval), from 1 inputs in 1.233836656s +} + +func (d FakeDockerClient) Stats(opts docker.StatsOptions) error { + jsonStat := `{"read":"2016-02-24T11:42:27.472459608-05:00","memory_stats":{"stats":{},"limit":18935443456},"blkio_stats":{"io_service_bytes_recursive":[{"major":252,"minor":1,"op":"Read","value":753664},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":753664},{"major":252,"minor":1,"op":"Total","value":753664}],"io_serviced_recursive":[{"major":252,"minor":1,"op":"Read","value":26},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":26},{"major":252,"minor":1,"op":"Total","value":26}]},"cpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052607520000000,"throttling_data":{}},"precpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052599550000000,"throttling_data":{}}}` + var stat docker.Stats + json.Unmarshal([]byte(jsonStat), &stat) + opts.Stats <- &stat + return nil +} + +func TestDockerGatherInfo(t *testing.T) { + var acc testutil.Accumulator + client := FakeDockerClient{} + d := Docker{client: client} + + err := d.Gather(&acc) + + require.NoError(t, err) + + acc.AssertContainsTaggedFields(t, + "docker", + map[string]interface{}{ + "n_listener_events": int64(0), + "n_cpus": int64(4), + "n_used_file_descriptors": int64(19), + "n_containers": int64(108), + "n_images": int64(199), + "n_goroutines": int64(39), + }, + map[string]string{}, + ) + + acc.AssertContainsTaggedFields(t, + "docker_data", + map[string]interface{}{ + "used": int64(17300000000), + "total": int64(107400000000), + "available": int64(36530000000), + }, + map[string]string{ + "unit": "bytes", + }, + ) + acc.AssertContainsTaggedFields(t, + "docker_cpu", + map[string]interface{}{ + "usage_total": uint64(1231652), + }, + map[string]string{ + "cont_id": "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173", + "cont_name": "etcd2", + "cont_image": "quay.io/coreos/etcd:v2.2.2", + "cpu": "cpu3", + }, + ) + acc.AssertContainsTaggedFields(t, + "docker_mem", + map[string]interface{}{ + "total_pgpgout": uint64(0), + "usage_percent": float64(0), + "rss": uint64(0), + "total_writeback": uint64(0), + "active_anon": uint64(0), + "total_pgmafault": uint64(0), + "total_rss": uint64(0), + "total_unevictable": uint64(0), + "active_file": uint64(0), + "total_mapped_file": uint64(0), + "pgpgin": uint64(0), + "total_active_file": uint64(0), + "total_active_anon": uint64(0), + "total_cache": uint64(0), + "inactive_anon": uint64(0), + "pgmajfault": uint64(0), + "total_inactive_anon": uint64(0), + "total_rss_huge": uint64(0), + "rss_huge": uint64(0), + "hierarchical_memory_limit": uint64(0), + "pgpgout": uint64(0), + "unevictable": uint64(0), + "total_inactive_file": uint64(0), + "writeback": uint64(0), + "total_pgfault": uint64(0), + "total_pgpgin": uint64(0), + "cache": uint64(0), + "mapped_file": uint64(0), + "inactive_file": uint64(0), + "max_usage": uint64(0), + "fail_count": uint64(0), + "pgfault": uint64(0), + "usage": uint64(0), + "limit": uint64(18935443456), + }, + map[string]string{ + "cont_id": "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173", + "cont_name": "etcd2", + "cont_image": "quay.io/coreos/etcd:v2.2.2", + }, + ) + + //fmt.Print(info) +} From 8f98c20c51b4b93cb6f33c9bc385a4c3db242bc9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E5=85=89=E6=9D=83?= Date: Fri, 4 Mar 2016 00:09:49 +0800 Subject: [PATCH 119/287] Add flags -usage-list to print all plugins inputs for telegraf --- cmd/telegraf/telegraf.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index a65c5607c..ec8e6315a 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -11,7 +11,7 @@ import ( "github.com/influxdata/telegraf/agent" "github.com/influxdata/telegraf/internal/config" - + "github.com/influxdata/telegraf/plugins/inputs" _ "github.com/influxdata/telegraf/plugins/inputs/all" _ "github.com/influxdata/telegraf/plugins/outputs/all" ) @@ -34,6 +34,7 @@ var fOutputFilters = flag.String("output-filter", "", "filter the outputs to enable, separator is :") var fUsage = flag.String("usage", "", "print usage for a plugin, ie, 'telegraf -usage mysql'") +var fUsageList = flag.Bool("usage-list", false, "print all the plugins inputs") var fInputFiltersLegacy = flag.String("filter", "", "filter the inputs to enable, separator is :") @@ -61,6 +62,7 @@ The flags are: -input-filter filter the input plugins to enable, separator is : -output-filter filter the output plugins to enable, separator is : -usage print usage for a plugin, ie, 'telegraf -usage mysql' + -usage-list print all the plugins input -debug print metrics as they're generated to stdout -quiet run in quiet mode -version print the version to stdout @@ -135,6 +137,13 @@ func main() { return } + if *fUsageList { + fmt.Println("The plugin inputs avaiable:") + for k, _ := range inputs.Inputs { + fmt.Printf(" %s\n", k) + } + } + var ( c *config.Config err error From 3249030257e1382d02ed899de9c2752a66fd3a26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E5=85=89=E6=9D=83?= Date: Sun, 6 Mar 2016 20:08:51 +0800 Subject: [PATCH 120/287] add flags '-input-list' and '-output-list' for telegraf command --- cmd/telegraf/telegraf.go | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index ec8e6315a..b07f0d303 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -13,6 +13,7 @@ import ( "github.com/influxdata/telegraf/internal/config" "github.com/influxdata/telegraf/plugins/inputs" _ "github.com/influxdata/telegraf/plugins/inputs/all" + "github.com/influxdata/telegraf/plugins/outputs" _ "github.com/influxdata/telegraf/plugins/outputs/all" ) @@ -30,12 +31,13 @@ var fSampleConfig = flag.Bool("sample-config", false, var fPidfile = flag.String("pidfile", "", "file to write our pid to") var fInputFilters = flag.String("input-filter", "", "filter the inputs to enable, separator is :") +var fInpuList = flag.Bool("input-list", false, "print all the plugins inputs") var fOutputFilters = flag.String("output-filter", "", "filter the outputs to enable, separator is :") +var fOutputList = flag.Bool("output-list", false, + "print all the available outputs") var fUsage = flag.String("usage", "", "print usage for a plugin, ie, 'telegraf -usage mysql'") -var fUsageList = flag.Bool("usage-list", false, "print all the plugins inputs") - var fInputFiltersLegacy = flag.String("filter", "", "filter the inputs to enable, separator is :") var fOutputFiltersLegacy = flag.String("outputfilter", "", @@ -60,9 +62,10 @@ The flags are: -sample-config print out full sample configuration to stdout -config-directory directory containing additional *.conf files -input-filter filter the input plugins to enable, separator is : + -input-list print all the plugins inputs -output-filter filter the output plugins to enable, separator is : + -output-list print all the available outputs -usage print usage for a plugin, ie, 'telegraf -usage mysql' - -usage-list print all the plugins input -debug print metrics as they're generated to stdout -quiet run in quiet mode -version print the version to stdout @@ -117,6 +120,13 @@ func main() { outputFilters = strings.Split(":"+outputFilter+":", ":") } + if *fOutputList { + fmt.Println("The outputs available:") + for k, _ := range outputs.Outputs { + fmt.Printf(" %s\n", k) + } + } + if *fVersion { v := fmt.Sprintf("Telegraf - Version %s", Version) fmt.Println(v) @@ -137,8 +147,8 @@ func main() { return } - if *fUsageList { - fmt.Println("The plugin inputs avaiable:") + if *fInpuList { + fmt.Println("The plugin inputs available:") for k, _ := range inputs.Inputs { fmt.Printf(" %s\n", k) } From fe44fa648a98f5fb58c7c111481fda65d7140989 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E5=85=89=E6=9D=83?= Date: Mon, 7 Mar 2016 17:41:57 +0800 Subject: [PATCH 121/287] Fix the incorrect indent of input-list help message --- cmd/telegraf/telegraf.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index b07f0d303..dea80cde3 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -62,7 +62,7 @@ The flags are: -sample-config print out full sample configuration to stdout -config-directory directory containing additional *.conf files -input-filter filter the input plugins to enable, separator is : - -input-list print all the plugins inputs + -input-list print all the plugins inputs -output-filter filter the output plugins to enable, separator is : -output-list print all the available outputs -usage print usage for a plugin, ie, 'telegraf -usage mysql' From 7e312797eca487d744b2f887d43459e1934dd75a Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 7 Mar 2016 11:42:01 +0100 Subject: [PATCH 122/287] Grammar corrections and consistency for output-list, input-list closes #788 --- CHANGELOG.md | 1 + cmd/telegraf/telegraf.go | 25 ++++++++++++++----------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e02c3bb9c..73a942730 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ - [#769](https://github.com/influxdata/telegraf/issues/769): httpjson plugin: allow specifying SSL configuration. - [#735](https://github.com/influxdata/telegraf/pull/735): SNMP Table feature. Thanks @titilambert! - [#754](https://github.com/influxdata/telegraf/pull/754): docker plugin: adding `docker info` metrics to output. Thanks @titilambert! +- [#788](https://github.com/influxdata/telegraf/pull/788): -input-list and -output-list command-line options. Thanks @ebookbug! ### Bugfixes - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index dea80cde3..d54aaa4e3 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -31,11 +31,12 @@ var fSampleConfig = flag.Bool("sample-config", false, var fPidfile = flag.String("pidfile", "", "file to write our pid to") var fInputFilters = flag.String("input-filter", "", "filter the inputs to enable, separator is :") -var fInpuList = flag.Bool("input-list", false, "print all the plugins inputs") +var fInputList = flag.Bool("input-list", false, + "print available output plugins.") var fOutputFilters = flag.String("output-filter", "", "filter the outputs to enable, separator is :") var fOutputList = flag.Bool("output-list", false, - "print all the available outputs") + "print available output plugins.") var fUsage = flag.String("usage", "", "print usage for a plugin, ie, 'telegraf -usage mysql'") var fInputFiltersLegacy = flag.String("filter", "", @@ -64,7 +65,7 @@ The flags are: -input-filter filter the input plugins to enable, separator is : -input-list print all the plugins inputs -output-filter filter the output plugins to enable, separator is : - -output-list print all the available outputs + -output-list print all the available outputs -usage print usage for a plugin, ie, 'telegraf -usage mysql' -debug print metrics as they're generated to stdout -quiet run in quiet mode @@ -121,10 +122,19 @@ func main() { } if *fOutputList { - fmt.Println("The outputs available:") + fmt.Println("Available Output Plugins:") for k, _ := range outputs.Outputs { fmt.Printf(" %s\n", k) } + return + } + + if *fInputList { + fmt.Println("Available Input Plugins:") + for k, _ := range inputs.Inputs { + fmt.Printf(" %s\n", k) + } + return } if *fVersion { @@ -147,13 +157,6 @@ func main() { return } - if *fInpuList { - fmt.Println("The plugin inputs available:") - for k, _ := range inputs.Inputs { - fmt.Printf(" %s\n", k) - } - } - var ( c *config.Config err error From 3cca312e61cde1c68a692c780edd8777b0e17a9d Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 2 Mar 2016 15:31:46 +0000 Subject: [PATCH 123/287] Adding a TCP input listener closes #481 --- CHANGELOG.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/tcp_listener/README.md | 30 ++ plugins/inputs/tcp_listener/tcp_listener.go | 264 ++++++++++++++++++ .../inputs/tcp_listener/tcp_listener_test.go | 259 +++++++++++++++++ 5 files changed, 555 insertions(+) create mode 100644 plugins/inputs/tcp_listener/README.md create mode 100644 plugins/inputs/tcp_listener/tcp_listener.go create mode 100644 plugins/inputs/tcp_listener/tcp_listener_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 73a942730..fe87e41dc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ - [#735](https://github.com/influxdata/telegraf/pull/735): SNMP Table feature. Thanks @titilambert! - [#754](https://github.com/influxdata/telegraf/pull/754): docker plugin: adding `docker info` metrics to output. Thanks @titilambert! - [#788](https://github.com/influxdata/telegraf/pull/788): -input-list and -output-list command-line options. Thanks @ebookbug! +- [#778](https://github.com/influxdata/telegraf/pull/778): Adding a TCP input listener. ### Bugfixes - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 262de37ac..2808ce2b5 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -47,6 +47,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/sqlserver" _ "github.com/influxdata/telegraf/plugins/inputs/statsd" _ "github.com/influxdata/telegraf/plugins/inputs/system" + _ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener" _ "github.com/influxdata/telegraf/plugins/inputs/trig" _ "github.com/influxdata/telegraf/plugins/inputs/twemproxy" _ "github.com/influxdata/telegraf/plugins/inputs/udp_listener" diff --git a/plugins/inputs/tcp_listener/README.md b/plugins/inputs/tcp_listener/README.md new file mode 100644 index 000000000..63a7dea3c --- /dev/null +++ b/plugins/inputs/tcp_listener/README.md @@ -0,0 +1,30 @@ +# TCP listener service input plugin + +The TCP listener is a service input plugin that listens for messages on a TCP +socket and adds those messages to InfluxDB. +The plugin expects messages in the +[Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). + +### Configuration: + +This is a sample configuration for the plugin. + +```toml +# Generic TCP listener +[[inputs.tcp_listener]] + ## Address and port to host TCP listener on + service_address = ":8094" + + ## Number of TCP messages allowed to queue up. Once filled, the + ## TCP listener will start dropping packets. + allowed_pending_messages = 10000 + + ## Maximum number of concurrent TCP connections to allow + max_tcp_connections = 250 + + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +``` diff --git a/plugins/inputs/tcp_listener/tcp_listener.go b/plugins/inputs/tcp_listener/tcp_listener.go new file mode 100644 index 000000000..dd239fedf --- /dev/null +++ b/plugins/inputs/tcp_listener/tcp_listener.go @@ -0,0 +1,264 @@ +package tcp_listener + +import ( + "bufio" + "fmt" + "log" + "net" + "sync" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers" +) + +type TcpListener struct { + ServiceAddress string + AllowedPendingMessages int + MaxTCPConnections int `toml:"max_tcp_connections"` + + sync.Mutex + // Lock for preventing a data race during resource cleanup + cleanup sync.Mutex + wg sync.WaitGroup + + in chan []byte + done chan struct{} + // accept channel tracks how many active connections there are, if there + // is an available bool in accept, then we are below the maximum and can + // accept the connection + accept chan bool + + // track the listener here so we can close it in Stop() + listener *net.TCPListener + // track current connections so we can close them in Stop() + conns map[string]*net.TCPConn + + parser parsers.Parser + acc telegraf.Accumulator +} + +var dropwarn = "ERROR: Message queue full. Discarding line [%s] " + + "You may want to increase allowed_pending_messages in the config\n" + +const sampleConfig = ` + ## Address and port to host TCP listener on + service_address = ":8094" + + ## Number of TCP messages allowed to queue up. Once filled, the + ## TCP listener will start dropping packets. + allowed_pending_messages = 10000 + + ## Maximum number of concurrent TCP connections to allow + max_tcp_connections = 250 + + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +` + +func (t *TcpListener) SampleConfig() string { + return sampleConfig +} + +func (t *TcpListener) Description() string { + return "Generic TCP listener" +} + +// All the work is done in the Start() function, so this is just a dummy +// function. +func (t *TcpListener) Gather(_ telegraf.Accumulator) error { + return nil +} + +func (t *TcpListener) SetParser(parser parsers.Parser) { + t.parser = parser +} + +// Start starts the tcp listener service. +func (t *TcpListener) Start(acc telegraf.Accumulator) error { + t.Lock() + defer t.Unlock() + + t.acc = acc + t.in = make(chan []byte, t.AllowedPendingMessages) + t.done = make(chan struct{}) + t.accept = make(chan bool, t.MaxTCPConnections) + t.conns = make(map[string]*net.TCPConn) + for i := 0; i < t.MaxTCPConnections; i++ { + t.accept <- true + } + + // Start listener + var err error + address, _ := net.ResolveTCPAddr("tcp", t.ServiceAddress) + t.listener, err = net.ListenTCP("tcp", address) + if err != nil { + log.Fatalf("ERROR: ListenUDP - %s", err) + return err + } + log.Println("TCP server listening on: ", t.listener.Addr().String()) + + t.wg.Add(2) + go t.tcpListen() + go t.tcpParser() + + log.Printf("Started TCP listener service on %s\n", t.ServiceAddress) + return nil +} + +// Stop cleans up all resources +func (t *TcpListener) Stop() { + t.Lock() + defer t.Unlock() + close(t.done) + t.listener.Close() + + // Close all open TCP connections + // - get all conns from the t.conns map and put into slice + // - this is so the forget() function doesnt conflict with looping + // over the t.conns map + var conns []*net.TCPConn + t.cleanup.Lock() + for _, conn := range t.conns { + conns = append(conns, conn) + } + t.cleanup.Unlock() + for _, conn := range conns { + conn.Close() + } + + t.wg.Wait() + close(t.in) + log.Println("Stopped TCP listener service on ", t.ServiceAddress) +} + +// tcpListen listens for incoming TCP connections. +func (t *TcpListener) tcpListen() error { + defer t.wg.Done() + + for { + select { + case <-t.done: + return nil + default: + // Accept connection: + conn, err := t.listener.AcceptTCP() + if err != nil { + return err + } + + log.Printf("Received TCP Connection from %s", conn.RemoteAddr()) + + select { + case <-t.accept: + // not over connection limit, handle the connection properly. + t.wg.Add(1) + // generate a random id for this TCPConn + id := internal.RandomString(6) + t.remember(id, conn) + go t.handler(conn, id) + default: + // We are over the connection limit, refuse & close. + t.refuser(conn) + } + } + } +} + +// refuser refuses a TCP connection +func (t *TcpListener) refuser(conn *net.TCPConn) { + // Tell the connection why we are closing. + fmt.Fprintf(conn, "Telegraf maximum concurrent TCP connections (%d)"+ + " reached, closing.\nYou may want to increase max_tcp_connections in"+ + " the Telegraf tcp listener configuration.\n", t.MaxTCPConnections) + conn.Close() + log.Printf("Refused TCP Connection from %s", conn.RemoteAddr()) + log.Printf("WARNING: Maximum TCP Connections reached, you may want to" + + " adjust max_tcp_connections") +} + +// handler handles a single TCP Connection +func (t *TcpListener) handler(conn *net.TCPConn, id string) { + // connection cleanup function + defer func() { + t.wg.Done() + conn.Close() + log.Printf("Closed TCP Connection from %s", conn.RemoteAddr()) + // Add one connection potential back to channel when this one closes + t.accept <- true + t.forget(id) + }() + + scanner := bufio.NewScanner(conn) + for { + select { + case <-t.done: + return + default: + if !scanner.Scan() { + return + } + buf := scanner.Bytes() + select { + case t.in <- buf: + default: + log.Printf(dropwarn, string(buf)) + } + } + } +} + +// tcpParser parses the incoming tcp byte packets +func (t *TcpListener) tcpParser() error { + defer t.wg.Done() + for { + select { + case <-t.done: + return nil + case packet := <-t.in: + if len(packet) == 0 { + continue + } + metrics, err := t.parser.Parse(packet) + if err == nil { + t.storeMetrics(metrics) + } else { + log.Printf("Malformed packet: [%s], Error: %s\n", + string(packet), err) + } + } + } +} + +func (t *TcpListener) storeMetrics(metrics []telegraf.Metric) error { + t.Lock() + defer t.Unlock() + for _, m := range metrics { + t.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) + } + return nil +} + +// forget a TCP connection +func (t *TcpListener) forget(id string) { + t.cleanup.Lock() + defer t.cleanup.Unlock() + delete(t.conns, id) +} + +// remember a TCP connection +func (t *TcpListener) remember(id string, conn *net.TCPConn) { + t.cleanup.Lock() + defer t.cleanup.Unlock() + t.conns[id] = conn +} + +func init() { + inputs.Add("tcp_listener", func() telegraf.Input { + return &TcpListener{} + }) +} diff --git a/plugins/inputs/tcp_listener/tcp_listener_test.go b/plugins/inputs/tcp_listener/tcp_listener_test.go new file mode 100644 index 000000000..b4aec9dd2 --- /dev/null +++ b/plugins/inputs/tcp_listener/tcp_listener_test.go @@ -0,0 +1,259 @@ +package tcp_listener + +import ( + "fmt" + "net" + "testing" + "time" + + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + testMsg = "cpu_load_short,host=server01 value=12.0 1422568543702900257\n" + + testMsgs = ` +cpu_load_short,host=server02 value=12.0 1422568543702900257 +cpu_load_short,host=server03 value=12.0 1422568543702900257 +cpu_load_short,host=server04 value=12.0 1422568543702900257 +cpu_load_short,host=server05 value=12.0 1422568543702900257 +cpu_load_short,host=server06 value=12.0 1422568543702900257 +` +) + +func newTestTcpListener() (*TcpListener, chan []byte) { + in := make(chan []byte, 1500) + listener := &TcpListener{ + ServiceAddress: ":8194", + AllowedPendingMessages: 10000, + MaxTCPConnections: 250, + in: in, + done: make(chan struct{}), + } + return listener, in +} + +func TestConnectTCP(t *testing.T) { + listener := TcpListener{ + ServiceAddress: ":8194", + AllowedPendingMessages: 10000, + MaxTCPConnections: 250, + } + listener.parser, _ = parsers.NewInfluxParser() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + time.Sleep(time.Millisecond * 25) + conn, err := net.Dial("tcp", "127.0.0.1:8194") + require.NoError(t, err) + + // send single message to socket + fmt.Fprintf(conn, testMsg) + time.Sleep(time.Millisecond * 15) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01"}, + ) + + // send multiple messages to socket + fmt.Fprintf(conn, testMsgs) + time.Sleep(time.Millisecond * 15) + hostTags := []string{"server02", "server03", + "server04", "server05", "server06"} + for _, hostTag := range hostTags { + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": hostTag}, + ) + } +} + +// Test that MaxTCPConections is respected +func TestConcurrentConns(t *testing.T) { + listener := TcpListener{ + ServiceAddress: ":8195", + AllowedPendingMessages: 10000, + MaxTCPConnections: 2, + } + listener.parser, _ = parsers.NewInfluxParser() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + time.Sleep(time.Millisecond * 25) + _, err := net.Dial("tcp", "127.0.0.1:8195") + assert.NoError(t, err) + _, err = net.Dial("tcp", "127.0.0.1:8195") + assert.NoError(t, err) + + // Connection over the limit: + conn, err := net.Dial("tcp", "127.0.0.1:8195") + assert.NoError(t, err) + net.Dial("tcp", "127.0.0.1:8195") + buf := make([]byte, 1500) + n, err := conn.Read(buf) + assert.NoError(t, err) + assert.Equal(t, + "Telegraf maximum concurrent TCP connections (2) reached, closing.\n"+ + "You may want to increase max_tcp_connections in"+ + " the Telegraf tcp listener configuration.\n", + string(buf[:n])) + + _, err = conn.Write([]byte(testMsg)) + assert.NoError(t, err) + time.Sleep(time.Millisecond * 10) + assert.Zero(t, acc.NFields()) +} + +// Test that MaxTCPConections is respected when max==1 +func TestConcurrentConns1(t *testing.T) { + listener := TcpListener{ + ServiceAddress: ":8196", + AllowedPendingMessages: 10000, + MaxTCPConnections: 1, + } + listener.parser, _ = parsers.NewInfluxParser() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + time.Sleep(time.Millisecond * 25) + _, err := net.Dial("tcp", "127.0.0.1:8196") + assert.NoError(t, err) + + // Connection over the limit: + conn, err := net.Dial("tcp", "127.0.0.1:8196") + assert.NoError(t, err) + net.Dial("tcp", "127.0.0.1:8196") + buf := make([]byte, 1500) + n, err := conn.Read(buf) + assert.NoError(t, err) + assert.Equal(t, + "Telegraf maximum concurrent TCP connections (1) reached, closing.\n"+ + "You may want to increase max_tcp_connections in"+ + " the Telegraf tcp listener configuration.\n", + string(buf[:n])) + + _, err = conn.Write([]byte(testMsg)) + assert.NoError(t, err) + time.Sleep(time.Millisecond * 10) + assert.Zero(t, acc.NFields()) +} + +// Test that MaxTCPConections is respected +func TestCloseConcurrentConns(t *testing.T) { + listener := TcpListener{ + ServiceAddress: ":8195", + AllowedPendingMessages: 10000, + MaxTCPConnections: 2, + } + listener.parser, _ = parsers.NewInfluxParser() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + + time.Sleep(time.Millisecond * 25) + _, err := net.Dial("tcp", "127.0.0.1:8195") + assert.NoError(t, err) + _, err = net.Dial("tcp", "127.0.0.1:8195") + assert.NoError(t, err) + + listener.Stop() +} + +func TestRunParser(t *testing.T) { + var testmsg = []byte(testMsg) + + listener, in := newTestTcpListener() + acc := testutil.Accumulator{} + listener.acc = &acc + defer close(listener.done) + + listener.parser, _ = parsers.NewInfluxParser() + listener.wg.Add(1) + go listener.tcpParser() + + in <- testmsg + time.Sleep(time.Millisecond * 25) + listener.Gather(&acc) + + if a := acc.NFields(); a != 1 { + t.Errorf("got %v, expected %v", a, 1) + } + + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01"}, + ) +} + +func TestRunParserInvalidMsg(t *testing.T) { + var testmsg = []byte("cpu_load_short") + + listener, in := newTestTcpListener() + acc := testutil.Accumulator{} + listener.acc = &acc + defer close(listener.done) + + listener.parser, _ = parsers.NewInfluxParser() + listener.wg.Add(1) + go listener.tcpParser() + + in <- testmsg + time.Sleep(time.Millisecond * 25) + + if a := acc.NFields(); a != 0 { + t.Errorf("got %v, expected %v", a, 0) + } +} + +func TestRunParserGraphiteMsg(t *testing.T) { + var testmsg = []byte("cpu.load.graphite 12 1454780029") + + listener, in := newTestTcpListener() + acc := testutil.Accumulator{} + listener.acc = &acc + defer close(listener.done) + + listener.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) + listener.wg.Add(1) + go listener.tcpParser() + + in <- testmsg + time.Sleep(time.Millisecond * 25) + listener.Gather(&acc) + + acc.AssertContainsFields(t, "cpu_load_graphite", + map[string]interface{}{"value": float64(12)}) +} + +func TestRunParserJSONMsg(t *testing.T) { + var testmsg = []byte("{\"a\": 5, \"b\": {\"c\": 6}}\n") + + listener, in := newTestTcpListener() + acc := testutil.Accumulator{} + listener.acc = &acc + defer close(listener.done) + + listener.parser, _ = parsers.NewJSONParser("udp_json_test", []string{}, nil) + listener.wg.Add(1) + go listener.tcpParser() + + in <- testmsg + time.Sleep(time.Millisecond * 25) + listener.Gather(&acc) + + acc.AssertContainsFields(t, "udp_json_test", + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }) +} From 6139a69fa86e84961b8717984e3c947477daa5e7 Mon Sep 17 00:00:00 2001 From: Matt Morrison Date: Mon, 7 Mar 2016 17:13:29 +1300 Subject: [PATCH 124/287] [SNMP Input] SNMPMap() loops forever if table has more than 32 entries closes #800 closes #801 --- plugins/inputs/snmp/snmp.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 3d4827fc1..2af293d57 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -464,13 +464,14 @@ func (h *Host) SNMPMap(acc telegraf.Accumulator) error { // To get mapping between instance id // and instance name oid_asked := table.mappingTable + oid_next := oid_asked need_more_requests := true // Set max repetition maxRepetition := uint8(32) // Launch requests for need_more_requests { // Launch request - result, err3 := snmpClient.GetBulk([]string{oid_asked}, 0, maxRepetition) + result, err3 := snmpClient.GetBulk([]string{oid_next}, 0, maxRepetition) if err3 != nil { return err3 } @@ -572,6 +573,7 @@ func (h *Host) SNMPMap(acc telegraf.Accumulator) error { // Determine if we need more requests if strings.HasPrefix(lastOid, oid_asked) { need_more_requests = true + oid_next = lastOid } else { need_more_requests = false } From 41534c73f0af3512465d85a8606566225bcd2b14 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 7 Mar 2016 13:56:10 +0100 Subject: [PATCH 125/287] mqtt_consumer: option to set persistent session and client ID closes #797 --- CHANGELOG.md | 1 + plugins/inputs/mqtt_consumer/mqtt_consumer.go | 22 ++++++++- .../mqtt_consumer/mqtt_consumer_test.go | 48 +++++++++++++++++++ 3 files changed, 70 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fe87e41dc..2640d3f21 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ - [#754](https://github.com/influxdata/telegraf/pull/754): docker plugin: adding `docker info` metrics to output. Thanks @titilambert! - [#788](https://github.com/influxdata/telegraf/pull/788): -input-list and -output-list command-line options. Thanks @ebookbug! - [#778](https://github.com/influxdata/telegraf/pull/778): Adding a TCP input listener. +- [#797](https://github.com/influxdata/telegraf/issues/797): Provide option for persistent MQTT consumer client sessions. ### Bugfixes - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 42cadfd60..e36889703 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -26,6 +26,9 @@ type MQTTConsumer struct { // Legacy metric buffer support MetricBuffer int + PersistentSession bool + ClientID string `toml:"client_id"` + // Path to CA file SSLCA string `toml:"ssl_ca"` // Path to host cert file @@ -57,6 +60,13 @@ var sampleConfig = ` "sensors/#", ] + # if true, messages that can't be delivered while the subscriber is offline + # will be delivered when it comes back (such as on service restart). + # NOTE: if true, client_id MUST be set + persistent_session = false + # If empty, a random client ID will be generated. + client_id = "" + ## username and password to connect MQTT server. # username = "telegraf" # password = "metricsmetricsmetricsmetrics" @@ -91,6 +101,11 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { m.Lock() defer m.Unlock() + if m.PersistentSession && m.ClientID == "" { + return fmt.Errorf("ERROR MQTT Consumer: When using persistent_session" + + " = true, you MUST also set client_id") + } + m.acc = acc if m.QoS > 2 || m.QoS < 0 { return fmt.Errorf("MQTT Consumer, invalid QoS value: %d", m.QoS) @@ -166,7 +181,11 @@ func (m *MQTTConsumer) Gather(acc telegraf.Accumulator) error { func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { opts := mqtt.NewClientOptions() - opts.SetClientID("Telegraf-Consumer-" + internal.RandomString(5)) + if m.ClientID == "" { + opts.SetClientID("Telegraf-Consumer-" + internal.RandomString(5)) + } else { + opts.SetClientID(m.ClientID) + } tlsCfg, err := internal.GetTLSConfig( m.SSLCert, m.SSLKey, m.SSLCA, m.InsecureSkipVerify) @@ -199,6 +218,7 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { } opts.SetAutoReconnect(true) opts.SetKeepAlive(time.Second * 60) + opts.SetCleanSession(!m.PersistentSession) return opts, nil } diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index b1dd59bcf..e926ebbb2 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -7,6 +7,8 @@ import ( "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" ) @@ -28,6 +30,52 @@ func newTestMQTTConsumer() (*MQTTConsumer, chan mqtt.Message) { return n, in } +// Test that default client has random ID +func TestRandomClientID(t *testing.T) { + m1 := &MQTTConsumer{ + Servers: []string{"localhost:1883"}} + opts, err := m1.createOpts() + assert.NoError(t, err) + + m2 := &MQTTConsumer{ + Servers: []string{"localhost:1883"}} + opts2, err2 := m2.createOpts() + assert.NoError(t, err2) + + assert.NotEqual(t, opts.ClientID, opts2.ClientID) +} + +// Test that default client has random ID +func TestClientID(t *testing.T) { + m1 := &MQTTConsumer{ + Servers: []string{"localhost:1883"}, + ClientID: "telegraf-test", + } + opts, err := m1.createOpts() + assert.NoError(t, err) + + m2 := &MQTTConsumer{ + Servers: []string{"localhost:1883"}, + ClientID: "telegraf-test", + } + opts2, err2 := m2.createOpts() + assert.NoError(t, err2) + + assert.Equal(t, "telegraf-test", opts2.ClientID) + assert.Equal(t, "telegraf-test", opts.ClientID) +} + +// Test that Start() fails if client ID is not set but persistent is +func TestPersistentClientIDFail(t *testing.T) { + m1 := &MQTTConsumer{ + Servers: []string{"localhost:1883"}, + PersistentSession: true, + } + acc := testutil.Accumulator{} + err := m1.Start(&acc) + assert.Error(t, err) +} + // Test that the parser parses NATS messages into metrics func TestRunParser(t *testing.T) { n, in := newTestMQTTConsumer() From 240f99478ad847def48f0a4144fe095d50bb1c79 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 7 Mar 2016 15:46:23 +0100 Subject: [PATCH 126/287] Prevent Inf and NaN from being added, and unit test Accumulator closes #803 --- agent/accumulator.go | 7 +- agent/accumulator_test.go | 302 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 308 insertions(+), 1 deletion(-) create mode 100644 agent/accumulator_test.go diff --git a/agent/accumulator.go b/agent/accumulator.go index b04ff2b53..7ec22cd7f 100644 --- a/agent/accumulator.go +++ b/agent/accumulator.go @@ -105,7 +105,6 @@ func (ac *accumulator) AddFields( continue } } - result[k] = v // Validate uint64 and float64 fields switch val := v.(type) { @@ -116,6 +115,7 @@ func (ac *accumulator) AddFields( } else { result[k] = int64(9223372036854775807) } + continue case float64: // NaNs are invalid values in influxdb, skip measurement if math.IsNaN(val) || math.IsInf(val, 0) { @@ -127,6 +127,8 @@ func (ac *accumulator) AddFields( continue } } + + result[k] = v } fields = nil if len(result) == 0 { @@ -168,5 +170,8 @@ func (ac *accumulator) setDefaultTags(tags map[string]string) { } func (ac *accumulator) addDefaultTag(key, value string) { + if ac.defaultTags == nil { + ac.defaultTags = make(map[string]string) + } ac.defaultTags[key] = value } diff --git a/agent/accumulator_test.go b/agent/accumulator_test.go new file mode 100644 index 000000000..05f9b02aa --- /dev/null +++ b/agent/accumulator_test.go @@ -0,0 +1,302 @@ +package agent + +import ( + "fmt" + "math" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/models" + + "github.com/stretchr/testify/assert" +) + +func TestAdd(t *testing.T) { + a := accumulator{} + now := time.Now() + a.metrics = make(chan telegraf.Metric, 10) + defer close(a.metrics) + a.inputConfig = &internal_models.InputConfig{} + + a.Add("acctest", float64(101), map[string]string{}) + a.Add("acctest", float64(101), map[string]string{"acc": "test"}) + a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now) + + testm := <-a.metrics + actual := testm.String() + assert.Contains(t, actual, "acctest value=101") + + testm = <-a.metrics + actual = testm.String() + assert.Contains(t, actual, "acctest,acc=test value=101") + + testm = <-a.metrics + actual = testm.String() + assert.Equal(t, + fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()), + actual) +} + +func TestAddDefaultTags(t *testing.T) { + a := accumulator{} + a.addDefaultTag("default", "tag") + now := time.Now() + a.metrics = make(chan telegraf.Metric, 10) + defer close(a.metrics) + a.inputConfig = &internal_models.InputConfig{} + + a.Add("acctest", float64(101), map[string]string{}) + a.Add("acctest", float64(101), map[string]string{"acc": "test"}) + a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now) + + testm := <-a.metrics + actual := testm.String() + assert.Contains(t, actual, "acctest,default=tag value=101") + + testm = <-a.metrics + actual = testm.String() + assert.Contains(t, actual, "acctest,acc=test,default=tag value=101") + + testm = <-a.metrics + actual = testm.String() + assert.Equal(t, + fmt.Sprintf("acctest,acc=test,default=tag value=101 %d", now.UnixNano()), + actual) +} + +func TestAddFields(t *testing.T) { + a := accumulator{} + now := time.Now() + a.metrics = make(chan telegraf.Metric, 10) + defer close(a.metrics) + a.inputConfig = &internal_models.InputConfig{} + + fields := map[string]interface{}{ + "usage": float64(99), + } + a.AddFields("acctest", fields, map[string]string{}) + a.AddFields("acctest", fields, map[string]string{"acc": "test"}) + a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now) + + testm := <-a.metrics + actual := testm.String() + assert.Contains(t, actual, "acctest usage=99") + + testm = <-a.metrics + actual = testm.String() + assert.Contains(t, actual, "acctest,acc=test usage=99") + + testm = <-a.metrics + actual = testm.String() + assert.Equal(t, + fmt.Sprintf("acctest,acc=test usage=99 %d", now.UnixNano()), + actual) +} + +// Test that all Inf fields get dropped, and not added to metrics channel +func TestAddInfFields(t *testing.T) { + inf := math.Inf(1) + ninf := math.Inf(-1) + + a := accumulator{} + now := time.Now() + a.metrics = make(chan telegraf.Metric, 10) + defer close(a.metrics) + a.inputConfig = &internal_models.InputConfig{} + + fields := map[string]interface{}{ + "usage": inf, + "nusage": ninf, + } + a.AddFields("acctest", fields, map[string]string{}) + a.AddFields("acctest", fields, map[string]string{"acc": "test"}) + a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now) + + assert.Len(t, a.metrics, 0) + + // test that non-inf fields are kept and not dropped + fields["notinf"] = float64(100) + a.AddFields("acctest", fields, map[string]string{}) + testm := <-a.metrics + actual := testm.String() + assert.Contains(t, actual, "acctest notinf=100") +} + +// Test that nan fields are dropped and not added +func TestAddNaNFields(t *testing.T) { + nan := math.NaN() + + a := accumulator{} + now := time.Now() + a.metrics = make(chan telegraf.Metric, 10) + defer close(a.metrics) + a.inputConfig = &internal_models.InputConfig{} + + fields := map[string]interface{}{ + "usage": nan, + } + a.AddFields("acctest", fields, map[string]string{}) + a.AddFields("acctest", fields, map[string]string{"acc": "test"}) + a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now) + + assert.Len(t, a.metrics, 0) + + // test that non-nan fields are kept and not dropped + fields["notnan"] = float64(100) + a.AddFields("acctest", fields, map[string]string{}) + testm := <-a.metrics + actual := testm.String() + assert.Contains(t, actual, "acctest notnan=100") +} + +func TestAddUint64Fields(t *testing.T) { + a := accumulator{} + now := time.Now() + a.metrics = make(chan telegraf.Metric, 10) + defer close(a.metrics) + a.inputConfig = &internal_models.InputConfig{} + + fields := map[string]interface{}{ + "usage": uint64(99), + } + a.AddFields("acctest", fields, map[string]string{}) + a.AddFields("acctest", fields, map[string]string{"acc": "test"}) + a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now) + + testm := <-a.metrics + actual := testm.String() + assert.Contains(t, actual, "acctest usage=99i") + + testm = <-a.metrics + actual = testm.String() + assert.Contains(t, actual, "acctest,acc=test usage=99i") + + testm = <-a.metrics + actual = testm.String() + assert.Equal(t, + fmt.Sprintf("acctest,acc=test usage=99i %d", now.UnixNano()), + actual) +} + +func TestAddUint64Overflow(t *testing.T) { + a := accumulator{} + now := time.Now() + a.metrics = make(chan telegraf.Metric, 10) + defer close(a.metrics) + a.inputConfig = &internal_models.InputConfig{} + + fields := map[string]interface{}{ + "usage": uint64(9223372036854775808), + } + a.AddFields("acctest", fields, map[string]string{}) + a.AddFields("acctest", fields, map[string]string{"acc": "test"}) + a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now) + + testm := <-a.metrics + actual := testm.String() + assert.Contains(t, actual, "acctest usage=9223372036854775807i") + + testm = <-a.metrics + actual = testm.String() + assert.Contains(t, actual, "acctest,acc=test usage=9223372036854775807i") + + testm = <-a.metrics + actual = testm.String() + assert.Equal(t, + fmt.Sprintf("acctest,acc=test usage=9223372036854775807i %d", now.UnixNano()), + actual) +} + +func TestAddInts(t *testing.T) { + a := accumulator{} + a.addDefaultTag("default", "tag") + now := time.Now() + a.metrics = make(chan telegraf.Metric, 10) + defer close(a.metrics) + a.inputConfig = &internal_models.InputConfig{} + + a.Add("acctest", int(101), map[string]string{}) + a.Add("acctest", int32(101), map[string]string{"acc": "test"}) + a.Add("acctest", int64(101), map[string]string{"acc": "test"}, now) + + testm := <-a.metrics + actual := testm.String() + assert.Contains(t, actual, "acctest,default=tag value=101i") + + testm = <-a.metrics + actual = testm.String() + assert.Contains(t, actual, "acctest,acc=test,default=tag value=101i") + + testm = <-a.metrics + actual = testm.String() + assert.Equal(t, + fmt.Sprintf("acctest,acc=test,default=tag value=101i %d", now.UnixNano()), + actual) +} + +func TestAddFloats(t *testing.T) { + a := accumulator{} + a.addDefaultTag("default", "tag") + now := time.Now() + a.metrics = make(chan telegraf.Metric, 10) + defer close(a.metrics) + a.inputConfig = &internal_models.InputConfig{} + + a.Add("acctest", float32(101), map[string]string{"acc": "test"}) + a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now) + + testm := <-a.metrics + actual := testm.String() + assert.Contains(t, actual, "acctest,acc=test,default=tag value=101") + + testm = <-a.metrics + actual = testm.String() + assert.Equal(t, + fmt.Sprintf("acctest,acc=test,default=tag value=101 %d", now.UnixNano()), + actual) +} + +func TestAddStrings(t *testing.T) { + a := accumulator{} + a.addDefaultTag("default", "tag") + now := time.Now() + a.metrics = make(chan telegraf.Metric, 10) + defer close(a.metrics) + a.inputConfig = &internal_models.InputConfig{} + + a.Add("acctest", "test", map[string]string{"acc": "test"}) + a.Add("acctest", "foo", map[string]string{"acc": "test"}, now) + + testm := <-a.metrics + actual := testm.String() + assert.Contains(t, actual, "acctest,acc=test,default=tag value=\"test\"") + + testm = <-a.metrics + actual = testm.String() + assert.Equal(t, + fmt.Sprintf("acctest,acc=test,default=tag value=\"foo\" %d", now.UnixNano()), + actual) +} + +func TestAddBools(t *testing.T) { + a := accumulator{} + a.addDefaultTag("default", "tag") + now := time.Now() + a.metrics = make(chan telegraf.Metric, 10) + defer close(a.metrics) + a.inputConfig = &internal_models.InputConfig{} + + a.Add("acctest", true, map[string]string{"acc": "test"}) + a.Add("acctest", false, map[string]string{"acc": "test"}, now) + + testm := <-a.metrics + actual := testm.String() + assert.Contains(t, actual, "acctest,acc=test,default=tag value=true") + + testm = <-a.metrics + actual = testm.String() + assert.Equal(t, + fmt.Sprintf("acctest,acc=test,default=tag value=false %d", now.UnixNano()), + actual) +} From cd66e203bd8e00d6cfa2e70b143562af27f2159f Mon Sep 17 00:00:00 2001 From: Thibault Cohen Date: Sun, 6 Mar 2016 01:42:14 -0500 Subject: [PATCH 127/287] Improve procstat closes #799 --- CHANGELOG.md | 1 + plugins/inputs/procstat/README.md | 4 ++++ plugins/inputs/procstat/spec_processor.go | 10 ++++++++++ 3 files changed, 15 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2640d3f21..8632e7cb6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ - [#788](https://github.com/influxdata/telegraf/pull/788): -input-list and -output-list command-line options. Thanks @ebookbug! - [#778](https://github.com/influxdata/telegraf/pull/778): Adding a TCP input listener. - [#797](https://github.com/influxdata/telegraf/issues/797): Provide option for persistent MQTT consumer client sessions. +- [#799](https://github.com/influxdata/telegraf/pull/799): Add number of threads for procstat input plugin. Thanks @titilambert! ### Bugfixes - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index 90552c2a6..ef96500a3 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -35,6 +35,10 @@ The above configuration would result in output like: # Measurements Note: prefix can be set by the user, per process. + +Threads related measurement names: +- procstat_[prefix_]num_threads value=5 + File descriptor related measurement names: - procstat_[prefix_]num_fds value=4 diff --git a/plugins/inputs/procstat/spec_processor.go b/plugins/inputs/procstat/spec_processor.go index b09ed4f21..bb248f003 100644 --- a/plugins/inputs/procstat/spec_processor.go +++ b/plugins/inputs/procstat/spec_processor.go @@ -52,6 +52,7 @@ func NewSpecProcessor( } func (p *SpecProcessor) pushMetrics() { + p.pushNThreadsStats() p.pushFDStats() p.pushCtxStats() p.pushIOStats() @@ -60,6 +61,15 @@ func (p *SpecProcessor) pushMetrics() { p.flush() } +func (p *SpecProcessor) pushNThreadsStats() error { + numThreads, err := p.proc.NumThreads() + if err != nil { + return fmt.Errorf("NumThreads error: %s\n", err) + } + p.add("num_threads", numThreads) + return nil +} + func (p *SpecProcessor) pushFDStats() error { fds, err := p.proc.NumFDs() if err != nil { From 0060df987752ad561b6c0684a684d326f89bf5e5 Mon Sep 17 00:00:00 2001 From: Prune Sebastien THOMAS Date: Tue, 1 Mar 2016 14:11:18 -0500 Subject: [PATCH 128/287] added zookeeper_chroot option added a plugin option zookeeper_chroot to set up the kafka endpoint in zookeeper, which may not be / (default). This chroot is then configured in the consumergroup config.Zookeeper.Chroot This is workaround the fact that this plugins does not handle the urls like "zookeeper_server:port/chroot" As the peers are stored in an array, it makes no sens to have them beeing URL. Peers should all be members of the same cluster, so they all have the same chroot. --- plugins/inputs/kafka_consumer/kafka_consumer.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index bc0d225c6..0d2a49f89 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -17,6 +17,7 @@ type Kafka struct { ConsumerGroup string Topics []string ZookeeperPeers []string + ZookeeperChroot string Consumer *consumergroup.ConsumerGroup // Legacy metric buffer support @@ -48,6 +49,8 @@ var sampleConfig = ` topics = ["telegraf"] ## an array of Zookeeper connection strings zookeeper_peers = ["localhost:2181"] + ## Zookeeper Chroot + zookeeper_chroot = "/" ## the name of the consumer group consumer_group = "telegraf_metrics_consumers" ## Offset (must be either "oldest" or "newest") @@ -80,6 +83,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error { k.acc = acc config := consumergroup.NewConfig() + config.Zookeeper.Chroot = k.ZookeeperChroot switch strings.ToLower(k.Offset) { case "oldest", "": config.Offsets.Initial = sarama.OffsetOldest From bd3d0c330f6284b4b47954bc53fb7546a711d3e0 Mon Sep 17 00:00:00 2001 From: Prune Sebastien THOMAS Date: Tue, 1 Mar 2016 14:38:12 -0500 Subject: [PATCH 129/287] parsed with gofmt closes #776 --- CHANGELOG.md | 1 + plugins/inputs/kafka_consumer/kafka_consumer.go | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8632e7cb6..2eb9fa652 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ - [#778](https://github.com/influxdata/telegraf/pull/778): Adding a TCP input listener. - [#797](https://github.com/influxdata/telegraf/issues/797): Provide option for persistent MQTT consumer client sessions. - [#799](https://github.com/influxdata/telegraf/pull/799): Add number of threads for procstat input plugin. Thanks @titilambert! +- [#776](https://github.com/influxdata/telegraf/pull/776): Add Zookeeper chroot option to kafka_consumer. Thanks @prune998! ### Bugfixes - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 0d2a49f89..07c87199f 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -14,11 +14,11 @@ import ( ) type Kafka struct { - ConsumerGroup string - Topics []string - ZookeeperPeers []string + ConsumerGroup string + Topics []string + ZookeeperPeers []string ZookeeperChroot string - Consumer *consumergroup.ConsumerGroup + Consumer *consumergroup.ConsumerGroup // Legacy metric buffer support MetricBuffer int From 805db7ca5071eaa2eb9275ca212dab10a230c7b9 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 9 Mar 2016 13:11:59 +0100 Subject: [PATCH 130/287] Break out fcgi code into orig Go files, don't ignore errs closes #816 --- plugins/inputs/phpfpm/child.go | 331 ++++++++++++++++++ .../inputs/phpfpm/{phpfpm_fcgi.go => fcgi.go} | 76 ---- plugins/inputs/phpfpm/fcgi_client.go | 86 +++++ plugins/inputs/phpfpm/fcgi_test.go | 280 +++++++++++++++ plugins/inputs/phpfpm/phpfpm.go | 10 +- 5 files changed, 705 insertions(+), 78 deletions(-) create mode 100644 plugins/inputs/phpfpm/child.go rename plugins/inputs/phpfpm/{phpfpm_fcgi.go => fcgi.go} (79%) create mode 100644 plugins/inputs/phpfpm/fcgi_client.go create mode 100644 plugins/inputs/phpfpm/fcgi_test.go diff --git a/plugins/inputs/phpfpm/child.go b/plugins/inputs/phpfpm/child.go new file mode 100644 index 000000000..2ebdf2ffb --- /dev/null +++ b/plugins/inputs/phpfpm/child.go @@ -0,0 +1,331 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package phpfpm + +// This file implements FastCGI from the perspective of a child process. + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/cgi" + "os" + "strings" + "sync" + "time" +) + +// request holds the state for an in-progress request. As soon as it's complete, +// it's converted to an http.Request. +type request struct { + pw *io.PipeWriter + reqId uint16 + params map[string]string + buf [1024]byte + rawParams []byte + keepConn bool +} + +func newRequest(reqId uint16, flags uint8) *request { + r := &request{ + reqId: reqId, + params: map[string]string{}, + keepConn: flags&flagKeepConn != 0, + } + r.rawParams = r.buf[:0] + return r +} + +// parseParams reads an encoded []byte into Params. +func (r *request) parseParams() { + text := r.rawParams + r.rawParams = nil + for len(text) > 0 { + keyLen, n := readSize(text) + if n == 0 { + return + } + text = text[n:] + valLen, n := readSize(text) + if n == 0 { + return + } + text = text[n:] + if int(keyLen)+int(valLen) > len(text) { + return + } + key := readString(text, keyLen) + text = text[keyLen:] + val := readString(text, valLen) + text = text[valLen:] + r.params[key] = val + } +} + +// response implements http.ResponseWriter. +type response struct { + req *request + header http.Header + w *bufWriter + wroteHeader bool +} + +func newResponse(c *child, req *request) *response { + return &response{ + req: req, + header: http.Header{}, + w: newWriter(c.conn, typeStdout, req.reqId), + } +} + +func (r *response) Header() http.Header { + return r.header +} + +func (r *response) Write(data []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + return r.w.Write(data) +} + +func (r *response) WriteHeader(code int) { + if r.wroteHeader { + return + } + r.wroteHeader = true + if code == http.StatusNotModified { + // Must not have body. + r.header.Del("Content-Type") + r.header.Del("Content-Length") + r.header.Del("Transfer-Encoding") + } else if r.header.Get("Content-Type") == "" { + r.header.Set("Content-Type", "text/html; charset=utf-8") + } + + if r.header.Get("Date") == "" { + r.header.Set("Date", time.Now().UTC().Format(http.TimeFormat)) + } + + fmt.Fprintf(r.w, "Status: %d %s\r\n", code, http.StatusText(code)) + r.header.Write(r.w) + r.w.WriteString("\r\n") +} + +func (r *response) Flush() { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + r.w.Flush() +} + +func (r *response) Close() error { + r.Flush() + return r.w.Close() +} + +type child struct { + conn *conn + handler http.Handler + + mu sync.Mutex // protects requests: + requests map[uint16]*request // keyed by request ID +} + +func newChild(rwc io.ReadWriteCloser, handler http.Handler) *child { + return &child{ + conn: newConn(rwc), + handler: handler, + requests: make(map[uint16]*request), + } +} + +func (c *child) serve() { + defer c.conn.Close() + defer c.cleanUp() + var rec record + for { + if err := rec.read(c.conn.rwc); err != nil { + return + } + if err := c.handleRecord(&rec); err != nil { + return + } + } +} + +var errCloseConn = errors.New("fcgi: connection should be closed") + +var emptyBody = ioutil.NopCloser(strings.NewReader("")) + +// ErrRequestAborted is returned by Read when a handler attempts to read the +// body of a request that has been aborted by the web server. +var ErrRequestAborted = errors.New("fcgi: request aborted by web server") + +// ErrConnClosed is returned by Read when a handler attempts to read the body of +// a request after the connection to the web server has been closed. +var ErrConnClosed = errors.New("fcgi: connection to web server closed") + +func (c *child) handleRecord(rec *record) error { + c.mu.Lock() + req, ok := c.requests[rec.h.Id] + c.mu.Unlock() + if !ok && rec.h.Type != typeBeginRequest && rec.h.Type != typeGetValues { + // The spec says to ignore unknown request IDs. + return nil + } + + switch rec.h.Type { + case typeBeginRequest: + if req != nil { + // The server is trying to begin a request with the same ID + // as an in-progress request. This is an error. + return errors.New("fcgi: received ID that is already in-flight") + } + + var br beginRequest + if err := br.read(rec.content()); err != nil { + return err + } + if br.role != roleResponder { + c.conn.writeEndRequest(rec.h.Id, 0, statusUnknownRole) + return nil + } + req = newRequest(rec.h.Id, br.flags) + c.mu.Lock() + c.requests[rec.h.Id] = req + c.mu.Unlock() + return nil + case typeParams: + // NOTE(eds): Technically a key-value pair can straddle the boundary + // between two packets. We buffer until we've received all parameters. + if len(rec.content()) > 0 { + req.rawParams = append(req.rawParams, rec.content()...) + return nil + } + req.parseParams() + return nil + case typeStdin: + content := rec.content() + if req.pw == nil { + var body io.ReadCloser + if len(content) > 0 { + // body could be an io.LimitReader, but it shouldn't matter + // as long as both sides are behaving. + body, req.pw = io.Pipe() + } else { + body = emptyBody + } + go c.serveRequest(req, body) + } + if len(content) > 0 { + // TODO(eds): This blocks until the handler reads from the pipe. + // If the handler takes a long time, it might be a problem. + req.pw.Write(content) + } else if req.pw != nil { + req.pw.Close() + } + return nil + case typeGetValues: + values := map[string]string{"FCGI_MPXS_CONNS": "1"} + c.conn.writePairs(typeGetValuesResult, 0, values) + return nil + case typeData: + // If the filter role is implemented, read the data stream here. + return nil + case typeAbortRequest: + c.mu.Lock() + delete(c.requests, rec.h.Id) + c.mu.Unlock() + c.conn.writeEndRequest(rec.h.Id, 0, statusRequestComplete) + if req.pw != nil { + req.pw.CloseWithError(ErrRequestAborted) + } + if !req.keepConn { + // connection will close upon return + return errCloseConn + } + return nil + default: + b := make([]byte, 8) + b[0] = byte(rec.h.Type) + c.conn.writeRecord(typeUnknownType, 0, b) + return nil + } +} + +func (c *child) serveRequest(req *request, body io.ReadCloser) { + r := newResponse(c, req) + httpReq, err := cgi.RequestFromMap(req.params) + if err != nil { + // there was an error reading the request + r.WriteHeader(http.StatusInternalServerError) + c.conn.writeRecord(typeStderr, req.reqId, []byte(err.Error())) + } else { + httpReq.Body = body + c.handler.ServeHTTP(r, httpReq) + } + r.Close() + c.mu.Lock() + delete(c.requests, req.reqId) + c.mu.Unlock() + c.conn.writeEndRequest(req.reqId, 0, statusRequestComplete) + + // Consume the entire body, so the host isn't still writing to + // us when we close the socket below in the !keepConn case, + // otherwise we'd send a RST. (golang.org/issue/4183) + // TODO(bradfitz): also bound this copy in time. Or send + // some sort of abort request to the host, so the host + // can properly cut off the client sending all the data. + // For now just bound it a little and + io.CopyN(ioutil.Discard, body, 100<<20) + body.Close() + + if !req.keepConn { + c.conn.Close() + } +} + +func (c *child) cleanUp() { + c.mu.Lock() + defer c.mu.Unlock() + for _, req := range c.requests { + if req.pw != nil { + // race with call to Close in c.serveRequest doesn't matter because + // Pipe(Reader|Writer).Close are idempotent + req.pw.CloseWithError(ErrConnClosed) + } + } +} + +// Serve accepts incoming FastCGI connections on the listener l, creating a new +// goroutine for each. The goroutine reads requests and then calls handler +// to reply to them. +// If l is nil, Serve accepts connections from os.Stdin. +// If handler is nil, http.DefaultServeMux is used. +func Serve(l net.Listener, handler http.Handler) error { + if l == nil { + var err error + l, err = net.FileListener(os.Stdin) + if err != nil { + return err + } + defer l.Close() + } + if handler == nil { + handler = http.DefaultServeMux + } + for { + rw, err := l.Accept() + if err != nil { + return err + } + c := newChild(rw, handler) + go c.serve() + } +} diff --git a/plugins/inputs/phpfpm/phpfpm_fcgi.go b/plugins/inputs/phpfpm/fcgi.go similarity index 79% rename from plugins/inputs/phpfpm/phpfpm_fcgi.go rename to plugins/inputs/phpfpm/fcgi.go index 03aac7634..689660ea0 100644 --- a/plugins/inputs/phpfpm/phpfpm_fcgi.go +++ b/plugins/inputs/phpfpm/fcgi.go @@ -17,11 +17,6 @@ import ( "errors" "io" "sync" - - "net" - "strconv" - - "strings" ) // recType is a record type, as defined by @@ -277,74 +272,3 @@ func (w *streamWriter) Close() error { // send empty record to close the stream return w.c.writeRecord(w.recType, w.reqId, nil) } - -func NewClient(h string, args ...interface{}) (fcgi *conn, err error) { - var con net.Conn - if len(args) != 1 { - err = errors.New("fcgi: not enough params") - return - } - switch args[0].(type) { - case int: - addr := h + ":" + strconv.FormatInt(int64(args[0].(int)), 10) - con, err = net.Dial("tcp", addr) - case string: - laddr := net.UnixAddr{Name: args[0].(string), Net: h} - con, err = net.DialUnix(h, nil, &laddr) - default: - err = errors.New("fcgi: we only accept int (port) or string (socket) params.") - } - fcgi = &conn{ - rwc: con, - } - return -} - -func (client *conn) Request(env map[string]string, requestData string) (retout []byte, reterr []byte, err error) { - defer client.rwc.Close() - var reqId uint16 = 1 - - err = client.writeBeginRequest(reqId, uint16(roleResponder), 0) - if err != nil { - return - } - - err = client.writePairs(typeParams, reqId, env) - if err != nil { - return - } - - if len(requestData) > 0 { - if err = client.writeRecord(typeStdin, reqId, []byte(requestData)); err != nil { - return - } - } - - rec := &record{} - var err1 error - - // recive untill EOF or FCGI_END_REQUEST -READ_LOOP: - for { - err1 = rec.read(client.rwc) - if err1 != nil && strings.Contains(err1.Error(), "use of closed network connection") { - if err1 != io.EOF { - err = err1 - } - break - } - - switch { - case rec.h.Type == typeStdout: - retout = append(retout, rec.content()...) - case rec.h.Type == typeStderr: - reterr = append(reterr, rec.content()...) - case rec.h.Type == typeEndRequest: - fallthrough - default: - break READ_LOOP - } - } - - return -} diff --git a/plugins/inputs/phpfpm/fcgi_client.go b/plugins/inputs/phpfpm/fcgi_client.go new file mode 100644 index 000000000..56978ad3a --- /dev/null +++ b/plugins/inputs/phpfpm/fcgi_client.go @@ -0,0 +1,86 @@ +package phpfpm + +import ( + "errors" + "io" + "net" + "strconv" + "strings" +) + +// Create an fcgi client +func newFcgiClient(h string, args ...interface{}) (*conn, error) { + var con net.Conn + if len(args) != 1 { + return nil, errors.New("fcgi: not enough params") + } + + var err error + switch args[0].(type) { + case int: + addr := h + ":" + strconv.FormatInt(int64(args[0].(int)), 10) + con, err = net.Dial("tcp", addr) + case string: + laddr := net.UnixAddr{Name: args[0].(string), Net: h} + con, err = net.DialUnix(h, nil, &laddr) + default: + err = errors.New("fcgi: we only accept int (port) or string (socket) params.") + } + fcgi := &conn{ + rwc: con, + } + + return fcgi, err +} + +func (client *conn) Request( + env map[string]string, + requestData string, +) (retout []byte, reterr []byte, err error) { + defer client.rwc.Close() + var reqId uint16 = 1 + + err = client.writeBeginRequest(reqId, uint16(roleResponder), 0) + if err != nil { + return + } + + err = client.writePairs(typeParams, reqId, env) + if err != nil { + return + } + + if len(requestData) > 0 { + if err = client.writeRecord(typeStdin, reqId, []byte(requestData)); err != nil { + return + } + } + + rec := &record{} + var err1 error + + // recive untill EOF or FCGI_END_REQUEST +READ_LOOP: + for { + err1 = rec.read(client.rwc) + if err1 != nil && strings.Contains(err1.Error(), "use of closed network connection") { + if err1 != io.EOF { + err = err1 + } + break + } + + switch { + case rec.h.Type == typeStdout: + retout = append(retout, rec.content()...) + case rec.h.Type == typeStderr: + reterr = append(reterr, rec.content()...) + case rec.h.Type == typeEndRequest: + fallthrough + default: + break READ_LOOP + } + } + + return +} diff --git a/plugins/inputs/phpfpm/fcgi_test.go b/plugins/inputs/phpfpm/fcgi_test.go new file mode 100644 index 000000000..15e0030a7 --- /dev/null +++ b/plugins/inputs/phpfpm/fcgi_test.go @@ -0,0 +1,280 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package phpfpm + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "net/http" + "testing" +) + +var sizeTests = []struct { + size uint32 + bytes []byte +}{ + {0, []byte{0x00}}, + {127, []byte{0x7F}}, + {128, []byte{0x80, 0x00, 0x00, 0x80}}, + {1000, []byte{0x80, 0x00, 0x03, 0xE8}}, + {33554431, []byte{0x81, 0xFF, 0xFF, 0xFF}}, +} + +func TestSize(t *testing.T) { + b := make([]byte, 4) + for i, test := range sizeTests { + n := encodeSize(b, test.size) + if !bytes.Equal(b[:n], test.bytes) { + t.Errorf("%d expected %x, encoded %x", i, test.bytes, b) + } + size, n := readSize(test.bytes) + if size != test.size { + t.Errorf("%d expected %d, read %d", i, test.size, size) + } + if len(test.bytes) != n { + t.Errorf("%d did not consume all the bytes", i) + } + } +} + +var streamTests = []struct { + desc string + recType recType + reqId uint16 + content []byte + raw []byte +}{ + {"single record", typeStdout, 1, nil, + []byte{1, byte(typeStdout), 0, 1, 0, 0, 0, 0}, + }, + // this data will have to be split into two records + {"two records", typeStdin, 300, make([]byte, 66000), + bytes.Join([][]byte{ + // header for the first record + {1, byte(typeStdin), 0x01, 0x2C, 0xFF, 0xFF, 1, 0}, + make([]byte, 65536), + // header for the second + {1, byte(typeStdin), 0x01, 0x2C, 0x01, 0xD1, 7, 0}, + make([]byte, 472), + // header for the empty record + {1, byte(typeStdin), 0x01, 0x2C, 0, 0, 0, 0}, + }, + nil), + }, +} + +type nilCloser struct { + io.ReadWriter +} + +func (c *nilCloser) Close() error { return nil } + +func TestStreams(t *testing.T) { + var rec record +outer: + for _, test := range streamTests { + buf := bytes.NewBuffer(test.raw) + var content []byte + for buf.Len() > 0 { + if err := rec.read(buf); err != nil { + t.Errorf("%s: error reading record: %v", test.desc, err) + continue outer + } + content = append(content, rec.content()...) + } + if rec.h.Type != test.recType { + t.Errorf("%s: got type %d expected %d", test.desc, rec.h.Type, test.recType) + continue + } + if rec.h.Id != test.reqId { + t.Errorf("%s: got request ID %d expected %d", test.desc, rec.h.Id, test.reqId) + continue + } + if !bytes.Equal(content, test.content) { + t.Errorf("%s: read wrong content", test.desc) + continue + } + buf.Reset() + c := newConn(&nilCloser{buf}) + w := newWriter(c, test.recType, test.reqId) + if _, err := w.Write(test.content); err != nil { + t.Errorf("%s: error writing record: %v", test.desc, err) + continue + } + if err := w.Close(); err != nil { + t.Errorf("%s: error closing stream: %v", test.desc, err) + continue + } + if !bytes.Equal(buf.Bytes(), test.raw) { + t.Errorf("%s: wrote wrong content", test.desc) + } + } +} + +type writeOnlyConn struct { + buf []byte +} + +func (c *writeOnlyConn) Write(p []byte) (int, error) { + c.buf = append(c.buf, p...) + return len(p), nil +} + +func (c *writeOnlyConn) Read(p []byte) (int, error) { + return 0, errors.New("conn is write-only") +} + +func (c *writeOnlyConn) Close() error { + return nil +} + +func TestGetValues(t *testing.T) { + var rec record + rec.h.Type = typeGetValues + + wc := new(writeOnlyConn) + c := newChild(wc, nil) + err := c.handleRecord(&rec) + if err != nil { + t.Fatalf("handleRecord: %v", err) + } + + const want = "\x01\n\x00\x00\x00\x12\x06\x00" + + "\x0f\x01FCGI_MPXS_CONNS1" + + "\x00\x00\x00\x00\x00\x00\x01\n\x00\x00\x00\x00\x00\x00" + if got := string(wc.buf); got != want { + t.Errorf(" got: %q\nwant: %q\n", got, want) + } +} + +func nameValuePair11(nameData, valueData string) []byte { + return bytes.Join( + [][]byte{ + {byte(len(nameData)), byte(len(valueData))}, + []byte(nameData), + []byte(valueData), + }, + nil, + ) +} + +func makeRecord( + recordType recType, + requestId uint16, + contentData []byte, +) []byte { + requestIdB1 := byte(requestId >> 8) + requestIdB0 := byte(requestId) + + contentLength := len(contentData) + contentLengthB1 := byte(contentLength >> 8) + contentLengthB0 := byte(contentLength) + return bytes.Join([][]byte{ + {1, byte(recordType), requestIdB1, requestIdB0, contentLengthB1, + contentLengthB0, 0, 0}, + contentData, + }, + nil) +} + +// a series of FastCGI records that start a request and begin sending the +// request body +var streamBeginTypeStdin = bytes.Join([][]byte{ + // set up request 1 + makeRecord(typeBeginRequest, 1, + []byte{0, byte(roleResponder), 0, 0, 0, 0, 0, 0}), + // add required parameters to request 1 + makeRecord(typeParams, 1, nameValuePair11("REQUEST_METHOD", "GET")), + makeRecord(typeParams, 1, nameValuePair11("SERVER_PROTOCOL", "HTTP/1.1")), + makeRecord(typeParams, 1, nil), + // begin sending body of request 1 + makeRecord(typeStdin, 1, []byte("0123456789abcdef")), +}, + nil) + +var cleanUpTests = []struct { + input []byte + err error +}{ + // confirm that child.handleRecord closes req.pw after aborting req + { + bytes.Join([][]byte{ + streamBeginTypeStdin, + makeRecord(typeAbortRequest, 1, nil), + }, + nil), + ErrRequestAborted, + }, + // confirm that child.serve closes all pipes after error reading record + { + bytes.Join([][]byte{ + streamBeginTypeStdin, + nil, + }, + nil), + ErrConnClosed, + }, +} + +type nopWriteCloser struct { + io.ReadWriter +} + +func (nopWriteCloser) Close() error { + return nil +} + +// Test that child.serve closes the bodies of aborted requests and closes the +// bodies of all requests before returning. Causes deadlock if either condition +// isn't met. See issue 6934. +func TestChildServeCleansUp(t *testing.T) { + for _, tt := range cleanUpTests { + input := make([]byte, len(tt.input)) + copy(input, tt.input) + rc := nopWriteCloser{bytes.NewBuffer(input)} + done := make(chan bool) + c := newChild(rc, http.HandlerFunc(func( + w http.ResponseWriter, + r *http.Request, + ) { + // block on reading body of request + _, err := io.Copy(ioutil.Discard, r.Body) + if err != tt.err { + t.Errorf("Expected %#v, got %#v", tt.err, err) + } + // not reached if body of request isn't closed + done <- true + })) + go c.serve() + // wait for body of request to be closed or all goroutines to block + <-done + } +} + +type rwNopCloser struct { + io.Reader + io.Writer +} + +func (rwNopCloser) Close() error { + return nil +} + +// Verifies it doesn't crash. Issue 11824. +func TestMalformedParams(t *testing.T) { + input := []byte{ + // beginRequest, requestId=1, contentLength=8, role=1, keepConn=1 + 1, 1, 0, 1, 0, 8, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, + // params, requestId=1, contentLength=10, k1Len=50, v1Len=50 (malformed, wrong length) + 1, 4, 0, 1, 0, 10, 0, 0, 50, 50, 3, 4, 5, 6, 7, 8, 9, 10, + // end of params + 1, 4, 0, 1, 0, 0, 0, 0, + } + rw := rwNopCloser{bytes.NewReader(input), ioutil.Discard} + c := newChild(rw, http.DefaultServeMux) + c.serve() +} diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index c07262342..199b0005b 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -112,6 +112,7 @@ func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { statusPath string ) + var err error if strings.HasPrefix(addr, "fcgi://") || strings.HasPrefix(addr, "cgi://") { u, err := url.Parse(addr) if err != nil { @@ -120,7 +121,7 @@ func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { socketAddr := strings.Split(u.Host, ":") fcgiIp := socketAddr[0] fcgiPort, _ := strconv.Atoi(socketAddr[1]) - fcgi, _ = NewClient(fcgiIp, fcgiPort) + fcgi, err = newFcgiClient(fcgiIp, fcgiPort) } else { socketAddr := strings.Split(addr, ":") if len(socketAddr) >= 2 { @@ -134,8 +135,13 @@ func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { if _, err := os.Stat(socketPath); os.IsNotExist(err) { return fmt.Errorf("Socket doesn't exist '%s': %s", socketPath, err) } - fcgi, _ = NewClient("unix", socketPath) + fcgi, err = newFcgiClient("unix", socketPath) } + + if err != nil { + return err + } + return g.gatherFcgi(fcgi, statusPath, acc) } From 845abcdd77c0099e2acaa0f74a67f89b3824480b Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 9 Mar 2016 14:44:32 +0100 Subject: [PATCH 131/287] Only log the overwritten metric warning on 1st overwrite per buffer see #807 --- internal/models/running_output.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/internal/models/running_output.go b/internal/models/running_output.go index 9d111c757..33fa4e120 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -82,9 +82,11 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) { } } } else { - log.Printf("WARNING: overwriting cached metrics, you may want to " + - "increase the metric_buffer_limit setting in your [agent] " + - "config if you do not wish to overwrite metrics.\n") + if ro.overwriteI == 0 { + log.Printf("WARNING: overwriting cached metrics, you may want to " + + "increase the metric_buffer_limit setting in your [agent] " + + "config if you do not wish to overwrite metrics.\n") + } if ro.overwriteI == len(ro.metrics) { ro.overwriteI = 0 } From b102ae141accd23211bc3009e4a4cf6a3e8547be Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 9 Mar 2016 15:46:37 +0100 Subject: [PATCH 132/287] CONFIGURATION drop->fielddrop --- docs/CONFIGURATION.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 58dbdf261..853dc6d05 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -97,7 +97,7 @@ fields which begin with `time_`. percpu = true totalcpu = false # filter all fields beginning with 'time_' - drop = ["time_*"] + fielddrop = ["time_*"] ``` #### Input Config: tagpass and tagdrop @@ -106,7 +106,7 @@ fields which begin with `time_`. [[inputs.cpu]] percpu = true totalcpu = false - drop = ["cpu_time"] + fielddrop = ["cpu_time"] # Don't collect CPU data for cpu6 & cpu7 [inputs.cpu.tagdrop] cpu = [ "cpu6", "cpu7" ] @@ -199,7 +199,7 @@ to avoid measurement collisions: percpu = true totalcpu = false name_override = "percpu_usage" - drop = ["cpu_time*"] + fielddrop = ["cpu_time*"] ``` ## `[outputs.xxx]` Configuration From 5ffa2a30be196bebdf8038970bbf7ed74895174e Mon Sep 17 00:00:00 2001 From: Thibault Cohen Date: Sun, 6 Mar 2016 01:04:54 -0500 Subject: [PATCH 133/287] Add processes status stats in system input plugin --- plugins/inputs/system/processes.go | 61 +++++++++++++++++++++++++ plugins/inputs/system/processes_test.go | 21 +++++++++ 2 files changed, 82 insertions(+) create mode 100644 plugins/inputs/system/processes.go create mode 100644 plugins/inputs/system/processes_test.go diff --git a/plugins/inputs/system/processes.go b/plugins/inputs/system/processes.go new file mode 100644 index 000000000..c4b791e3c --- /dev/null +++ b/plugins/inputs/system/processes.go @@ -0,0 +1,61 @@ +package system + +import ( + "fmt" + "log" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/shirou/gopsutil/process" +) + +type Processes struct { +} + +func (_ *Processes) Description() string { + return "Get the number of processes and group them by status (Linux only)" +} + +func (_ *Processes) SampleConfig() string { return "" } + +func (s *Processes) Gather(acc telegraf.Accumulator) error { + pids, err := process.Pids() + if err != nil { + return fmt.Errorf("error getting pids list: %s", err) + } + // TODO handle other OS (Windows/BSD/Solaris/OSX) + fields := map[string]interface{}{ + "paging": uint64(0), + "blocked": uint64(0), + "zombie": uint64(0), + "stopped": uint64(0), + "running": uint64(0), + "sleeping": uint64(0), + } + for _, pid := range pids { + process, err := process.NewProcess(pid) + if err != nil { + log.Printf("Can not get process %d status: %s", pid, err) + continue + } + status, err := process.Status() + if err != nil { + log.Printf("Can not get process %d status: %s\n", pid, err) + continue + } + _, exists := fields[status] + if !exists { + log.Printf("Status '%s' for process with pid: %d\n", status, pid) + continue + } + fields[status] = fields[status].(uint64) + uint64(1) + } + + acc.AddFields("processes", fields, nil) + return nil +} +func init() { + inputs.Add("processes", func() telegraf.Input { + return &Processes{} + }) +} diff --git a/plugins/inputs/system/processes_test.go b/plugins/inputs/system/processes_test.go new file mode 100644 index 000000000..246884711 --- /dev/null +++ b/plugins/inputs/system/processes_test.go @@ -0,0 +1,21 @@ +package system + +import ( + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestProcesses(t *testing.T) { + processes := &Processes{} + var acc testutil.Accumulator + + err := processes.Gather(&acc) + require.NoError(t, err) + + assert.True(t, acc.HasUIntField("processes", "running")) + assert.True(t, acc.HasUIntField("processes", "sleeping")) + assert.True(t, acc.HasUIntField("processes", "stopped")) +} From 2f45b8b7f54ce516fc5b4ed2f3191b3f3b726f95 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 8 Mar 2016 11:42:31 +0100 Subject: [PATCH 134/287] Cross platform support for the 'processes' plugin closes #798 --- CHANGELOG.md | 2 + README.md | 4 +- etc/telegraf.conf | 4 + plugins/inputs/system/PROCESSES_README.md | 58 ++++++ plugins/inputs/system/processes.go | 219 ++++++++++++++++++---- plugins/inputs/system/processes_test.go | 133 ++++++++++++- scripts/circle-test.sh | 2 +- 7 files changed, 384 insertions(+), 38 deletions(-) create mode 100644 plugins/inputs/system/PROCESSES_README.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 2eb9fa652..5ef68bd45 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ - [#797](https://github.com/influxdata/telegraf/issues/797): Provide option for persistent MQTT consumer client sessions. - [#799](https://github.com/influxdata/telegraf/pull/799): Add number of threads for procstat input plugin. Thanks @titilambert! - [#776](https://github.com/influxdata/telegraf/pull/776): Add Zookeeper chroot option to kafka_consumer. Thanks @prune998! +- [#811](https://github.com/influxdata/telegraf/pull/811): Add processes plugin for classifying total procs on system. Thanks @titilambert! ### Bugfixes - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" @@ -24,6 +25,7 @@ - [#773](https://github.com/influxdata/telegraf/issues/773): Fix duplicate measurements in snmp plugin. Thanks @titilambert! - [#708](https://github.com/influxdata/telegraf/issues/708): packaging: build ARM package - [#713](https://github.com/influxdata/telegraf/issues/713): packaging: insecure permissions error on log directory +- [#816](https://github.com/influxdata/telegraf/issues/816): Fix phpfpm panic if fcgi endpoint unreachable. ## v0.10.4.1 diff --git a/README.md b/README.md index e9c20996a..fb9363100 100644 --- a/README.md +++ b/README.md @@ -214,11 +214,13 @@ Currently implemented sources: * disk * diskio * swap + * processes Telegraf can also collect metrics via the following service plugins: * statsd -* udp listener +* udp_listener +* tcp_listener * mqtt_consumer * kafka_consumer * nats_consumer diff --git a/etc/telegraf.conf b/etc/telegraf.conf index a6057ecd2..3deb7f895 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -123,6 +123,10 @@ [[inputs.mem]] # no configuration +# Get the number of processes and group them by status +[[inputs.processes]] + # no configuration + # Read metrics about swap memory usage [[inputs.swap]] # no configuration diff --git a/plugins/inputs/system/PROCESSES_README.md b/plugins/inputs/system/PROCESSES_README.md new file mode 100644 index 000000000..006e043fb --- /dev/null +++ b/plugins/inputs/system/PROCESSES_README.md @@ -0,0 +1,58 @@ +# Processes Input Plugin + +This plugin gathers info about the total number of processes and groups +them by status (zombie, sleeping, running, etc.) + +On linux this plugin requires access to procfs (/proc), on other OSes +it requires access to execute `ps`. + +### Configuration: + +```toml +# Get the number of processes and group them by status +[[inputs.processes]] + # no configuration +``` + +### Measurements & Fields: + +- processes + - blocked (aka disk sleep or uninterruptible sleep) + - running + - sleeping + - stopped + - total + - zombie + - wait (freebsd only) + - idle (bsd only) + - paging (linux only) + - total_threads (linux only) + +### Process State Mappings + +Different OSes use slightly different State codes for their processes, these +state codes are documented in `man ps`, and I will give a mapping of what major +OS state codes correspond to in telegraf metrics: + +``` +Linux FreeBSD Darwin meaning + R R R running + S S S sleeping + Z Z Z zombie + T T T stopped + none I I idle (sleeping for longer than about 20 seconds) + D D,L U blocked (waiting in uninterruptible sleep, or locked) + W W none paging (linux kernel < 2.6 only), wait (freebsd) +``` + +### Tags: + +None + +### Example Output: + +``` +$ telegraf -config ~/ws/telegraf.conf -input-filter processes -test +* Plugin: processes, Collection 1 +> processes blocked=8i,running=1i,sleeping=265i,stopped=0i,total=274i,zombie=0i,paging=0i,total_threads=687i 1457478636980905042 +``` diff --git a/plugins/inputs/system/processes.go b/plugins/inputs/system/processes.go index c4b791e3c..b7ee32066 100644 --- a/plugins/inputs/system/processes.go +++ b/plugins/inputs/system/processes.go @@ -1,61 +1,216 @@ +// +build !windows + package system import ( + "bytes" "fmt" + "io/ioutil" "log" + "os" + "os/exec" + "path" + "runtime" + "strconv" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/shirou/gopsutil/process" ) type Processes struct { + execPS func() ([]byte, error) + readProcFile func(statFile string) ([]byte, error) + + forcePS bool + forceProc bool } -func (_ *Processes) Description() string { - return "Get the number of processes and group them by status (Linux only)" +func (p *Processes) Description() string { + return "Get the number of processes and group them by status" } -func (_ *Processes) SampleConfig() string { return "" } +func (p *Processes) SampleConfig() string { return "" } -func (s *Processes) Gather(acc telegraf.Accumulator) error { - pids, err := process.Pids() - if err != nil { - return fmt.Errorf("error getting pids list: %s", err) +func (p *Processes) Gather(acc telegraf.Accumulator) error { + // Get an empty map of metric fields + fields := getEmptyFields() + + // Decide if we will use 'ps' to get stats (use procfs otherwise) + usePS := true + if runtime.GOOS == "linux" { + usePS = false } - // TODO handle other OS (Windows/BSD/Solaris/OSX) - fields := map[string]interface{}{ - "paging": uint64(0), - "blocked": uint64(0), - "zombie": uint64(0), - "stopped": uint64(0), - "running": uint64(0), - "sleeping": uint64(0), + if p.forcePS { + usePS = true + } else if p.forceProc { + usePS = false } - for _, pid := range pids { - process, err := process.NewProcess(pid) - if err != nil { - log.Printf("Can not get process %d status: %s", pid, err) - continue + + // Gather stats from 'ps' or procfs + if usePS { + if err := p.gatherFromPS(fields); err != nil { + return err } - status, err := process.Status() - if err != nil { - log.Printf("Can not get process %d status: %s\n", pid, err) - continue + } else { + if err := p.gatherFromProc(fields); err != nil { + return err } - _, exists := fields[status] - if !exists { - log.Printf("Status '%s' for process with pid: %d\n", status, pid) - continue - } - fields[status] = fields[status].(uint64) + uint64(1) } acc.AddFields("processes", fields, nil) return nil } + +// Gets empty fields of metrics based on the OS +func getEmptyFields() map[string]interface{} { + fields := map[string]interface{}{ + "blocked": int64(0), + "zombie": int64(0), + "stopped": int64(0), + "running": int64(0), + "sleeping": int64(0), + "total": int64(0), + } + switch runtime.GOOS { + case "freebsd": + fields["idle"] = int64(0) + fields["wait"] = int64(0) + case "darwin": + fields["idle"] = int64(0) + case "openbsd": + fields["idle"] = int64(0) + case "linux": + fields["paging"] = int64(0) + fields["total_threads"] = int64(0) + } + return fields +} + +// exec `ps` to get all process states +func (p *Processes) gatherFromPS(fields map[string]interface{}) error { + out, err := p.execPS() + if err != nil { + return err + } + + for i, status := range bytes.Fields(out) { + if i == 0 && string(status) == "STAT" { + // This is a header, skip it + continue + } + switch status[0] { + case 'W': + fields["wait"] = fields["wait"].(int64) + int64(1) + case 'U', 'D', 'L': + // Also known as uninterruptible sleep or disk sleep + fields["blocked"] = fields["blocked"].(int64) + int64(1) + case 'Z': + fields["zombie"] = fields["zombie"].(int64) + int64(1) + case 'T': + fields["stopped"] = fields["stopped"].(int64) + int64(1) + case 'R': + fields["running"] = fields["running"].(int64) + int64(1) + case 'S': + fields["sleeping"] = fields["sleeping"].(int64) + int64(1) + case 'I': + fields["idle"] = fields["idle"].(int64) + int64(1) + default: + log.Printf("processes: Unknown state [ %s ] from ps", + string(status[0])) + } + fields["total"] = fields["total"].(int64) + int64(1) + } + return nil +} + +// get process states from /proc/(pid)/stat files +func (p *Processes) gatherFromProc(fields map[string]interface{}) error { + files, err := ioutil.ReadDir("/proc") + if err != nil { + return err + } + + for _, file := range files { + if !file.IsDir() { + continue + } + + statFile := path.Join("/proc", file.Name(), "stat") + data, err := p.readProcFile(statFile) + if err != nil { + return err + } + if data == nil { + continue + } + + stats := bytes.Fields(data) + if len(stats) < 3 { + return fmt.Errorf("Something is terribly wrong with %s", statFile) + } + switch stats[2][0] { + case 'R': + fields["running"] = fields["running"].(int64) + int64(1) + case 'S': + fields["sleeping"] = fields["sleeping"].(int64) + int64(1) + case 'D': + fields["blocked"] = fields["blocked"].(int64) + int64(1) + case 'Z': + fields["zombies"] = fields["zombies"].(int64) + int64(1) + case 'T', 't': + fields["stopped"] = fields["stopped"].(int64) + int64(1) + case 'W': + fields["paging"] = fields["paging"].(int64) + int64(1) + default: + log.Printf("processes: Unknown state [ %s ] in file %s", + string(stats[2][0]), statFile) + } + fields["total"] = fields["total"].(int64) + int64(1) + + threads, err := strconv.Atoi(string(stats[19])) + if err != nil { + log.Printf("processes: Error parsing thread count: %s", err) + continue + } + fields["total_threads"] = fields["total_threads"].(int64) + int64(threads) + } + return nil +} + +func readProcFile(statFile string) ([]byte, error) { + if _, err := os.Stat(statFile); os.IsNotExist(err) { + return nil, nil + } else if err != nil { + return nil, err + } + + data, err := ioutil.ReadFile(statFile) + if err != nil { + return nil, err + } + + return data, nil +} + +func execPS() ([]byte, error) { + bin, err := exec.LookPath("ps") + if err != nil { + return nil, err + } + + out, err := exec.Command(bin, "axo", "state").Output() + if err != nil { + return nil, err + } + + return out, err +} + func init() { inputs.Add("processes", func() telegraf.Input { - return &Processes{} + return &Processes{ + execPS: execPS, + readProcFile: readProcFile, + } }) } diff --git a/plugins/inputs/system/processes_test.go b/plugins/inputs/system/processes_test.go index 246884711..0e2b5e105 100644 --- a/plugins/inputs/system/processes_test.go +++ b/plugins/inputs/system/processes_test.go @@ -1,6 +1,8 @@ package system import ( + "fmt" + "runtime" "testing" "github.com/influxdata/telegraf/testutil" @@ -9,13 +11,136 @@ import ( ) func TestProcesses(t *testing.T) { - processes := &Processes{} + processes := &Processes{ + execPS: execPS, + readProcFile: readProcFile, + } var acc testutil.Accumulator err := processes.Gather(&acc) require.NoError(t, err) - assert.True(t, acc.HasUIntField("processes", "running")) - assert.True(t, acc.HasUIntField("processes", "sleeping")) - assert.True(t, acc.HasUIntField("processes", "stopped")) + assert.True(t, acc.HasIntField("processes", "running")) + assert.True(t, acc.HasIntField("processes", "sleeping")) + assert.True(t, acc.HasIntField("processes", "stopped")) + assert.True(t, acc.HasIntField("processes", "total")) + total, ok := acc.Get("processes") + require.True(t, ok) + assert.True(t, total.Fields["total"].(int64) > 0) } + +func TestFromPS(t *testing.T) { + processes := &Processes{ + execPS: testExecPS, + forcePS: true, + } + + var acc testutil.Accumulator + err := processes.Gather(&acc) + require.NoError(t, err) + + fields := getEmptyFields() + fields["blocked"] = int64(1) + fields["running"] = int64(4) + fields["sleeping"] = int64(34) + fields["total"] = int64(39) + + acc.AssertContainsTaggedFields(t, "processes", fields, map[string]string{}) +} + +func TestFromPSError(t *testing.T) { + processes := &Processes{ + execPS: testExecPSError, + forcePS: true, + } + + var acc testutil.Accumulator + err := processes.Gather(&acc) + require.Error(t, err) +} + +func TestFromProcFiles(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skip("This test only runs on linux") + } + tester := tester{} + processes := &Processes{ + readProcFile: tester.testProcFile, + forceProc: true, + } + + var acc testutil.Accumulator + err := processes.Gather(&acc) + require.NoError(t, err) + + fields := getEmptyFields() + fields["sleeping"] = tester.calls + fields["total_threads"] = tester.calls * 2 + fields["total"] = tester.calls + + acc.AssertContainsTaggedFields(t, "processes", fields, map[string]string{}) +} + +func testExecPS() ([]byte, error) { + return []byte(testPSOut), nil +} + +// struct for counting calls to testProcFile +type tester struct { + calls int64 +} + +func (t *tester) testProcFile(_ string) ([]byte, error) { + t.calls++ + return []byte(fmt.Sprintf(testProcStat, "S", "2")), nil +} + +func testExecPSError() ([]byte, error) { + return []byte(testPSOut), fmt.Errorf("ERROR!") +} + +const testPSOut = ` +STAT +S +S +S +S +R +R +S +S +Ss +Ss +S +SNs +Ss +Ss +S +R+ +S +U +S +S +S +S +Ss +S+ +Ss +S +S+ +S+ +Ss +S+ +Ss +S +R+ +Ss +S +S+ +S+ +Ss +S+ +` + +const testProcStat = `10 (rcuob/0) %s 2 0 0 0 -1 2129984 0 0 0 0 0 0 0 0 20 0 %s 0 11 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +` diff --git a/scripts/circle-test.sh b/scripts/circle-test.sh index 9a3e0e678..f0288c73e 100755 --- a/scripts/circle-test.sh +++ b/scripts/circle-test.sh @@ -68,7 +68,7 @@ telegraf -sample-config > $tmpdir/config.toml exit_if_fail telegraf -config $tmpdir/config.toml \ -test -input-filter cpu:mem -mv $GOPATH/bin/telegraf $CIRCLE_ARTIFACTS +cat $GOPATH/bin/telegraf | gzip > $CIRCLE_ARTIFACTS/telegraf.gz eval "git describe --exact-match HEAD" if [ $? -eq 0 ]; then From 7b09623fa8be8e5fd8046ff47da2cc4923f43f10 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 9 Mar 2016 17:22:34 +0100 Subject: [PATCH 135/287] Add number of users to 'system' plugin see #235 --- CHANGELOG.md | 1 + plugins/inputs/system/SYSTEM_README.md | 35 ++++++++++++++++++++++++++ plugins/inputs/system/system.go | 6 +++++ 3 files changed, 42 insertions(+) create mode 100644 plugins/inputs/system/SYSTEM_README.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 5ef68bd45..28ce825c5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ - [#799](https://github.com/influxdata/telegraf/pull/799): Add number of threads for procstat input plugin. Thanks @titilambert! - [#776](https://github.com/influxdata/telegraf/pull/776): Add Zookeeper chroot option to kafka_consumer. Thanks @prune998! - [#811](https://github.com/influxdata/telegraf/pull/811): Add processes plugin for classifying total procs on system. Thanks @titilambert! +- [#235](https://github.com/influxdata/telegraf/issues/235): Add number of users to the `system` input plugin. ### Bugfixes - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" diff --git a/plugins/inputs/system/SYSTEM_README.md b/plugins/inputs/system/SYSTEM_README.md new file mode 100644 index 000000000..fc873c7e8 --- /dev/null +++ b/plugins/inputs/system/SYSTEM_README.md @@ -0,0 +1,35 @@ +# System Input Plugin + +The system plugin gathers general stats on system load, uptime, +and number of users logged in. It is basically equivalent +to the unix `uptime` command. + +### Configuration: + +```toml +# Read metrics about system load & uptime +[[inputs.system]] + # no configuration +``` + +### Measurements & Fields: + +- system + - load1 (float) + - load15 (float) + - load5 (float) + - n_users (integer) + - uptime (integer, seconds) + - uptime_format (string) + +### Tags: + +None + +### Example Output: + +``` +$ telegraf -config ~/ws/telegraf.conf -input-filter system -test +* Plugin: system, Collection 1 +> system load1=2.05,load15=2.38,load5=2.03,n_users=4i,uptime=239043i,uptime_format="2 days, 18:24" 1457546165399253452 +``` diff --git a/plugins/inputs/system/system.go b/plugins/inputs/system/system.go index 9922d5a92..42b0310a4 100644 --- a/plugins/inputs/system/system.go +++ b/plugins/inputs/system/system.go @@ -31,11 +31,17 @@ func (_ *SystemStats) Gather(acc telegraf.Accumulator) error { return err } + users, err := host.Users() + if err != nil { + return err + } + fields := map[string]interface{}{ "load1": loadavg.Load1, "load5": loadavg.Load5, "load15": loadavg.Load15, "uptime": hostinfo.Uptime, + "n_users": len(users), "uptime_format": format_uptime(hostinfo.Uptime), } acc.AddFields("system", fields, nil) From aa15e7916e3c705fbde0d42857d30c04363b1b32 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 9 Mar 2016 22:55:26 +0100 Subject: [PATCH 136/287] processes: Fix zombie process procfs panic fixes #822 --- plugins/inputs/system/processes.go | 4 ++-- plugins/inputs/system/processes_test.go | 9 +++++++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/system/processes.go b/plugins/inputs/system/processes.go index b7ee32066..aae0e6ba4 100644 --- a/plugins/inputs/system/processes.go +++ b/plugins/inputs/system/processes.go @@ -65,7 +65,7 @@ func (p *Processes) Gather(acc telegraf.Accumulator) error { func getEmptyFields() map[string]interface{} { fields := map[string]interface{}{ "blocked": int64(0), - "zombie": int64(0), + "zombies": int64(0), "stopped": int64(0), "running": int64(0), "sleeping": int64(0), @@ -105,7 +105,7 @@ func (p *Processes) gatherFromPS(fields map[string]interface{}) error { // Also known as uninterruptible sleep or disk sleep fields["blocked"] = fields["blocked"].(int64) + int64(1) case 'Z': - fields["zombie"] = fields["zombie"].(int64) + int64(1) + fields["zombies"] = fields["zombies"].(int64) + int64(1) case 'T': fields["stopped"] = fields["stopped"].(int64) + int64(1) case 'R': diff --git a/plugins/inputs/system/processes_test.go b/plugins/inputs/system/processes_test.go index 0e2b5e105..de9b6aa5b 100644 --- a/plugins/inputs/system/processes_test.go +++ b/plugins/inputs/system/processes_test.go @@ -40,10 +40,11 @@ func TestFromPS(t *testing.T) { require.NoError(t, err) fields := getEmptyFields() - fields["blocked"] = int64(1) + fields["blocked"] = int64(4) + fields["zombies"] = int64(1) fields["running"] = int64(4) fields["sleeping"] = int64(34) - fields["total"] = int64(39) + fields["total"] = int64(43) acc.AssertContainsTaggedFields(t, "processes", fields, map[string]string{}) } @@ -139,6 +140,10 @@ S S+ S+ Ss +L +U +Z +D S+ ` From 3f2a04b25b5262fdeb8ff3ae996ead7f43287662 Mon Sep 17 00:00:00 2001 From: Chris Goller Date: Wed, 9 Mar 2016 11:50:06 -0600 Subject: [PATCH 137/287] Fix build-for-docker Makefile target syntax. closes #819 --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index ef316bd03..c87f78b55 100644 --- a/Makefile +++ b/Makefile @@ -22,8 +22,8 @@ build-windows: ./cmd/telegraf/telegraf.go build-for-docker: - CGO_ENABLED=0 GOOS=linux go -o telegraf -ldflags \ - "-X main.Version=$(VERSION)" \ + CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o telegraf -ldflags \ + "-s -X main.Version=$(VERSION)" \ ./cmd/telegraf/telegraf.go # Build with race detector From 0752879fc8e212840a53ccc1a240646b08358618 Mon Sep 17 00:00:00 2001 From: Thibault Cohen Date: Wed, 9 Mar 2016 00:55:36 -0500 Subject: [PATCH 138/287] SNMP fix concurrency issue closes #823 --- plugins/inputs/snmp/snmp.go | 73 +++++++++++++++++++++---------------- 1 file changed, 41 insertions(+), 32 deletions(-) diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 2af293d57..ba270cb1d 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -23,6 +23,13 @@ type Snmp struct { Table []Table Subtable []Subtable SnmptranslateFile string + + nameToOid map[string]string + initNode Node + subTableMap map[string]Subtable + + // TODO change as unexportable + //OidInstanceMapping map[string]map[string]string } type Host struct { @@ -110,16 +117,7 @@ type Node struct { subnodes map[string]Node } -var initNode = Node{ - id: "1", - name: "", - subnodes: make(map[string]Node), -} - -var SubTableMap = make(map[string]Subtable) - -var NameToOid = make(map[string]string) - +// TODO move this var to snmp struct var OidInstanceMapping = make(map[string]map[string]string) var sampleConfig = ` @@ -286,14 +284,24 @@ func findnodename(node Node, ids []string) (string, string) { } func (s *Snmp) Gather(acc telegraf.Accumulator) error { + // TODO put this in cache on first run // Create subtables mapping - if len(SubTableMap) == 0 { + if len(s.subTableMap) == 0 { + s.subTableMap = make(map[string]Subtable) for _, sb := range s.Subtable { - SubTableMap[sb.Name] = sb + s.subTableMap[sb.Name] = sb } } + // TODO put this in cache on first run // Create oid tree - if s.SnmptranslateFile != "" && len(initNode.subnodes) == 0 { + if s.SnmptranslateFile != "" && len(s.initNode.subnodes) == 0 { + s.nameToOid = make(map[string]string) + s.initNode = Node{ + id: "1", + name: "", + subnodes: make(map[string]Node), + } + data, err := ioutil.ReadFile(s.SnmptranslateFile) if err != nil { log.Printf("Reading SNMPtranslate file error: %s", err) @@ -305,8 +313,8 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { if oids[2] != "" { oid_name := oids[1] oid := oids[2] - fillnode(initNode, oid_name, strings.Split(string(oid), ".")) - NameToOid[oid_name] = oid + fillnode(s.initNode, oid_name, strings.Split(string(oid), ".")) + s.nameToOid[oid_name] = oid } } } @@ -330,7 +338,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { // Get Easy GET oids for _, oidstring := range host.GetOids { oid := Data{} - if val, ok := NameToOid[oidstring]; ok { + if val, ok := s.nameToOid[oidstring]; ok { // TODO should we add the 0 instance ? oid.Name = oidstring oid.Oid = val @@ -351,7 +359,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { // Get GET oids for _, oid := range s.Get { if oid.Name == oid_name { - if val, ok := NameToOid[oid.Oid]; ok { + if val, ok := s.nameToOid[oid.Oid]; ok { // TODO should we add the 0 instance ? if oid.Instance != "" { oid.rawOid = "." + val + "." + oid.Instance @@ -367,7 +375,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { // Get GETBULK oids for _, oid := range s.Bulk { if oid.Name == oid_name { - if val, ok := NameToOid[oid.Oid]; ok { + if val, ok := s.nameToOid[oid.Oid]; ok { oid.rawOid = "." + val } else { oid.rawOid = oid.Oid @@ -389,26 +397,27 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { } } // Launch Mapping + // TODO put this in cache on first run // TODO save mapping and computed oids // to do it only the first time - // only if len(OidInstanceMapping) == 0 + // only if len(s.OidInstanceMapping) == 0 if len(OidInstanceMapping) >= 0 { - if err := host.SNMPMap(acc); err != nil { + if err := host.SNMPMap(acc, s.nameToOid, s.subTableMap); err != nil { return err } } // Launch Get requests - if err := host.SNMPGet(acc); err != nil { + if err := host.SNMPGet(acc, s.initNode); err != nil { return err } - if err := host.SNMPBulk(acc); err != nil { + if err := host.SNMPBulk(acc, s.initNode); err != nil { return err } } return nil } -func (h *Host) SNMPMap(acc telegraf.Accumulator) error { +func (h *Host) SNMPMap(acc telegraf.Accumulator, nameToOid map[string]string, subTableMap map[string]Subtable) error { // Get snmp client snmpClient, err := h.GetSNMPClient() if err != nil { @@ -426,7 +435,7 @@ func (h *Host) SNMPMap(acc telegraf.Accumulator) error { // This is just a bulk request oid := Data{} oid.Oid = table.oid - if val, ok := NameToOid[oid.Oid]; ok { + if val, ok := nameToOid[oid.Oid]; ok { oid.rawOid = "." + val } else { oid.rawOid = oid.Oid @@ -441,7 +450,7 @@ func (h *Host) SNMPMap(acc telegraf.Accumulator) error { // ... we create a new Data (oid) object oid := Data{} // Looking for more information about this subtable - ssb, exists := SubTableMap[sb] + ssb, exists := subTableMap[sb] if exists { // We found a subtable section in config files oid.Oid = ssb.Oid @@ -528,7 +537,7 @@ func (h *Host) SNMPMap(acc telegraf.Accumulator) error { // Add table oid in bulk oid list oid := Data{} oid.Oid = table.oid - if val, ok := NameToOid[oid.Oid]; ok { + if val, ok := nameToOid[oid.Oid]; ok { oid.rawOid = "." + val } else { oid.rawOid = oid.Oid @@ -545,7 +554,7 @@ func (h *Host) SNMPMap(acc telegraf.Accumulator) error { // ... we create a new Data (oid) object oid := Data{} // Looking for more information about this subtable - ssb, exists := SubTableMap[sb] + ssb, exists := subTableMap[sb] if exists { // We found a subtable section in config files oid.Oid = ssb.Oid + key @@ -587,7 +596,7 @@ func (h *Host) SNMPMap(acc telegraf.Accumulator) error { return nil } -func (h *Host) SNMPGet(acc telegraf.Accumulator) error { +func (h *Host) SNMPGet(acc telegraf.Accumulator, initNode Node) error { // Get snmp client snmpClient, err := h.GetSNMPClient() if err != nil { @@ -620,7 +629,7 @@ func (h *Host) SNMPGet(acc telegraf.Accumulator) error { return err3 } // Handle response - _, err = h.HandleResponse(oidsList, result, acc) + _, err = h.HandleResponse(oidsList, result, acc, initNode) if err != nil { return err } @@ -628,7 +637,7 @@ func (h *Host) SNMPGet(acc telegraf.Accumulator) error { return nil } -func (h *Host) SNMPBulk(acc telegraf.Accumulator) error { +func (h *Host) SNMPBulk(acc telegraf.Accumulator, initNode Node) error { // Get snmp client snmpClient, err := h.GetSNMPClient() if err != nil { @@ -663,7 +672,7 @@ func (h *Host) SNMPBulk(acc telegraf.Accumulator) error { return err3 } // Handle response - last_oid, err := h.HandleResponse(oidsList, result, acc) + last_oid, err := h.HandleResponse(oidsList, result, acc, initNode) if err != nil { return err } @@ -715,7 +724,7 @@ func (h *Host) GetSNMPClient() (*gosnmp.GoSNMP, error) { return snmpClient, nil } -func (h *Host) HandleResponse(oids map[string]Data, result *gosnmp.SnmpPacket, acc telegraf.Accumulator) (string, error) { +func (h *Host) HandleResponse(oids map[string]Data, result *gosnmp.SnmpPacket, acc telegraf.Accumulator, initNode Node) (string, error) { var lastOid string for _, variable := range result.Variables { lastOid = variable.Name From ecbbb8426f0617efdbb70c174ca71e9b309a4e4e Mon Sep 17 00:00:00 2001 From: Thibault Cohen Date: Thu, 10 Mar 2016 14:41:03 -0500 Subject: [PATCH 139/287] Fix #828 closes #828 closes #829 --- CHANGELOG.md | 1 + plugins/inputs/net_response/README.md | 6 +++--- plugins/inputs/net_response/net_response.go | 2 +- plugins/inputs/net_response/net_response_test.go | 6 +++--- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 28ce825c5..8392d62b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ - [#708](https://github.com/influxdata/telegraf/issues/708): packaging: build ARM package - [#713](https://github.com/influxdata/telegraf/issues/713): packaging: insecure permissions error on log directory - [#816](https://github.com/influxdata/telegraf/issues/816): Fix phpfpm panic if fcgi endpoint unreachable. +- [#828](https://github.com/influxdata/telegraf/issues/828): fix net_response plugin overwriting host tag. ## v0.10.4.1 diff --git a/plugins/inputs/net_response/README.md b/plugins/inputs/net_response/README.md index 69e72a379..d6a0e1278 100644 --- a/plugins/inputs/net_response/README.md +++ b/plugins/inputs/net_response/README.md @@ -52,7 +52,7 @@ It can also check response text. ### Tags: - All measurements have the following tags: - - host + - server - port - protocol @@ -60,7 +60,7 @@ It can also check response text. ``` $ ./telegraf -config telegraf.conf -input-filter net_response -test -net_response,host=127.0.0.1,port=22,protocol=tcp response_time=0.18070360500000002,string_found=true 1454785464182527094 -net_response,host=127.0.0.1,port=2222,protocol=tcp response_time=1.090124776,string_found=false 1454784433658942325 +net_response,server=192.168.2.2,port=22,protocol=tcp response_time=0.18070360500000002,string_found=true 1454785464182527094 +net_response,server=192.168.2.2,port=2222,protocol=tcp response_time=1.090124776,string_found=false 1454784433658942325 ``` diff --git a/plugins/inputs/net_response/net_response.go b/plugins/inputs/net_response/net_response.go index 66bf2ae7b..7b5cfa785 100644 --- a/plugins/inputs/net_response/net_response.go +++ b/plugins/inputs/net_response/net_response.go @@ -169,7 +169,7 @@ func (c *NetResponse) Gather(acc telegraf.Accumulator) error { return errors.New("Bad port") } // Prepare data - tags := map[string]string{"host": host, "port": port} + tags := map[string]string{"server": host, "port": port} var fields map[string]interface{} // Gather data if c.Protocol == "tcp" { diff --git a/plugins/inputs/net_response/net_response_test.go b/plugins/inputs/net_response/net_response_test.go index 538d059c0..a6dfbcc94 100644 --- a/plugins/inputs/net_response/net_response_test.go +++ b/plugins/inputs/net_response/net_response_test.go @@ -69,7 +69,7 @@ func TestTCPOK1(t *testing.T) { "string_found": true, "response_time": 1.0, }, - map[string]string{"host": "127.0.0.1", + map[string]string{"server": "127.0.0.1", "port": "2004", "protocol": "tcp", }, @@ -109,7 +109,7 @@ func TestTCPOK2(t *testing.T) { "string_found": false, "response_time": 1.0, }, - map[string]string{"host": "127.0.0.1", + map[string]string{"server": "127.0.0.1", "port": "2004", "protocol": "tcp", }, @@ -164,7 +164,7 @@ func TestUDPOK1(t *testing.T) { "string_found": true, "response_time": 1.0, }, - map[string]string{"host": "127.0.0.1", + map[string]string{"server": "127.0.0.1", "port": "2004", "protocol": "udp", }, From db8c24cc7bf19f0758c72ac9915dde900e9f27b5 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 10 Mar 2016 13:40:03 +0100 Subject: [PATCH 140/287] Add a "kernel" plugin for /proc/stat statistics see #235 --- CHANGELOG.md | 3 +- README.md | 1 + etc/telegraf.conf | 4 + plugins/inputs/system/KERNEL_README.md | 64 ++++++++++ plugins/inputs/system/kernel.go | 110 +++++++++++++++++ plugins/inputs/system/kernel_test.go | 164 +++++++++++++++++++++++++ 6 files changed, 345 insertions(+), 1 deletion(-) create mode 100644 plugins/inputs/system/KERNEL_README.md create mode 100644 plugins/inputs/system/kernel.go create mode 100644 plugins/inputs/system/kernel_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 8392d62b3..4f7d245b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## v0.10.5 [unreleased] +## v0.11.0 [unreleased] ### Release Notes @@ -16,6 +16,7 @@ - [#776](https://github.com/influxdata/telegraf/pull/776): Add Zookeeper chroot option to kafka_consumer. Thanks @prune998! - [#811](https://github.com/influxdata/telegraf/pull/811): Add processes plugin for classifying total procs on system. Thanks @titilambert! - [#235](https://github.com/influxdata/telegraf/issues/235): Add number of users to the `system` input plugin. +- [#826](https://github.com/influxdata/telegraf/pull/826): "kernel" linux plugin for /proc/stat metrics (context switches, interrupts, etc.) ### Bugfixes - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" diff --git a/README.md b/README.md index fb9363100..8f9b0bc33 100644 --- a/README.md +++ b/README.md @@ -215,6 +215,7 @@ Currently implemented sources: * diskio * swap * processes + * kernel (/proc/stat) Telegraf can also collect metrics via the following service plugins: diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 3deb7f895..0e740f5c8 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -119,6 +119,10 @@ # Uncomment the following line if you do not need disk serial numbers. # skip_serial_number = true +# Get kernel statistics from /proc/stat +[[inputs.kernel]] + # no configuration + # Read metrics about memory usage [[inputs.mem]] # no configuration diff --git a/plugins/inputs/system/KERNEL_README.md b/plugins/inputs/system/KERNEL_README.md new file mode 100644 index 000000000..3285e59ef --- /dev/null +++ b/plugins/inputs/system/KERNEL_README.md @@ -0,0 +1,64 @@ +# Kernel Input Plugin + +This plugin is only available on Linux. + +The kernel plugin gathers info about the kernel that doesn't fit into other +plugins. In general, it is the statistics available in `/proc/stat` that are +not covered by other plugins. + +The metrics are documented in `man proc` under the `/proc/stat` section. + +``` +/proc/stat +kernel/system statistics. Varies with architecture. Common entries include: + +page 5741 1808 +The number of pages the system paged in and the number that were paged out (from disk). + +swap 1 0 +The number of swap pages that have been brought in and out. + +intr 1462898 +This line shows counts of interrupts serviced since boot time, for each of +the possible system interrupts. The first column is the total of all +interrupts serviced; each subsequent column is the total for a particular interrupt. + +ctxt 115315 +The number of context switches that the system underwent. + +btime 769041601 +boot time, in seconds since the Epoch, 1970-01-01 00:00:00 +0000 (UTC). + +processes 86031 +Number of forks since boot. +``` + +### Configuration: + +```toml +# Get kernel statistics from /proc/stat +[[inputs.kernel]] + # no configuration +``` + +### Measurements & Fields: + +- kernel + - boot_time (integer, seconds since epoch, `btime`) + - context_switches (integer, `ctxt`) + - disk_pages_in (integer, `page (0)`) + - disk_pages_out (integer, `page (1)`) + - interrupts (integer, `intr`) + - processes_forked (integer, `processes`) + +### Tags: + +None + +### Example Output: + +``` +$ telegraf -config ~/ws/telegraf.conf -input-filter kernel -test +* Plugin: kernel, Collection 1 +> kernel boot_time=1457505775i,context_switches=2626618i,disk_pages_in=5741i,disk_pages_out=1808i,interrupts=1472736i,processes_forked=10673i 1457613402960879816 +``` diff --git a/plugins/inputs/system/kernel.go b/plugins/inputs/system/kernel.go new file mode 100644 index 000000000..900400146 --- /dev/null +++ b/plugins/inputs/system/kernel.go @@ -0,0 +1,110 @@ +// +build linux + +package system + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "strconv" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// /proc/stat file line prefixes to gather stats on: +var ( + interrupts = []byte("intr") + context_switches = []byte("ctxt") + processes_forked = []byte("processes") + disk_pages = []byte("page") + boot_time = []byte("btime") +) + +type Kernel struct { + statFile string +} + +func (k *Kernel) Description() string { + return "Get kernel statistics from /proc/stat" +} + +func (k *Kernel) SampleConfig() string { return "" } + +func (k *Kernel) Gather(acc telegraf.Accumulator) error { + data, err := k.getProcStat() + if err != nil { + return err + } + + fields := make(map[string]interface{}) + + dataFields := bytes.Fields(data) + for i, field := range dataFields { + switch { + case bytes.Equal(field, interrupts): + m, err := strconv.Atoi(string(dataFields[i+1])) + if err != nil { + return err + } + fields["interrupts"] = int64(m) + case bytes.Equal(field, context_switches): + m, err := strconv.Atoi(string(dataFields[i+1])) + if err != nil { + return err + } + fields["context_switches"] = int64(m) + case bytes.Equal(field, processes_forked): + m, err := strconv.Atoi(string(dataFields[i+1])) + if err != nil { + return err + } + fields["processes_forked"] = int64(m) + case bytes.Equal(field, boot_time): + m, err := strconv.Atoi(string(dataFields[i+1])) + if err != nil { + return err + } + fields["boot_time"] = int64(m) + case bytes.Equal(field, disk_pages): + in, err := strconv.Atoi(string(dataFields[i+1])) + if err != nil { + return err + } + out, err := strconv.Atoi(string(dataFields[i+2])) + if err != nil { + return err + } + fields["disk_pages_in"] = int64(in) + fields["disk_pages_out"] = int64(out) + } + } + + acc.AddFields("kernel", fields, map[string]string{}) + + return nil +} + +func (k *Kernel) getProcStat() ([]byte, error) { + if _, err := os.Stat(k.statFile); os.IsNotExist(err) { + return nil, fmt.Errorf("kernel: %s does not exist!", k.statFile) + } else if err != nil { + return nil, err + } + + data, err := ioutil.ReadFile(k.statFile) + if err != nil { + return nil, err + } + + return data, nil +} + +func init() { + inputs.Add("kernel", func() telegraf.Input { + return &Kernel{ + statFile: "/proc/stat", + } + }) +} diff --git a/plugins/inputs/system/kernel_test.go b/plugins/inputs/system/kernel_test.go new file mode 100644 index 000000000..398cba4cc --- /dev/null +++ b/plugins/inputs/system/kernel_test.go @@ -0,0 +1,164 @@ +// +build linux + +package system + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/assert" +) + +func TestFullProcFile(t *testing.T) { + tmpfile := makeFakeStatFile([]byte(statFile_Full)) + defer os.Remove(tmpfile) + + k := Kernel{ + statFile: tmpfile, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + assert.NoError(t, err) + + fields := map[string]interface{}{ + "boot_time": int64(1457505775), + "context_switches": int64(2626618), + "disk_pages_in": int64(5741), + "disk_pages_out": int64(1808), + "interrupts": int64(1472736), + "processes_forked": int64(10673), + } + acc.AssertContainsFields(t, "kernel", fields) +} + +func TestPartialProcFile(t *testing.T) { + tmpfile := makeFakeStatFile([]byte(statFile_Partial)) + defer os.Remove(tmpfile) + + k := Kernel{ + statFile: tmpfile, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + assert.NoError(t, err) + + fields := map[string]interface{}{ + "boot_time": int64(1457505775), + "context_switches": int64(2626618), + "disk_pages_in": int64(5741), + "disk_pages_out": int64(1808), + "interrupts": int64(1472736), + } + acc.AssertContainsFields(t, "kernel", fields) +} + +func TestInvalidProcFile1(t *testing.T) { + tmpfile := makeFakeStatFile([]byte(statFile_Invalid)) + defer os.Remove(tmpfile) + + k := Kernel{ + statFile: tmpfile, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + assert.Error(t, err) +} + +func TestInvalidProcFile2(t *testing.T) { + tmpfile := makeFakeStatFile([]byte(statFile_Invalid2)) + defer os.Remove(tmpfile) + + k := Kernel{ + statFile: tmpfile, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + assert.Error(t, err) +} + +func TestNoProcFile(t *testing.T) { + tmpfile := makeFakeStatFile([]byte(statFile_Invalid2)) + os.Remove(tmpfile) + + k := Kernel{ + statFile: tmpfile, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + assert.Error(t, err) + assert.Contains(t, err.Error(), "does not exist") +} + +const statFile_Full = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 +cpu0 6796 252 5655 10444977 175 0 101 0 0 0 +intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ctxt 2626618 +btime 1457505775 +processes 10673 +procs_running 2 +procs_blocked 0 +softirq 1031662 0 649485 20946 111071 11620 0 1 0 994 237545 +page 5741 1808 +swap 1 0 +` + +const statFile_Partial = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 +cpu0 6796 252 5655 10444977 175 0 101 0 0 0 +intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ctxt 2626618 +btime 1457505775 +procs_running 2 +procs_blocked 0 +softirq 1031662 0 649485 20946 111071 11620 0 1 0 994 237545 +page 5741 1808 +` + +// missing btime measurement +const statFile_Invalid = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 +cpu0 6796 252 5655 10444977 175 0 101 0 0 0 +intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ctxt 2626618 +btime +processes 10673 +procs_running 2 +procs_blocked 0 +softirq 1031662 0 649485 20946 111071 11620 0 1 0 994 237545 +page 5741 1808 +swap 1 0 +` + +// missing second page measurement +const statFile_Invalid2 = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 +cpu0 6796 252 5655 10444977 175 0 101 0 0 0 +intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ctxt 2626618 +processes 10673 +procs_running 2 +page 5741 +procs_blocked 0 +softirq 1031662 0 649485 20946 111071 11620 0 1 0 994 237545 +` + +func makeFakeStatFile(content []byte) string { + tmpfile, err := ioutil.TempFile("", "kerneltest") + if err != nil { + panic(err) + } + + if _, err := tmpfile.Write(content); err != nil { + panic(err) + } + if err := tmpfile.Close(); err != nil { + panic(err) + } + + return tmpfile.Name() +} From 8c6c144f28199ebe9f6aedb068c82d18fd795577 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 14 Mar 2016 10:28:01 +0000 Subject: [PATCH 141/287] influxdb output: If all write fails, trigger a reconnect closes #836 --- plugins/outputs/influxdb/influxdb.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 5eef553a2..fca6b1db1 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -191,6 +191,12 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { break } } + + // If all of the writes failed, create a new connection array so that + // i.Connect() will be called on the next gather. + if err != nil { + i.conns = make([]client.Client) + } return err } From d3925890b14343d794e8fda88a0369119f901d5b Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 14 Mar 2016 10:29:43 +0000 Subject: [PATCH 142/287] github wh: return from eventHandler when err != nil closes #837 --- plugins/inputs/github_webhooks/github_webhooks.go | 3 +++ plugins/outputs/influxdb/influxdb.go | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/github_webhooks/github_webhooks.go b/plugins/inputs/github_webhooks/github_webhooks.go index bc3f184be..726eef037 100644 --- a/plugins/inputs/github_webhooks/github_webhooks.go +++ b/plugins/inputs/github_webhooks/github_webhooks.go @@ -73,14 +73,17 @@ func (gh *GithubWebhooks) Stop() { // Handles the / route func (gh *GithubWebhooks) eventHandler(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() eventType := r.Header["X-Github-Event"][0] data, err := ioutil.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) + return } e, err := NewEvent(data, eventType) if err != nil { w.WriteHeader(http.StatusBadRequest) + return } gh.Lock() gh.events = append(gh.events, e) diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index fca6b1db1..d72a07754 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -195,7 +195,7 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { // If all of the writes failed, create a new connection array so that // i.Connect() will be called on the next gather. if err != nil { - i.conns = make([]client.Client) + i.conns = make([]client.Client, 0) } return err } From a4d60d9750a9618bb9b4cb112e3e99219b7ba725 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 14 Mar 2016 10:49:28 +0000 Subject: [PATCH 143/287] Update Godeps_windows closes #839 --- Godeps_windows | 63 ++++++++++++++++++++++++-------------------------- 1 file changed, 30 insertions(+), 33 deletions(-) diff --git a/Godeps_windows b/Godeps_windows index dd46184ec..c4a2561d1 100644 --- a/Godeps_windows +++ b/Godeps_windows @@ -1,56 +1,53 @@ git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git 617c801af238c3af2d9e72c5d4a0f02edad03ce5 -github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef -github.com/Sirupsen/logrus f7f79f729e0fbe2fcc061db48a9ba0263f588252 +github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9 +github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5 -github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339 -github.com/aws/aws-sdk-go 87b1e60a50b09e4812dee560b33a238f67305804 -github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d +github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687 +github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857 +github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4 github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99 -github.com/dancannon/gorethink 6f088135ff288deb9d5546f4c71919207f891a70 +github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3 github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367 -github.com/fsouza/go-dockerclient 7b651349f9479f5114913eefbfd3c4eeddd79ab4 -github.com/go-ini/ini afbd495e5aaea13597b5e14fe514ddeaa4d76fc3 +github.com/fsouza/go-dockerclient a49c8269a6899cae30da1f8a4b82e0ce945f9967 github.com/go-ole/go-ole 50055884d646dd9434f16bbb5c9801749b9bafe4 -github.com/go-sql-driver/mysql 7c7f556282622f94213bc028b4d0a7b6151ba239 -github.com/golang/protobuf 6aaa8d47701fa6cf07e914ec01fde3d4a1fe79c3 -github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a +github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee +github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032 +github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7 github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2 -github.com/gorilla/context 1c83b3eabd45b6d76072b66b746c20815fb2872d -github.com/gorilla/mux 26a6070f849969ba72b72256e9f14cf519751690 +github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a +github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478 -github.com/influxdata/config bae7cb98197d842374d3b8403905924094930f24 -github.com/influxdata/influxdb ef571fc104dc24b77cd3710c156cd95e5cfd7aa5 -github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264 -github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38 -github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f +github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da +github.com/influxdata/influxdb e3fef5593c21644f2b43af55d6e17e70910b0e48 +github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0 +github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720 +github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36 github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1 github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453 -github.com/miekg/dns e0d84d97e59bcb6561eae269c4e94d25b66822cb +github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504 github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b -github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9 -github.com/nats-io/nats 6a83f1a633cfbfd90aa648ac99fb38c06a8b40df -github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f -github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2 -github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f +github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3 +github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa +github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980 +github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831 github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 -github.com/prometheus/common 14ca1097bbe21584194c15e391a9dab95ad42a59 +github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37 github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8 github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f -github.com/shirou/gopsutil e77438504d45b9985c99a75730fe65220ceea00e +github.com/shirou/gopsutil 1de1357e7737a536c7f4ff6be7bd27977db4d2cb github.com/shirou/w32 ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5 github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744 -github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94 -github.com/stretchr/testify f390dcf405f7b83c997eac1b06768bb9f44dec18 +github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3 github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8 github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363 -golang.org/x/net 04b9de9b512f58addf28c9853d50ebef61c3953e -golang.org/x/text 6d3c22c4525a4da167968fa2479be5524d2e8bd0 -gopkg.in/dancannon/gorethink.v1 6f088135ff288deb9d5546f4c71919207f891a70 +golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172 +golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34 +gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715 -gopkg.in/mgo.v2 03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64 -gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4 +gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886 +gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4 From 2fbcb5c6d851b8140b88580cd4259eb2cda9a98a Mon Sep 17 00:00:00 2001 From: Thomas Menard Date: Mon, 14 Mar 2016 10:32:07 +0100 Subject: [PATCH 144/287] Fix postgresql password exposure in metrics Fix the password exposure in the metrics or tags. closes #821 closes #845 --- CHANGELOG.md | 1 + plugins/inputs/postgresql/postgresql.go | 37 +++++++++++++++++++++---- 2 files changed, 32 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f7d245b2..3545c35c6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,7 @@ - [#713](https://github.com/influxdata/telegraf/issues/713): packaging: insecure permissions error on log directory - [#816](https://github.com/influxdata/telegraf/issues/816): Fix phpfpm panic if fcgi endpoint unreachable. - [#828](https://github.com/influxdata/telegraf/issues/828): fix net_response plugin overwriting host tag. +- [#821](https://github.com/influxdata/telegraf/issues/821): Remove postgres password from server tag. Thanks @menardorama! ## v0.10.4.1 diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index fe2a56576..d8d0d1978 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -4,20 +4,22 @@ import ( "bytes" "database/sql" "fmt" + "regexp" "sort" "strings" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - _ "github.com/lib/pq" + "github.com/lib/pq" ) type Postgresql struct { - Address string - Databases []string - OrderedColumns []string - AllColumns []string + Address string + Databases []string + OrderedColumns []string + AllColumns []string + sanitizedAddress string } var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true} @@ -133,6 +135,23 @@ type scanner interface { Scan(dest ...interface{}) error } +var passwordKVMatcher, _ = regexp.Compile("password=\\S+ ?") + +func (p *Postgresql) SanitizedAddress() (_ string, err error) { + var canonicalizedAddress string + if strings.HasPrefix(p.Address, "postgres://") || strings.HasPrefix(p.Address, "postgresql://") { + canonicalizedAddress, err = pq.ParseURL(p.Address) + if err != nil { + return p.sanitizedAddress, err + } + } else { + canonicalizedAddress = p.Address + } + p.sanitizedAddress = passwordKVMatcher.ReplaceAllString(canonicalizedAddress, "") + + return p.sanitizedAddress, err +} + func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator) error { var columnVars []interface{} var dbname bytes.Buffer @@ -165,7 +184,13 @@ func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator) error { dbname.WriteString("postgres") } - tags := map[string]string{"server": p.Address, "db": dbname.String()} + var tagAddress string + tagAddress, err = p.SanitizedAddress() + if err != nil { + return err + } + + tags := map[string]string{"server": tagAddress, "db": dbname.String()} fields := make(map[string]interface{}) for col, val := range columnMap { From 4ea0c707c12a9c25f04c9e53fafffb087c3dcd16 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 14 Mar 2016 12:56:33 +0000 Subject: [PATCH 145/287] Input plugin for running ntp queries see #235 --- CHANGELOG.md | 1 + plugins/inputs/EXAMPLE_README.md | 2 - plugins/inputs/all/all.go | 1 + plugins/inputs/ntpq/README.md | 60 ++++ plugins/inputs/ntpq/ntpq.go | 202 +++++++++++++ plugins/inputs/ntpq/ntpq_test.go | 422 ++++++++++++++++++++++++++++ plugins/inputs/ntpq/ntpq_windows.go | 3 + 7 files changed, 689 insertions(+), 2 deletions(-) create mode 100644 plugins/inputs/ntpq/README.md create mode 100644 plugins/inputs/ntpq/ntpq.go create mode 100644 plugins/inputs/ntpq/ntpq_test.go create mode 100644 plugins/inputs/ntpq/ntpq_windows.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 3545c35c6..cfba45536 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ - [#811](https://github.com/influxdata/telegraf/pull/811): Add processes plugin for classifying total procs on system. Thanks @titilambert! - [#235](https://github.com/influxdata/telegraf/issues/235): Add number of users to the `system` input plugin. - [#826](https://github.com/influxdata/telegraf/pull/826): "kernel" linux plugin for /proc/stat metrics (context switches, interrupts, etc.) +- [#847](https://github.com/influxdata/telegraf/pull/847): `ntpq`: Input plugin for running ntp query executable and gathering metrics. ### Bugfixes - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" diff --git a/plugins/inputs/EXAMPLE_README.md b/plugins/inputs/EXAMPLE_README.md index 9207cd2ab..6bebf1e88 100644 --- a/plugins/inputs/EXAMPLE_README.md +++ b/plugins/inputs/EXAMPLE_README.md @@ -30,8 +30,6 @@ The example plugin gathers metrics about example things ### Example Output: -Give an example `-test` output here - ``` $ ./telegraf -config telegraf.conf -input-filter example -test measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455 diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 2808ce2b5..a3300df66 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -29,6 +29,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/net_response" _ "github.com/influxdata/telegraf/plugins/inputs/nginx" _ "github.com/influxdata/telegraf/plugins/inputs/nsq" + _ "github.com/influxdata/telegraf/plugins/inputs/ntpq" _ "github.com/influxdata/telegraf/plugins/inputs/passenger" _ "github.com/influxdata/telegraf/plugins/inputs/phpfpm" _ "github.com/influxdata/telegraf/plugins/inputs/ping" diff --git a/plugins/inputs/ntpq/README.md b/plugins/inputs/ntpq/README.md new file mode 100644 index 000000000..80bf80f39 --- /dev/null +++ b/plugins/inputs/ntpq/README.md @@ -0,0 +1,60 @@ +# ntpq Input Plugin + +Get standard NTP query metrics, requires ntpq executable. + +Below is the documentation of the various headers returned from the NTP query +command when running `ntpq -p`. + +- remote – The remote peer or server being synced to. “LOCAL” is this local host +(included in case there are no remote peers or servers available); +- refid – Where or what the remote peer or server is itself synchronised to; +- st (stratum) – The remote peer or server Stratum +- t (type) – Type (u: unicast or manycast client, b: broadcast or multicast client, +l: local reference clock, s: symmetric peer, A: manycast server, +B: broadcast server, M: multicast server, see “Automatic Server Discovery“); +- when – When last polled (seconds ago, “h” hours ago, or “d” days ago); +- poll – Polling frequency: rfc5905 suggests this ranges in NTPv4 from 4 (16s) +to 17 (36h) (log2 seconds), however observation suggests the actual displayed +value is seconds for a much smaller range of 64 (26) to 1024 (210) seconds; +- reach – An 8-bit left-shift shift register value recording polls (bit set = +successful, bit reset = fail) displayed in octal; +- delay – Round trip communication delay to the remote peer or server (milliseconds); +- offset – Mean offset (phase) in the times reported between this local host and +the remote peer or server (RMS, milliseconds); +- jitter – Mean deviation (jitter) in the time reported for that remote peer or +server (RMS of difference of multiple time samples, milliseconds); + +### Configuration: + +```toml +# Get standard NTP query metrics, requires ntpq executable +[[inputs.ntpq]] + ## If false, set the -n ntpq flag. Can reduce metric gather times. + dns_lookup = true +``` + +### Measurements & Fields: + +- ntpq + - delay (float, milliseconds) + - jitter (float, milliseconds) + - offset (float, milliseconds) + - poll (int, seconds) + - reach (int) + - when (int, seconds) + +### Tags: + +- All measurements have the following tags: + - refid + - remote + - type + - stratum + +### Example Output: + +``` +$ telegraf -config ~/ws/telegraf.conf -input-filter ntpq -test +* Plugin: ntpq, Collection 1 +> ntpq,refid=.GPSs.,remote=*time.apple.com,stratum=1,type=u delay=91.797,jitter=3.735,offset=12.841,poll=64i,reach=377i,when=35i 1457960478909556134 +``` diff --git a/plugins/inputs/ntpq/ntpq.go b/plugins/inputs/ntpq/ntpq.go new file mode 100644 index 000000000..5e8ff6536 --- /dev/null +++ b/plugins/inputs/ntpq/ntpq.go @@ -0,0 +1,202 @@ +// +build !windows + +package ntpq + +import ( + "bufio" + "bytes" + "log" + "os/exec" + "strconv" + "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Mapping of ntpq header names to tag keys +var tagHeaders map[string]string = map[string]string{ + "remote": "remote", + "refid": "refid", + "st": "stratum", + "t": "type", +} + +// Mapping of the ntpq tag key to the index in the command output +var tagI map[string]int = map[string]int{ + "remote": -1, + "refid": -1, + "stratum": -1, + "type": -1, +} + +// Mapping of float metrics to their index in the command output +var floatI map[string]int = map[string]int{ + "delay": -1, + "offset": -1, + "jitter": -1, +} + +// Mapping of int metrics to their index in the command output +var intI map[string]int = map[string]int{ + "when": -1, + "poll": -1, + "reach": -1, +} + +type NTPQ struct { + runQ func() ([]byte, error) + + DNSLookup bool `toml:"dns_lookup"` +} + +func (n *NTPQ) Description() string { + return "Get standard NTP query metrics, requires ntpq executable." +} + +func (n *NTPQ) SampleConfig() string { + return ` + ## If false, set the -n ntpq flag. Can reduce metric gather time. + dns_lookup = true +` +} + +func (n *NTPQ) Gather(acc telegraf.Accumulator) error { + out, err := n.runQ() + if err != nil { + return err + } + + lineCounter := 0 + scanner := bufio.NewScanner(bytes.NewReader(out)) + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) < 2 { + continue + } + + // If lineCounter == 0, then this is the header line + if lineCounter == 0 { + for i, field := range fields { + // Check if field is a tag: + if tagKey, ok := tagHeaders[field]; ok { + tagI[tagKey] = i + continue + } + + // check if field is a float metric: + if _, ok := floatI[field]; ok { + floatI[field] = i + continue + } + + // check if field is an int metric: + if _, ok := intI[field]; ok { + intI[field] = i + continue + } + } + } else { + tags := make(map[string]string) + mFields := make(map[string]interface{}) + + // Get tags from output + for key, index := range tagI { + if index == -1 { + continue + } + tags[key] = fields[index] + } + + // Get integer metrics from output + for key, index := range intI { + if index == -1 { + continue + } + + if key == "when" { + when := fields[index] + switch { + case strings.HasSuffix(when, "h"): + m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "h")) + if err != nil { + log.Printf("ERROR ntpq: parsing int: %s", fields[index]) + continue + } + // seconds in an hour + mFields[key] = int64(m) * 360 + continue + case strings.HasSuffix(when, "d"): + m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "d")) + if err != nil { + log.Printf("ERROR ntpq: parsing int: %s", fields[index]) + continue + } + // seconds in a day + mFields[key] = int64(m) * 86400 + continue + case strings.HasSuffix(when, "m"): + m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "m")) + if err != nil { + log.Printf("ERROR ntpq: parsing int: %s", fields[index]) + continue + } + // seconds in a day + mFields[key] = int64(m) * 60 + continue + } + } + + m, err := strconv.Atoi(fields[index]) + if err != nil { + log.Printf("ERROR ntpq: parsing int: %s", fields[index]) + continue + } + mFields[key] = int64(m) + } + + // get float metrics from output + for key, index := range floatI { + if index == -1 { + continue + } + + m, err := strconv.ParseFloat(fields[index], 64) + if err != nil { + log.Printf("ERROR ntpq: parsing float: %s", fields[index]) + continue + } + mFields[key] = m + } + + acc.AddFields("ntpq", mFields, tags) + } + + lineCounter++ + } + return nil +} + +func (n *NTPQ) runq() ([]byte, error) { + bin, err := exec.LookPath("ntpq") + if err != nil { + return nil, err + } + + var cmd *exec.Cmd + if n.DNSLookup { + cmd = exec.Command(bin, "-p") + } else { + cmd = exec.Command(bin, "-p", "-n") + } + + return cmd.Output() +} + +func init() { + inputs.Add("ntpq", func() telegraf.Input { + n := &NTPQ{} + n.runQ = n.runq + return n + }) +} diff --git a/plugins/inputs/ntpq/ntpq_test.go b/plugins/inputs/ntpq/ntpq_test.go new file mode 100644 index 000000000..228eddc62 --- /dev/null +++ b/plugins/inputs/ntpq/ntpq_test.go @@ -0,0 +1,422 @@ +// +build !windows + +package ntpq + +import ( + "fmt" + "testing" + + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/assert" +) + +func TestSingleNTPQ(t *testing.T) { + tt := tester{ + ret: []byte(singleNTPQ), + err: nil, + } + n := &NTPQ{ + runQ: tt.runqTest, + } + + acc := testutil.Accumulator{} + assert.NoError(t, n.Gather(&acc)) + + fields := map[string]interface{}{ + "when": int64(101), + "poll": int64(256), + "reach": int64(37), + "delay": float64(51.016), + "offset": float64(233.010), + "jitter": float64(17.462), + } + tags := map[string]string{ + "remote": "*uschi5-ntp-002.", + "refid": "10.177.80.46", + "stratum": "2", + "type": "u", + } + acc.AssertContainsTaggedFields(t, "ntpq", fields, tags) +} + +func TestBadIntNTPQ(t *testing.T) { + tt := tester{ + ret: []byte(badIntParseNTPQ), + err: nil, + } + n := &NTPQ{ + runQ: tt.runqTest, + } + + acc := testutil.Accumulator{} + assert.NoError(t, n.Gather(&acc)) + + fields := map[string]interface{}{ + "when": int64(101), + "reach": int64(37), + "delay": float64(51.016), + "offset": float64(233.010), + "jitter": float64(17.462), + } + tags := map[string]string{ + "remote": "*uschi5-ntp-002.", + "refid": "10.177.80.46", + "stratum": "2", + "type": "u", + } + acc.AssertContainsTaggedFields(t, "ntpq", fields, tags) +} + +func TestBadFloatNTPQ(t *testing.T) { + tt := tester{ + ret: []byte(badFloatParseNTPQ), + err: nil, + } + n := &NTPQ{ + runQ: tt.runqTest, + } + + acc := testutil.Accumulator{} + assert.NoError(t, n.Gather(&acc)) + + fields := map[string]interface{}{ + "when": int64(2), + "poll": int64(256), + "reach": int64(37), + "delay": float64(51.016), + "jitter": float64(17.462), + } + tags := map[string]string{ + "remote": "*uschi5-ntp-002.", + "refid": "10.177.80.46", + "stratum": "2", + "type": "u", + } + acc.AssertContainsTaggedFields(t, "ntpq", fields, tags) +} + +func TestDaysNTPQ(t *testing.T) { + tt := tester{ + ret: []byte(whenDaysNTPQ), + err: nil, + } + n := &NTPQ{ + runQ: tt.runqTest, + } + + acc := testutil.Accumulator{} + assert.NoError(t, n.Gather(&acc)) + + fields := map[string]interface{}{ + "when": int64(172800), + "poll": int64(256), + "reach": int64(37), + "delay": float64(51.016), + "offset": float64(233.010), + "jitter": float64(17.462), + } + tags := map[string]string{ + "remote": "*uschi5-ntp-002.", + "refid": "10.177.80.46", + "stratum": "2", + "type": "u", + } + acc.AssertContainsTaggedFields(t, "ntpq", fields, tags) +} + +func TestHoursNTPQ(t *testing.T) { + tt := tester{ + ret: []byte(whenHoursNTPQ), + err: nil, + } + n := &NTPQ{ + runQ: tt.runqTest, + } + + acc := testutil.Accumulator{} + assert.NoError(t, n.Gather(&acc)) + + fields := map[string]interface{}{ + "when": int64(720), + "poll": int64(256), + "reach": int64(37), + "delay": float64(51.016), + "offset": float64(233.010), + "jitter": float64(17.462), + } + tags := map[string]string{ + "remote": "*uschi5-ntp-002.", + "refid": "10.177.80.46", + "stratum": "2", + "type": "u", + } + acc.AssertContainsTaggedFields(t, "ntpq", fields, tags) +} + +func TestMinutesNTPQ(t *testing.T) { + tt := tester{ + ret: []byte(whenMinutesNTPQ), + err: nil, + } + n := &NTPQ{ + runQ: tt.runqTest, + } + + acc := testutil.Accumulator{} + assert.NoError(t, n.Gather(&acc)) + + fields := map[string]interface{}{ + "when": int64(120), + "poll": int64(256), + "reach": int64(37), + "delay": float64(51.016), + "offset": float64(233.010), + "jitter": float64(17.462), + } + tags := map[string]string{ + "remote": "*uschi5-ntp-002.", + "refid": "10.177.80.46", + "stratum": "2", + "type": "u", + } + acc.AssertContainsTaggedFields(t, "ntpq", fields, tags) +} + +func TestBadWhenNTPQ(t *testing.T) { + tt := tester{ + ret: []byte(whenBadNTPQ), + err: nil, + } + n := &NTPQ{ + runQ: tt.runqTest, + } + + acc := testutil.Accumulator{} + assert.NoError(t, n.Gather(&acc)) + + fields := map[string]interface{}{ + "poll": int64(256), + "reach": int64(37), + "delay": float64(51.016), + "offset": float64(233.010), + "jitter": float64(17.462), + } + tags := map[string]string{ + "remote": "*uschi5-ntp-002.", + "refid": "10.177.80.46", + "stratum": "2", + "type": "u", + } + acc.AssertContainsTaggedFields(t, "ntpq", fields, tags) +} + +func TestMultiNTPQ(t *testing.T) { + tt := tester{ + ret: []byte(multiNTPQ), + err: nil, + } + n := &NTPQ{ + runQ: tt.runqTest, + } + + acc := testutil.Accumulator{} + assert.NoError(t, n.Gather(&acc)) + + fields := map[string]interface{}{ + "delay": float64(54.033), + "jitter": float64(449514), + "offset": float64(243.426), + "poll": int64(1024), + "reach": int64(377), + "when": int64(740), + } + tags := map[string]string{ + "refid": "10.177.80.37", + "remote": "83.137.98.96", + "stratum": "2", + "type": "u", + } + acc.AssertContainsTaggedFields(t, "ntpq", fields, tags) + + fields = map[string]interface{}{ + "delay": float64(60.785), + "jitter": float64(449539), + "offset": float64(232.597), + "poll": int64(1024), + "reach": int64(377), + "when": int64(739), + } + tags = map[string]string{ + "refid": "10.177.80.37", + "remote": "81.7.16.52", + "stratum": "2", + "type": "u", + } + acc.AssertContainsTaggedFields(t, "ntpq", fields, tags) +} + +func TestBadHeaderNTPQ(t *testing.T) { + resetVars() + tt := tester{ + ret: []byte(badHeaderNTPQ), + err: nil, + } + n := &NTPQ{ + runQ: tt.runqTest, + } + + acc := testutil.Accumulator{} + assert.NoError(t, n.Gather(&acc)) + + fields := map[string]interface{}{ + "when": int64(101), + "poll": int64(256), + "reach": int64(37), + "delay": float64(51.016), + "offset": float64(233.010), + "jitter": float64(17.462), + } + tags := map[string]string{ + "remote": "*uschi5-ntp-002.", + "refid": "10.177.80.46", + "type": "u", + } + acc.AssertContainsTaggedFields(t, "ntpq", fields, tags) +} + +func TestMissingDelayColumnNTPQ(t *testing.T) { + resetVars() + tt := tester{ + ret: []byte(missingDelayNTPQ), + err: nil, + } + n := &NTPQ{ + runQ: tt.runqTest, + } + + acc := testutil.Accumulator{} + assert.NoError(t, n.Gather(&acc)) + + fields := map[string]interface{}{ + "when": int64(101), + "poll": int64(256), + "reach": int64(37), + "offset": float64(233.010), + "jitter": float64(17.462), + } + tags := map[string]string{ + "remote": "*uschi5-ntp-002.", + "refid": "10.177.80.46", + "type": "u", + } + acc.AssertContainsTaggedFields(t, "ntpq", fields, tags) +} + +func TestFailedNTPQ(t *testing.T) { + tt := tester{ + ret: []byte(singleNTPQ), + err: fmt.Errorf("Test failure"), + } + n := &NTPQ{ + runQ: tt.runqTest, + } + + acc := testutil.Accumulator{} + assert.Error(t, n.Gather(&acc)) +} + +type tester struct { + ret []byte + err error +} + +func (t *tester) runqTest() ([]byte, error) { + return t.ret, t.err +} + +func resetVars() { + // Mapping of ntpq header names to tag keys + tagHeaders = map[string]string{ + "remote": "remote", + "refid": "refid", + "st": "stratum", + "t": "type", + } + + // Mapping of the ntpq tag key to the index in the command output + tagI = map[string]int{ + "remote": -1, + "refid": -1, + "stratum": -1, + "type": -1, + } + + // Mapping of float metrics to their index in the command output + floatI = map[string]int{ + "delay": -1, + "offset": -1, + "jitter": -1, + } + + // Mapping of int metrics to their index in the command output + intI = map[string]int{ + "when": -1, + "poll": -1, + "reach": -1, + } +} + +var singleNTPQ = ` remote refid st t when poll reach delay offset jitter +============================================================================== +*uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 17.462 +` + +var badHeaderNTPQ = `remote refid foobar t when poll reach delay offset jitter +============================================================================== +*uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 17.462 +` + +var missingDelayNTPQ = `remote refid foobar t when poll reach offset jitter +============================================================================== +*uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 233.010 17.462 +` + +var whenDaysNTPQ = ` remote refid st t when poll reach delay offset jitter +============================================================================== +*uschi5-ntp-002. 10.177.80.46 2 u 2d 256 37 51.016 233.010 17.462 +` + +var whenHoursNTPQ = ` remote refid st t when poll reach delay offset jitter +============================================================================== +*uschi5-ntp-002. 10.177.80.46 2 u 2h 256 37 51.016 233.010 17.462 +` + +var whenMinutesNTPQ = ` remote refid st t when poll reach delay offset jitter +============================================================================== +*uschi5-ntp-002. 10.177.80.46 2 u 2m 256 37 51.016 233.010 17.462 +` + +var whenBadNTPQ = ` remote refid st t when poll reach delay offset jitter +============================================================================== +*uschi5-ntp-002. 10.177.80.46 2 u 2q 256 37 51.016 233.010 17.462 +` + +var badFloatParseNTPQ = ` remote refid st t when poll reach delay offset jitter +============================================================================== +*uschi5-ntp-002. 10.177.80.46 2 u 2 256 37 51.016 foobar 17.462 +` + +var badIntParseNTPQ = ` remote refid st t when poll reach delay offset jitter +============================================================================== +*uschi5-ntp-002. 10.177.80.46 2 u 101 foobar 37 51.016 233.010 17.462 +` + +var multiNTPQ = ` remote refid st t when poll reach delay offset jitter +============================================================================== + 83.137.98.96 10.177.80.37 2 u 740 1024 377 54.033 243.426 449514. + 81.7.16.52 10.177.80.37 2 u 739 1024 377 60.785 232.597 449539. + 131.188.3.221 10.177.80.37 2 u 783 1024 377 111.820 261.921 449528. + 5.9.29.107 10.177.80.37 2 u 703 1024 377 205.704 160.406 449602. + 91.189.94.4 10.177.80.37 2 u 673 1024 377 143.047 274.726 449445. +` diff --git a/plugins/inputs/ntpq/ntpq_windows.go b/plugins/inputs/ntpq/ntpq_windows.go new file mode 100644 index 000000000..a1f1a55fa --- /dev/null +++ b/plugins/inputs/ntpq/ntpq_windows.go @@ -0,0 +1,3 @@ +// +build windows + +package ntpq From b6dc9c004b2967bfc729c5968e9a95f4e22df125 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 14 Mar 2016 17:19:46 +0000 Subject: [PATCH 146/287] Release 0.11.0 --- CHANGELOG.md | 2 +- README.md | 22 +++++++++++----------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cfba45536..9c886921a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## v0.11.0 [unreleased] +## v0.11.0 [2016-02-15] ### Release Notes diff --git a/README.md b/README.md index 8f9b0bc33..b32aba561 100644 --- a/README.md +++ b/README.md @@ -27,12 +27,12 @@ the [release blog post](https://influxdata.com/blog/announcing-telegraf-0-10-0/) ### Linux deb and rpm Packages: Latest: -* http://get.influxdb.org/telegraf/telegraf_0.10.4.1-1_amd64.deb -* http://get.influxdb.org/telegraf/telegraf-0.10.4.1-1.x86_64.rpm +* http://get.influxdb.org/telegraf/telegraf_0.11.0-1_amd64.deb +* http://get.influxdb.org/telegraf/telegraf-0.11.0-1.x86_64.rpm Latest (arm): -* http://get.influxdb.org/telegraf/telegraf_0.10.4.1-1_arm.deb -* http://get.influxdb.org/telegraf/telegraf-0.10.4.1-1.arm.rpm +* http://get.influxdb.org/telegraf/telegraf_0.11.0-1_arm.deb +* http://get.influxdb.org/telegraf/telegraf-0.11.0-1.arm.rpm 0.2.x: * http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb @@ -56,9 +56,9 @@ for instructions, replacing the `influxdb` package name with `telegraf`. ### Linux tarballs: Latest: -* http://get.influxdb.org/telegraf/telegraf-0.10.4.1-1_linux_amd64.tar.gz -* http://get.influxdb.org/telegraf/telegraf-0.10.4.1-1_linux_i386.tar.gz -* http://get.influxdb.org/telegraf/telegraf-0.10.4.1-1_linux_arm.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_linux_amd64.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_linux_i386.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_linux_arm.tar.gz 0.2.x: * http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz @@ -70,13 +70,13 @@ Latest: To install the full directory structure with config file, run: ``` -sudo tar -C / -zxvf ./telegraf-0.10.4.1-1_linux_amd64.tar.gz +sudo tar -C / -zxvf ./telegraf-0.11.0-1_linux_amd64.tar.gz ``` To extract only the binary, run: ``` -tar -zxvf telegraf-0.10.4.1-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf +tar -zxvf telegraf-0.11.0-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf ``` ### Ansible Role: @@ -93,8 +93,8 @@ brew install telegraf ### Windows Binaries (EXPERIMENTAL) Latest: -* http://get.influxdb.org/telegraf/telegraf-0.10.4.1-1_windows_amd64.zip -* http://get.influxdb.org/telegraf/telegraf-0.10.4.1-1_windows_i386.zip +* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_windows_amd64.zip +* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_windows_i386.zip ### From Source: From 2cb32a683ed9dfb2101dc0fc30d92c5a7228d204 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 14 Mar 2016 17:42:20 +0000 Subject: [PATCH 147/287] README fixes for 0.11.0 --- README.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index b32aba561..eedf2efab 100644 --- a/README.md +++ b/README.md @@ -31,8 +31,8 @@ Latest: * http://get.influxdb.org/telegraf/telegraf-0.11.0-1.x86_64.rpm Latest (arm): -* http://get.influxdb.org/telegraf/telegraf_0.11.0-1_arm.deb -* http://get.influxdb.org/telegraf/telegraf-0.11.0-1.arm.rpm +* http://get.influxdb.org/telegraf/telegraf_0.11.0-1_armhf.deb +* http://get.influxdb.org/telegraf/telegraf-0.11.0-1.armhf.rpm 0.2.x: * http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb @@ -58,7 +58,7 @@ for instructions, replacing the `influxdb` package name with `telegraf`. Latest: * http://get.influxdb.org/telegraf/telegraf-0.11.0-1_linux_amd64.tar.gz * http://get.influxdb.org/telegraf/telegraf-0.11.0-1_linux_i386.tar.gz -* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_linux_arm.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_linux_armhf.tar.gz 0.2.x: * http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz @@ -93,8 +93,8 @@ brew install telegraf ### Windows Binaries (EXPERIMENTAL) Latest: -* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_windows_amd64.zip -* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_windows_i386.zip +* http://get.influxdb.org/telegraf/telegraf-0.10.4.1-1_windows_amd64.zip +* http://get.influxdb.org/telegraf/telegraf-0.10.4.1-1_windows_i386.zip ### From Source: @@ -186,6 +186,7 @@ Currently implemented sources: * net_response * nginx * nsq +* ntpq * phpfpm * phusion passenger * ping From 4071c78b2b9861daecc0624cd3e794b6456f2bde Mon Sep 17 00:00:00 2001 From: LordFPL Date: Wed, 16 Mar 2016 08:57:33 +0100 Subject: [PATCH 148/287] (very) Little error in changelog We are not going in the past, no ? ;) --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9c886921a..2b5478569 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## v0.11.0 [2016-02-15] +## v0.11.0 [2016-03-15] ### Release Notes From 675b6dc305926fca6ffb5a3d1b451383e4151cca Mon Sep 17 00:00:00 2001 From: Ross McDonald Date: Tue, 15 Mar 2016 08:11:55 -0500 Subject: [PATCH 149/287] Corrected issue with windows builds where the correct configuration and filesystem would be used. closes #852 closes #854 --- scripts/build.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/scripts/build.py b/scripts/build.py index 950fa40e9..15d7e8e41 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -468,19 +468,28 @@ def build_packages(build_output, version, nightly=False, rc=None, iteration=1): arch, '{}-{}-{}'.format(PACKAGE_NAME, version, iteration)) create_dir(build_root) - create_package_fs(build_root) # Copy packaging scripts to build directory - package_scripts(build_root) + if platform == 'windows': + package_scripts(build_root, windows=True) + else: + create_package_fs(build_root) + package_scripts(build_root) for binary in targets: - # Copy newly-built binaries to packaging directory if platform == 'windows': + # For windows, we just want to copy the binary into the root directory binary = binary + '.exe' - # Where the binary currently is located - fr = os.path.join(current_location, binary) - # Where the binary should go in the package filesystem - to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary) + # Where the binary should go in the package filesystem + to = os.path.join(build_root, binary) + # Where the binary currently is located + fr = os.path.join(current_location, binary) + else: + # Where the binary currently is located + fr = os.path.join(current_location, binary) + # Where the binary should go in the package filesystem + to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary) + if debug: print("[{}][{}] - Moving from '{}' to '{}'".format(platform, arch, @@ -566,6 +575,7 @@ def build_packages(build_output, version, nightly=False, rc=None, iteration=1): return outfiles finally: # Cleanup + print("Cleaning up build dir: {}".format(tmp_build_dir)) shutil.rmtree(tmp_build_dir) def print_usage(): From 035e4cf90a3892b2f4e21e3b6bb60020864de71a Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 16 Mar 2016 10:20:52 -0600 Subject: [PATCH 150/287] Fix bug with httpjson client pointer receiver fixes #859 --- CHANGELOG.md | 8 ++++++++ plugins/inputs/httpjson/httpjson.go | 10 ++++++---- plugins/inputs/httpjson/httpjson_test.go | 16 ++++++++-------- 3 files changed, 22 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2b5478569..78825e308 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,11 @@ +## v0.11.1 [unreleased] + +### Features + +### Bugfixes +- [#852](https://github.com/influxdata/telegraf/issues/852): Windows zip package fix +- [#859](https://github.com/influxdata/telegraf/issues/859): httpjson plugin panic + ## v0.11.0 [2016-03-15] ### Release Notes diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index 061995892..6fe4da1e5 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -55,15 +55,15 @@ type RealHTTPClient struct { client *http.Client } -func (c RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) { +func (c *RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) { return c.client.Do(req) } -func (c RealHTTPClient) SetHTTPClient(client *http.Client) { +func (c *RealHTTPClient) SetHTTPClient(client *http.Client) { c.client = client } -func (c RealHTTPClient) HTTPClient() *http.Client { +func (c *RealHTTPClient) HTTPClient() *http.Client { return c.client } @@ -289,6 +289,8 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) { func init() { inputs.Add("httpjson", func() telegraf.Input { - return &HttpJson{client: RealHTTPClient{}} + return &HttpJson{ + client: &RealHTTPClient{}, + } }) } diff --git a/plugins/inputs/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go index 1a1187d44..31447b307 100644 --- a/plugins/inputs/httpjson/httpjson_test.go +++ b/plugins/inputs/httpjson/httpjson_test.go @@ -125,7 +125,7 @@ type mockHTTPClient struct { // Mock implementation of MakeRequest. Usually returns an http.Response with // hard-coded responseBody and statusCode. However, if the request uses a // nonstandard method, it uses status code 405 (method not allowed) -func (c mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) { +func (c *mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) { resp := http.Response{} resp.StatusCode = c.statusCode @@ -147,10 +147,10 @@ func (c mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) { return &resp, nil } -func (c mockHTTPClient) SetHTTPClient(_ *http.Client) { +func (c *mockHTTPClient) SetHTTPClient(_ *http.Client) { } -func (c mockHTTPClient) HTTPClient() *http.Client { +func (c *mockHTTPClient) HTTPClient() *http.Client { return nil } @@ -164,7 +164,7 @@ func (c mockHTTPClient) HTTPClient() *http.Client { func genMockHttpJson(response string, statusCode int) []*HttpJson { return []*HttpJson{ &HttpJson{ - client: mockHTTPClient{responseBody: response, statusCode: statusCode}, + client: &mockHTTPClient{responseBody: response, statusCode: statusCode}, Servers: []string{ "http://server1.example.com/metrics/", "http://server2.example.com/metrics/", @@ -181,7 +181,7 @@ func genMockHttpJson(response string, statusCode int) []*HttpJson { }, }, &HttpJson{ - client: mockHTTPClient{responseBody: response, statusCode: statusCode}, + client: &mockHTTPClient{responseBody: response, statusCode: statusCode}, Servers: []string{ "http://server3.example.com/metrics/", "http://server4.example.com/metrics/", @@ -241,7 +241,7 @@ func TestHttpJsonGET_URL(t *testing.T) { Servers: []string{ts.URL + "?api_key=mykey"}, Name: "", Method: "GET", - client: RealHTTPClient{client: &http.Client{}}, + client: &RealHTTPClient{client: &http.Client{}}, } var acc testutil.Accumulator @@ -314,7 +314,7 @@ func TestHttpJsonGET(t *testing.T) { Name: "", Method: "GET", Parameters: params, - client: RealHTTPClient{client: &http.Client{}}, + client: &RealHTTPClient{client: &http.Client{}}, } var acc testutil.Accumulator @@ -388,7 +388,7 @@ func TestHttpJsonPOST(t *testing.T) { Name: "", Method: "POST", Parameters: params, - client: RealHTTPClient{client: &http.Client{}}, + client: &RealHTTPClient{client: &http.Client{}}, } var acc testutil.Accumulator From e4e7d7fbfc9dd244146d1ded02543f0775ac6988 Mon Sep 17 00:00:00 2001 From: Pierre Fersing Date: Tue, 23 Feb 2016 18:25:07 +0100 Subject: [PATCH 151/287] Improved install script for packaged telegraf: * Start/stop service on Debian/Ubuntu * Disable init-script/Systemd-unit on package removal closes #747 --- scripts/build.py | 6 ++++++ scripts/post-install.sh | 3 +++ scripts/post-remove.sh | 46 +++++++++++++++++++++++++++++++++++++++++ scripts/pre-remove.sh | 15 ++++++++++++++ 4 files changed, 70 insertions(+) create mode 100644 scripts/post-remove.sh create mode 100644 scripts/pre-remove.sh diff --git a/scripts/build.py b/scripts/build.py index 15d7e8e41..0998bb7df 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -31,6 +31,8 @@ DEFAULT_CONFIG = "etc/telegraf.conf" DEFAULT_WINDOWS_CONFIG = "etc/telegraf_windows.conf" POSTINST_SCRIPT = "scripts/post-install.sh" PREINST_SCRIPT = "scripts/pre-install.sh" +POSTREMOVE_SCRIPT = "scripts/post-remove.sh" +PREREMOVE_SCRIPT = "scripts/pre-remove.sh" # Default AWS S3 bucket for uploads DEFAULT_BUCKET = "get.influxdb.org/telegraf" @@ -61,6 +63,8 @@ fpm_common_args = "-f -s dir --log error \ --config-files {} \ --after-install {} \ --before-install {} \ + --after-remove {} \ + --before-remove {} \ --description \"{}\"".format( VENDOR, PACKAGE_URL, @@ -70,6 +74,8 @@ fpm_common_args = "-f -s dir --log error \ LOGROTATE_DIR + '/telegraf', POSTINST_SCRIPT, PREINST_SCRIPT, + POSTREMOVE_SCRIPT, + PREREMOVE_SCRIPT, DESCRIPTION) targets = { diff --git a/scripts/post-install.sh b/scripts/post-install.sh index 0982dc855..d4c5df443 100644 --- a/scripts/post-install.sh +++ b/scripts/post-install.sh @@ -13,6 +13,7 @@ function install_init { function install_systemd { cp -f $SCRIPT_DIR/telegraf.service /lib/systemd/system/telegraf.service systemctl enable telegraf + systemctl daemon-reload || true } function install_update_rcd { @@ -63,10 +64,12 @@ elif [[ -f /etc/debian_version ]]; then which systemctl &>/dev/null if [[ $? -eq 0 ]]; then install_systemd + deb-systemd-invoke restart telegraf.service else # Assuming sysv install_init install_update_rcd + invoke-rc.d telegraf restart fi elif [[ -f /etc/os-release ]]; then source /etc/os-release diff --git a/scripts/post-remove.sh b/scripts/post-remove.sh new file mode 100644 index 000000000..96b178f4d --- /dev/null +++ b/scripts/post-remove.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +function disable_systemd { + systemctl disable telegraf + rm -f /lib/systemd/system/telegraf.service +} + +function disable_update_rcd { + update-rc.d -f telegraf remove + rm -f /etc/init.d/telegraf +} + +function disable_chkconfig { + chkconfig --del telegraf + rm -f /etc/init.d/telegraf +} + +if [[ -f /etc/redhat-release ]]; then + # RHEL-variant logic + if [[ "$1" = "0" ]]; then + # InfluxDB is no longer installed, remove from init system + rm -f /etc/default/telegraf + + which systemctl &>/dev/null + if [[ $? -eq 0 ]]; then + disable_systemd + else + # Assuming sysv + disable_chkconfig + fi + fi +elif [[ -f /etc/debian_version ]]; then + # Debian/Ubuntu logic + if [[ "$1" != "upgrade" ]]; then + # Remove/purge + rm -f /etc/default/telegraf + + which systemctl &>/dev/null + if [[ $? -eq 0 ]]; then + disable_systemd + else + # Assuming sysv + disable_update_rcd + fi + fi +fi diff --git a/scripts/pre-remove.sh b/scripts/pre-remove.sh new file mode 100644 index 000000000..a57184630 --- /dev/null +++ b/scripts/pre-remove.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +BIN_DIR=/usr/bin + +# Distribution-specific logic +if [[ -f /etc/debian_version ]]; then + # Debian/Ubuntu logic + which systemctl &>/dev/null + if [[ $? -eq 0 ]]; then + deb-systemd-invoke stop telegraf.service + else + # Assuming sysv + invoke-rc.d telegraf stop + fi +fi From c2bb9db0120ffb1b8b99de2abd7a5feff871369e Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 16 Mar 2016 11:20:05 -0600 Subject: [PATCH 152/287] Changelog update --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 78825e308..9f51c92a0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,8 @@ ## v0.11.1 [unreleased] ### Features +- [#747](https://github.com/influxdata/telegraf/pull/747): Start telegraf on install & remove on uninstall. Thanks @pierref! +- [#794](https://github.com/influxdata/telegraf/pull/794): Add service reload ability. Thanks @entertainyou! ### Bugfixes - [#852](https://github.com/influxdata/telegraf/issues/852): Windows zip package fix From f8e9fafda38c1d430bfb52e8c20cf16a3d50320a Mon Sep 17 00:00:00 2001 From: HUANG Wei Date: Sat, 5 Mar 2016 11:11:53 +0800 Subject: [PATCH 153/287] Add reload configuration for telegraf service scripts. closes #794 --- scripts/init.sh | 16 ++++++++++++++++ scripts/telegraf.service | 1 + 2 files changed, 17 insertions(+) diff --git a/scripts/init.sh b/scripts/init.sh index 81932bb48..09a4d24bd 100755 --- a/scripts/init.sh +++ b/scripts/init.sh @@ -159,6 +159,22 @@ case $1 in fi ;; + reload) + # Reload the daemon. + if [ -e $pidfile ]; then + pidofproc -p $pidfile $daemon > /dev/null 2>&1 && status="0" || status="$?" + if [ "$status" = 0 ]; then + if killproc -p $pidfile SIGHUP; then + log_success_msg "$name process was reloaded" + else + log_failure_msg "$name failed to reload service" + fi + fi + else + log_failure_msg "$name process is not running" + fi + ;; + restart) # Restart the daemon. $0 stop && sleep 2 && $0 start diff --git a/scripts/telegraf.service b/scripts/telegraf.service index 6f4450402..dcc2b9713 100644 --- a/scripts/telegraf.service +++ b/scripts/telegraf.service @@ -7,6 +7,7 @@ After=network.target EnvironmentFile=-/etc/default/telegraf User=telegraf ExecStart=/usr/bin/telegraf -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d ${TELEGRAF_OPTS} +ExecReload=/bin/kill -HUP $MAINPID Restart=on-failure KillMode=process From 822706367b2ead93185142659021de33477bf8a0 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 16 Mar 2016 14:22:01 -0600 Subject: [PATCH 154/287] provide args for telegraf for consistency with influxd: - telegraf version - telegraf config closes #857 --- cmd/telegraf/telegraf.go | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index d54aaa4e3..436d1a38e 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -96,8 +96,9 @@ func main() { reload <- false flag.Usage = func() { usageExit(0) } flag.Parse() + args := flag.Args() - if flag.NFlag() == 0 { + if flag.NFlag() == 0 && len(args) == 0 { usageExit(0) } @@ -121,6 +122,18 @@ func main() { outputFilters = strings.Split(":"+outputFilter+":", ":") } + if len(args) > 0 { + switch args[0] { + case "version": + v := fmt.Sprintf("Telegraf - Version %s", Version) + fmt.Println(v) + return + case "config": + config.PrintSampleConfig(inputFilters, outputFilters) + return + } + } + if *fOutputList { fmt.Println("Available Output Plugins:") for k, _ := range outputs.Outputs { From 59568f5311fd96fd4f1d2315f6af1ea01b78c9c0 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 16 Mar 2016 14:45:35 -0600 Subject: [PATCH 155/287] Release 0.11.1 --- README.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index eedf2efab..97470aece 100644 --- a/README.md +++ b/README.md @@ -27,12 +27,12 @@ the [release blog post](https://influxdata.com/blog/announcing-telegraf-0-10-0/) ### Linux deb and rpm Packages: Latest: -* http://get.influxdb.org/telegraf/telegraf_0.11.0-1_amd64.deb -* http://get.influxdb.org/telegraf/telegraf-0.11.0-1.x86_64.rpm +* http://get.influxdb.org/telegraf/telegraf_0.11.1-1_amd64.deb +* http://get.influxdb.org/telegraf/telegraf-0.11.1-1.x86_64.rpm Latest (arm): -* http://get.influxdb.org/telegraf/telegraf_0.11.0-1_armhf.deb -* http://get.influxdb.org/telegraf/telegraf-0.11.0-1.armhf.rpm +* http://get.influxdb.org/telegraf/telegraf_0.11.1-1_armhf.deb +* http://get.influxdb.org/telegraf/telegraf-0.11.1-1.armhf.rpm 0.2.x: * http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb @@ -56,9 +56,9 @@ for instructions, replacing the `influxdb` package name with `telegraf`. ### Linux tarballs: Latest: -* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_linux_amd64.tar.gz -* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_linux_i386.tar.gz -* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_linux_armhf.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_linux_amd64.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_linux_i386.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_linux_armhf.tar.gz 0.2.x: * http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz @@ -70,13 +70,13 @@ Latest: To install the full directory structure with config file, run: ``` -sudo tar -C / -zxvf ./telegraf-0.11.0-1_linux_amd64.tar.gz +sudo tar -C / -zxvf ./telegraf-0.11.1-1_linux_amd64.tar.gz ``` To extract only the binary, run: ``` -tar -zxvf telegraf-0.11.0-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf +tar -zxvf telegraf-0.11.1-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf ``` ### Ansible Role: @@ -93,8 +93,8 @@ brew install telegraf ### Windows Binaries (EXPERIMENTAL) Latest: -* http://get.influxdb.org/telegraf/telegraf-0.10.4.1-1_windows_amd64.zip -* http://get.influxdb.org/telegraf/telegraf-0.10.4.1-1_windows_i386.zip +* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_windows_amd64.zip +* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_windows_i386.zip ### From Source: From 5afe819ebd374ab9c01921a3c060c55605f92655 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 16 Mar 2016 14:55:40 -0600 Subject: [PATCH 156/287] Changelog update for 0.11.1 --- CHANGELOG.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f51c92a0..3106f3f06 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,13 @@ -## v0.11.1 [unreleased] +## v0.11.2 [unreleased] + +### Features + +### Bugfixes + +## v0.11.1 [2016-03-17] + +### Release Notes +- Primarily this release was cut to fix [#859](https://github.com/influxdata/telegraf/issues/859) ### Features - [#747](https://github.com/influxdata/telegraf/pull/747): Start telegraf on install & remove on uninstall. Thanks @pierref! From 57f7582b4da0ae5e878dc83dcbe150c703b41822 Mon Sep 17 00:00:00 2001 From: marknmel Date: Tue, 15 Mar 2016 09:29:05 -0400 Subject: [PATCH 157/287] Cleanup of Exec Inputs documentation - redux Hi @sparrc (Sorry for the noise - new pr) closes #853 Please find some improvements to readability including the \n for the exec/telegraf line-protocol input. I hope you (and others) find it easier to read. /Mark This is an ammend --- plugins/inputs/exec/README.md | 60 ++++++++++++++++++++--------------- 1 file changed, 35 insertions(+), 25 deletions(-) diff --git a/plugins/inputs/exec/README.md b/plugins/inputs/exec/README.md index eddc86ada..730da1fd5 100644 --- a/plugins/inputs/exec/README.md +++ b/plugins/inputs/exec/README.md @@ -1,28 +1,20 @@ # Exec Input Plugin -The exec plugin can execute arbitrary commands which output: +Please also see: [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md) -* JSON -* InfluxDB [line-protocol](https://docs.influxdata.com/influxdb/v0.9/write_protocols/line/) +The exec input plugin can execute arbitrary commands which output: + +* JSON [javascript object notation](http://www.json.org/) +* InfluxDB [line-protocol](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/) * Graphite [graphite-protocol](http://graphite.readthedocs.org/en/latest/feeding-carbon.html) -> Graphite understands messages with this format: -> ``` -metric_path value timestamp\n -``` +### Example 1 - JSON -> __metric_path__ is the metric namespace that you want to populate. +#### Configuration -> __value__ is the value that you want to assign to the metric at this time. - -> __timestamp__ is the unix epoch time. - - -If using JSON, only numeric values are parsed and turned into floats. Booleans -and strings will be ignored. - -### Configuration +In this example a script called ```/tmp/test.sh``` and a script called ```/tmp/test2.sh``` +are configured for ```[[inputs.exec]]``` in JSON format. ``` # Read flattened metrics from one or more commands that output JSON to stdout @@ -64,8 +56,6 @@ Other options for modifying the measurement names are: name_prefix = "prefix_" ``` -### Example 1 - Let's say that we have the above configuration, and mycollector outputs the following JSON: @@ -85,10 +75,16 @@ The collected metrics will be stored as fields under the measurement ``` exec_mycollector a=0.5,b_c=0.1,b_d=5 1452815002357578567 ``` +If using JSON, only numeric values are parsed and turned into floats. Booleans +and strings will be ignored. -### Example 2 +### Example 2 - Influx Line-Protocol -Now let's say we have the following configuration: +In this example an application called ```/usr/bin/line_protocol_collector``` +and a script called ```/tmp/test2.sh``` are configured for ```[[inputs.exec]]``` +in influx line-protocol format. + +#### Configuration ``` [[inputs.exec]] @@ -103,7 +99,7 @@ Now let's say we have the following configuration: data_format = "influx" ``` -And line_protocol_collector outputs the following line protocol: +The line_protocol_collector application outputs the following line protocol: ``` cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 @@ -117,16 +113,19 @@ cpu,cpu=cpu6,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 You will get data in InfluxDB exactly as it is defined above, tags are cpu=cpuN, host=foo, and datacenter=us-east with fields usage_idle -and usage_busy. They will receive a timestamp at collection time. +and usage_busy. They will receive a timestamp at collection time. +Each line must end in \n, just as the Influx line protocol does. -### Example 3 +### Example 3 - Graphite We can also change the data_format to "graphite" to use the metrics collecting scripts such as (compatible with graphite): * Nagios [Mertics Plugins] (https://exchange.nagios.org/directory/Plugins) * Sensu [Mertics Plugins] (https://github.com/sensu-plugins) +In this example a script called /tmp/test.sh and a script called /tmp/test2.sh are configured for [[inputs.exec]] in graphite format. + #### Configuration ``` # Read flattened metrics from one or more commands that output JSON to stdout @@ -161,6 +160,17 @@ We can also change the data_format to "graphite" to use the metrics collecting s "measurement*" ] ``` +Graphite messages are in this format: + +``` +metric_path value timestamp\n +``` + +__metric_path__ is the metric namespace that you want to populate. + +__value__ is the value that you want to assign to the metric at this time. + +__timestamp__ is the unix epoch time. And test.sh/test2.sh will output: @@ -177,4 +187,4 @@ sensu.metric.net.server0.eth0.rx_dropped 0 1444234982 The templates configuration will be used to parse the graphite metrics to support influxdb/opentsdb tagging store engines. More detail information about templates, please refer to [The graphite Input] (https://github.com/influxdata/influxdb/blob/master/services/graphite/README.md) - + From bac1c223deb42119117852c5866ffa0de42659f5 Mon Sep 17 00:00:00 2001 From: Thibault Cohen Date: Tue, 1 Mar 2016 11:12:23 -0500 Subject: [PATCH 158/287] Improve prometheus plugin closes #707 --- CHANGELOG.md | 1 + plugins/inputs/prometheus/README.md | 75 ++++++++ plugins/inputs/prometheus/parser.go | 171 +++++++++++++++++ plugins/inputs/prometheus/parser_test.go | 175 ++++++++++++++++++ plugins/inputs/prometheus/prometheus.go | 58 +++--- plugins/inputs/prometheus/prometheus_test.go | 14 +- .../prometheus_client_test.go | 4 +- 7 files changed, 452 insertions(+), 46 deletions(-) create mode 100644 plugins/inputs/prometheus/README.md create mode 100644 plugins/inputs/prometheus/parser.go create mode 100644 plugins/inputs/prometheus/parser_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 3106f3f06..79587140a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ ## v0.11.2 [unreleased] ### Features +- [#707](https://github.com/influxdata/telegraf/pull/707): Improved prometheus plugin. Thanks @titilambert! ### Bugfixes diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md new file mode 100644 index 000000000..c5c952515 --- /dev/null +++ b/plugins/inputs/prometheus/README.md @@ -0,0 +1,75 @@ +# Prometheus Input Plugin + +The prometheus input plugin gathers metrics from any webpage +exposing metrics with Prometheus format + +### Configuration: + +Example for Kubernetes apiserver +```toml +# Get all metrics from Kube-apiserver +[[inputs.prometheus]] + # An array of urls to scrape metrics from. + urls = ["http://my-kube-apiserver:8080/metrics"] +``` + +You can use more complex configuration +to filter and some tags + +```toml +# Get all metrics from Kube-apiserver +[[inputs.prometheus]] + # An array of urls to scrape metrics from. + urls = ["http://my-kube-apiserver:8080/metrics"] + # Get only metrics with "apiserver_" string is in metric name + namepass = ["apiserver_"] + # Add a metric name prefix + name_prefix = "k8s_" + # Add tags to be able to make beautiful dashboards + [inputs.prometheus.tags] + kubeservice = "kube-apiserver" +``` + +### Measurements & Fields & Tags: + +Measurements and fields could be any thing. +It just depends of what you're quering. + +Example: + +``` +# HELP go_gc_duration_seconds A summary of the GC invocation durations. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 0.00010425500000000001 +go_gc_duration_seconds{quantile="0.25"} 0.000139108 +go_gc_duration_seconds{quantile="0.5"} 0.00015749400000000002 +go_gc_duration_seconds{quantile="0.75"} 0.000331463 +go_gc_duration_seconds{quantile="1"} 0.000667154 +go_gc_duration_seconds_sum 0.0018183950000000002 +go_gc_duration_seconds_count 7 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 15 +``` + +- go_goroutines, + - gauge (integer, unit) +- go_gc_duration_seconds + - field3 (integer, bytes) + +- All measurements have the following tags: + - url=http://my-kube-apiserver:8080/metrics +- go_goroutines has the following tags: + - kubeservice=kube-apiserver +- go_gc_duration_seconds has the following tags: + - kubeservice=kube-apiserver + +### Example Output: + +Example of output with configuration given above: + +``` +$ ./telegraf -config telegraf.conf -test +k8s_go_goroutines,kubeservice=kube-apiserver,url=http://my-kube-apiserver:8080/metrics gauge=536 1456857329391929813 +k8s_go_gc_duration_seconds,kubeservice=kube-apiserver,url=http://my-kube-apiserver:8080/metrics 0=0.038002142,0.25=0.041732467,0.5=0.04336492,0.75=0.047271799,1=0.058295811,count=0,sum=208.334617406 1456857329391929813 +``` diff --git a/plugins/inputs/prometheus/parser.go b/plugins/inputs/prometheus/parser.go new file mode 100644 index 000000000..c6ad211f8 --- /dev/null +++ b/plugins/inputs/prometheus/parser.go @@ -0,0 +1,171 @@ +package prometheus + +// Parser inspired from +// https://github.com/prometheus/prom2json/blob/master/main.go + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "mime" + + "github.com/influxdata/telegraf" + + "github.com/matttproud/golang_protobuf_extensions/pbutil" + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" +) + +// PrometheusParser is an object for Parsing incoming metrics. +type PrometheusParser struct { + // PromFormat + PromFormat map[string]string + // DefaultTags will be added to every parsed metric + // DefaultTags map[string]string +} + +// Parse returns a slice of Metrics from a text representation of a +// metrics +func (p *PrometheusParser) Parse(buf []byte) ([]telegraf.Metric, error) { + var metrics []telegraf.Metric + var parser expfmt.TextParser + // parse even if the buffer begins with a newline + buf = bytes.TrimPrefix(buf, []byte("\n")) + // Read raw data + buffer := bytes.NewBuffer(buf) + reader := bufio.NewReader(buffer) + + // Get format + mediatype, params, err := mime.ParseMediaType(p.PromFormat["Content-Type"]) + // Prepare output + metricFamilies := make(map[string]*dto.MetricFamily) + if err == nil && mediatype == "application/vnd.google.protobuf" && + params["encoding"] == "delimited" && + params["proto"] == "io.prometheus.client.MetricFamily" { + for { + metricFamily := &dto.MetricFamily{} + if _, err = pbutil.ReadDelimited(reader, metricFamily); err != nil { + if err == io.EOF { + break + } + return nil, fmt.Errorf("reading metric family protocol buffer failed: %s", err) + } + metricFamilies[metricFamily.GetName()] = metricFamily + } + } else { + metricFamilies, err = parser.TextToMetricFamilies(reader) + if err != nil { + return nil, fmt.Errorf("reading text format failed: %s", err) + } + // read metrics + for metricName, mf := range metricFamilies { + for _, m := range mf.Metric { + // reading tags + tags := makeLabels(m) + /* + for key, value := range p.DefaultTags { + tags[key] = value + } + */ + // reading fields + fields := make(map[string]interface{}) + if mf.GetType() == dto.MetricType_SUMMARY { + // summary metric + fields = makeQuantiles(m) + fields["count"] = float64(m.GetHistogram().GetSampleCount()) + fields["sum"] = float64(m.GetSummary().GetSampleSum()) + } else if mf.GetType() == dto.MetricType_HISTOGRAM { + // historgram metric + fields = makeBuckets(m) + fields["count"] = float64(m.GetHistogram().GetSampleCount()) + fields["sum"] = float64(m.GetSummary().GetSampleSum()) + + } else { + // standard metric + fields = getNameAndValue(m) + } + // converting to telegraf metric + if len(fields) > 0 { + metric, err := telegraf.NewMetric(metricName, tags, fields) + if err == nil { + metrics = append(metrics, metric) + } + } + } + } + } + return metrics, err +} + +// Parse one line +func (p *PrometheusParser) ParseLine(line string) (telegraf.Metric, error) { + metrics, err := p.Parse([]byte(line + "\n")) + + if err != nil { + return nil, err + } + + if len(metrics) < 1 { + return nil, fmt.Errorf( + "Can not parse the line: %s, for data format: prometheus", line) + } + + return metrics[0], nil +} + +/* +// Set default tags +func (p *PrometheusParser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} +*/ + +// Get Quantiles from summary metric +func makeQuantiles(m *dto.Metric) map[string]interface{} { + fields := make(map[string]interface{}) + for _, q := range m.GetSummary().Quantile { + if !math.IsNaN(q.GetValue()) { + fields[fmt.Sprint(q.GetQuantile())] = float64(q.GetValue()) + } + } + return fields +} + +// Get Buckets from histogram metric +func makeBuckets(m *dto.Metric) map[string]interface{} { + fields := make(map[string]interface{}) + for _, b := range m.GetHistogram().Bucket { + fields[fmt.Sprint(b.GetUpperBound())] = float64(b.GetCumulativeCount()) + } + return fields +} + +// Get labels from metric +func makeLabels(m *dto.Metric) map[string]string { + result := map[string]string{} + for _, lp := range m.Label { + result[lp.GetName()] = lp.GetValue() + } + return result +} + +// Get name and value from metric +func getNameAndValue(m *dto.Metric) map[string]interface{} { + fields := make(map[string]interface{}) + if m.Gauge != nil { + if !math.IsNaN(m.GetGauge().GetValue()) { + fields["gauge"] = float64(m.GetGauge().GetValue()) + } + } else if m.Counter != nil { + if !math.IsNaN(m.GetGauge().GetValue()) { + fields["counter"] = float64(m.GetCounter().GetValue()) + } + } else if m.Untyped != nil { + if !math.IsNaN(m.GetGauge().GetValue()) { + fields["value"] = float64(m.GetUntyped().GetValue()) + } + } + return fields +} diff --git a/plugins/inputs/prometheus/parser_test.go b/plugins/inputs/prometheus/parser_test.go new file mode 100644 index 000000000..5c33260be --- /dev/null +++ b/plugins/inputs/prometheus/parser_test.go @@ -0,0 +1,175 @@ +package prometheus + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +var exptime = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) + +const validUniqueGauge = `# HELP cadvisor_version_info A metric with a constant '1' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision. +# TYPE cadvisor_version_info gauge +cadvisor_version_info{cadvisorRevision="",cadvisorVersion="",dockerVersion="1.8.2",kernelVersion="3.10.0-229.20.1.el7.x86_64",osVersion="CentOS Linux 7 (Core)"} 1 +` + +const validUniqueCounter = `# HELP get_token_fail_count Counter of failed Token() requests to the alternate token source +# TYPE get_token_fail_count counter +get_token_fail_count 0 +` + +const validUniqueLine = `# HELP get_token_fail_count Counter of failed Token() requests to the alternate token source +` + +const validUniqueSummary = `# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. +# TYPE http_request_duration_microseconds summary +http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 552048.506 +http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 5.876804288e+06 +http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 5.876804288e+06 +http_request_duration_microseconds_sum{handler="prometheus"} 1.8909097205e+07 +http_request_duration_microseconds_count{handler="prometheus"} 9 +` + +const validUniqueHistogram = `# HELP apiserver_request_latencies Response latency distribution in microseconds for each verb, resource and client. +# TYPE apiserver_request_latencies histogram +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="125000"} 1994 +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="250000"} 1997 +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="500000"} 2000 +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="1e+06"} 2005 +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="2e+06"} 2012 +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="4e+06"} 2017 +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="8e+06"} 2024 +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="+Inf"} 2025 +apiserver_request_latencies_sum{resource="bindings",verb="POST"} 1.02726334e+08 +apiserver_request_latencies_count{resource="bindings",verb="POST"} 2025 +` + +const validData = `# HELP cadvisor_version_info A metric with a constant '1' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision. +# TYPE cadvisor_version_info gauge +cadvisor_version_info{cadvisorRevision="",cadvisorVersion="",dockerVersion="1.8.2",kernelVersion="3.10.0-229.20.1.el7.x86_64",osVersion="CentOS Linux 7 (Core)"} 1 +# HELP go_gc_duration_seconds A summary of the GC invocation durations. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 0.013534896000000001 +go_gc_duration_seconds{quantile="0.25"} 0.02469263 +go_gc_duration_seconds{quantile="0.5"} 0.033727822000000005 +go_gc_duration_seconds{quantile="0.75"} 0.03840335 +go_gc_duration_seconds{quantile="1"} 0.049956604 +go_gc_duration_seconds_sum 1970.341293002 +go_gc_duration_seconds_count 65952 +# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. +# TYPE http_request_duration_microseconds summary +http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 552048.506 +http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 5.876804288e+06 +http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 5.876804288e+06 +http_request_duration_microseconds_sum{handler="prometheus"} 1.8909097205e+07 +http_request_duration_microseconds_count{handler="prometheus"} 9 +# HELP get_token_fail_count Counter of failed Token() requests to the alternate token source +# TYPE get_token_fail_count counter +get_token_fail_count 0 +# HELP apiserver_request_latencies Response latency distribution in microseconds for each verb, resource and client. +# TYPE apiserver_request_latencies histogram +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="125000"} 1994 +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="250000"} 1997 +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="500000"} 2000 +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="1e+06"} 2005 +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="2e+06"} 2012 +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="4e+06"} 2017 +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="8e+06"} 2024 +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="+Inf"} 2025 +apiserver_request_latencies_sum{resource="bindings",verb="POST"} 1.02726334e+08 +apiserver_request_latencies_count{resource="bindings",verb="POST"} 2025 +` + +const prometheusMulti = ` +cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 +cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 +cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 +cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 +cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 +cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 +cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 +` + +const prometheusMultiSomeInvalid = ` +cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 +cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 +cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 +cpu,cpu=cpu3, host=foo,datacenter=us-east usage_idle=99,usage_busy=1 +cpu,cpu=cpu4 , usage_idle=99,usage_busy=1 +cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 +` + +func TestParseValidPrometheus(t *testing.T) { + parser := PrometheusParser{} + + // Gauge value + metrics, err := parser.Parse([]byte(validUniqueGauge)) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + assert.Equal(t, "cadvisor_version_info", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "gauge": float64(1), + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{ + "osVersion": "CentOS Linux 7 (Core)", + "dockerVersion": "1.8.2", + "kernelVersion": "3.10.0-229.20.1.el7.x86_64", + }, metrics[0].Tags()) + + // Counter value + //parser.SetDefaultTags(map[string]string{"mytag": "mytagvalue"}) + metrics, err = parser.Parse([]byte(validUniqueCounter)) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + assert.Equal(t, "get_token_fail_count", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "counter": float64(0), + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{}, metrics[0].Tags()) + + // Summary data + //parser.SetDefaultTags(map[string]string{}) + metrics, err = parser.Parse([]byte(validUniqueSummary)) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + assert.Equal(t, "http_request_duration_microseconds", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "0.5": 552048.506, + "0.9": 5.876804288e+06, + "0.99": 5.876804288e+06, + "count": 0.0, + "sum": 1.8909097205e+07, + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{"handler": "prometheus"}, metrics[0].Tags()) + + // histogram data + metrics, err = parser.Parse([]byte(validUniqueHistogram)) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + assert.Equal(t, "apiserver_request_latencies", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "500000": 2000.0, + "count": 2025.0, + "sum": 0.0, + "250000": 1997.0, + "2e+06": 2012.0, + "4e+06": 2017.0, + "8e+06": 2024.0, + "+Inf": 2025.0, + "125000": 1994.0, + "1e+06": 2005.0, + }, metrics[0].Fields()) + assert.Equal(t, + map[string]string{"verb": "POST", "resource": "bindings"}, + metrics[0].Tags()) + +} + +func TestParseLineInvalidPrometheus(t *testing.T) { + parser := PrometheusParser{} + metric, err := parser.ParseLine(validUniqueLine) + assert.NotNil(t, err) + assert.Nil(t, metric) + +} diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 5873b27cc..05149f332 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -5,9 +5,7 @@ import ( "fmt" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/prometheus/common/expfmt" - "github.com/prometheus/common/model" - "io" + "io/ioutil" "net/http" "sync" "time" @@ -62,6 +60,7 @@ var client = &http.Client{ } func (g *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error { + collectDate := time.Now() resp, err := client.Get(url) if err != nil { return fmt.Errorf("error making HTTP request to %s: %s", url, err) @@ -70,38 +69,33 @@ func (g *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error { if resp.StatusCode != http.StatusOK { return fmt.Errorf("%s returned HTTP status %s", url, resp.Status) } - format := expfmt.ResponseFormat(resp.Header) - decoder := expfmt.NewDecoder(resp.Body, format) - - options := &expfmt.DecodeOptions{ - Timestamp: model.Now(), - } - sampleDecoder := &expfmt.SampleDecoder{ - Dec: decoder, - Opts: options, + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("error reading body: %s", err) } - for { - var samples model.Vector - err := sampleDecoder.Decode(&samples) - if err == io.EOF { - break - } else if err != nil { - return fmt.Errorf("error getting processing samples for %s: %s", - url, err) - } - for _, sample := range samples { - tags := make(map[string]string) - for key, value := range sample.Metric { - if key == model.MetricNameLabel { - continue - } - tags[string(key)] = string(value) - } - acc.Add("prometheus_"+string(sample.Metric[model.MetricNameLabel]), - float64(sample.Value), tags) - } + // Headers + headers := make(map[string]string) + for key, value := range headers { + headers[key] = value + } + + // Prepare Prometheus parser config + promparser := PrometheusParser{ + PromFormat: headers, + } + + metrics, err := promparser.Parse(body) + if err != nil { + return fmt.Errorf("error getting processing samples for %s: %s", + url, err) + } + // Add (or not) collected metrics + for _, metric := range metrics { + tags := metric.Tags() + tags["url"] = url + acc.AddFields(metric.Name(), metric.Fields(), tags, collectDate) } return nil diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index 2009cbb11..8a8fea9e3 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -40,16 +40,6 @@ func TestPrometheusGeneratesMetrics(t *testing.T) { err := p.Gather(&acc) require.NoError(t, err) - expected := []struct { - name string - value float64 - tags map[string]string - }{ - {"prometheus_go_gc_duration_seconds_count", 7, map[string]string{}}, - {"prometheus_go_goroutines", 15, map[string]string{}}, - } - - for _, e := range expected { - assert.True(t, acc.HasFloatField(e.name, "value")) - } + assert.True(t, acc.HasFloatField("go_gc_duration_seconds", "count")) + assert.True(t, acc.HasFloatField("go_goroutines", "gauge")) } diff --git a/plugins/outputs/prometheus_client/prometheus_client_test.go b/plugins/outputs/prometheus_client/prometheus_client_test.go index 16414a8e4..15ed7b7e4 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_test.go @@ -54,7 +54,7 @@ func TestPrometheusWritePointEmptyTag(t *testing.T) { require.NoError(t, p.Gather(&acc)) for _, e := range expected { - acc.AssertContainsFields(t, "prometheus_"+e.name, + acc.AssertContainsFields(t, e.name, map[string]interface{}{"value": e.value}) } @@ -84,7 +84,7 @@ func TestPrometheusWritePointEmptyTag(t *testing.T) { require.NoError(t, p.Gather(&acc)) for _, e := range expected2 { - acc.AssertContainsFields(t, "prometheus_"+e.name, + acc.AssertContainsFields(t, e.name, map[string]interface{}{"value": e.value}) } } From 530b4f3bee00b6a8b1abac98569697542c306185 Mon Sep 17 00:00:00 2001 From: Eugene Dementiev Date: Wed, 16 Mar 2016 21:44:11 +0300 Subject: [PATCH 159/287] [amqp output] Allow external auth (cert-based tls auth) closes #863 --- CHANGELOG.md | 1 + internal/internal.go | 1 + plugins/outputs/amqp/amqp.go | 32 ++++++++++++++++++++++++++++---- 3 files changed, 30 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 79587140a..9308f9390 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ ## v0.11.2 [unreleased] ### Features +- [#863](https://github.com/influxdata/telegraf/pull/863): AMQP output: allow external auth. Thanks @ekini! - [#707](https://github.com/influxdata/telegraf/pull/707): Improved prometheus plugin. Thanks @titilambert! ### Bugfixes diff --git a/internal/internal.go b/internal/internal.go index 82758e5e8..9c3696c3d 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -109,6 +109,7 @@ func GetTLSConfig( RootCAs: caCertPool, InsecureSkipVerify: InsecureSkipVerify, } + t.BuildNameToCertificate() } else { if InsecureSkipVerify { t.InsecureSkipVerify = true diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index 948007117..c9531b2a5 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "log" + "strings" "sync" "time" @@ -20,6 +21,8 @@ type AMQP struct { URL string // AMQP exchange Exchange string + // AMQP Auth method + AuthMethod string // Routing Key Tag RoutingTag string `toml:"routing_tag"` // InfluxDB database @@ -45,7 +48,17 @@ type AMQP struct { serializer serializers.Serializer } +type externalAuth struct{} + +func (a *externalAuth) Mechanism() string { + return "EXTERNAL" +} +func (a *externalAuth) Response() string { + return fmt.Sprintf("\000") +} + const ( + DefaultAuthMethod = "PLAIN" DefaultRetentionPolicy = "default" DefaultDatabase = "telegraf" DefaultPrecision = "s" @@ -56,6 +69,8 @@ var sampleConfig = ` url = "amqp://localhost:5672/influxdb" ## AMQP exchange exchange = "telegraf" + ## Auth method. PLAIN and EXTERNAL are supported + # auth_method = "PLAIN" ## Telegraf tag to use as a routing key ## ie, if this tag exists, it's value will be used as the routing key routing_tag = "host" @@ -103,11 +118,19 @@ func (q *AMQP) Connect() error { return err } - if tls != nil { - connection, err = amqp.DialTLS(q.URL, tls) - } else { - connection, err = amqp.Dial(q.URL) + // parse auth method + var sasl []amqp.Authentication // nil by default + + if strings.ToUpper(q.AuthMethod) == "EXTERNAL" { + sasl = []amqp.Authentication{&externalAuth{}} } + + amqpConf := amqp.Config{ + TLSClientConfig: tls, + SASL: sasl, // if nil, it will be PLAIN + } + + connection, err = amqp.DialConfig(q.URL, amqpConf) if err != nil { return err } @@ -200,6 +223,7 @@ func (q *AMQP) Write(metrics []telegraf.Metric) error { func init() { outputs.Add("amqp", func() telegraf.Output { return &AMQP{ + AuthMethod: DefaultAuthMethod, Database: DefaultDatabase, Precision: DefaultPrecision, RetentionPolicy: DefaultRetentionPolicy, From eee6b0059cecfac4d32922b80e8a691d7ada94d8 Mon Sep 17 00:00:00 2001 From: Dirk Pahl Date: Thu, 17 Mar 2016 16:53:55 +0100 Subject: [PATCH 160/287] Add FreeBSD tarball location to README --- README.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/README.md b/README.md index 97470aece..3ee6a9c09 100644 --- a/README.md +++ b/README.md @@ -79,6 +79,25 @@ To extract only the binary, run: tar -zxvf telegraf-0.11.1-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf ``` +### FreeBSD tarball: + +Latest: +* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_freebsd_amd64.tar.gz + +##### tarball Instructions: + +To install the full directory structure with config file, run: + +``` +sudo tar -C / -zxvf ./telegraf-0.11.1-1_freebsd_amd64.tar.gz +``` + +To extract only the binary, run: + +``` +tar -zxvf telegraf-0.11.1-1_freebsd_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf +``` + ### Ansible Role: Ansible role: https://github.com/rossmcdonald/telegraf From b2d38cd31cf04a649a7fdbd72e61f8dea9761cfb Mon Sep 17 00:00:00 2001 From: HUANG Wei Date: Thu, 17 Mar 2016 16:10:36 +0800 Subject: [PATCH 161/287] Close the UDP connection in Stop() of statsd input plugin. If not, when doing reload, we may listen to the same port, we'll get error about listen to already used address. --- plugins/inputs/statsd/statsd.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index a16e78b5c..f237ac783 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -65,6 +65,8 @@ type Statsd struct { // bucket -> influx templates Templates []string + + listener *net.UDPConn } func NewStatsd() *Statsd { @@ -246,13 +248,14 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error { // udpListen starts listening for udp packets on the configured port. func (s *Statsd) udpListen() error { + var err error address, _ := net.ResolveUDPAddr("udp", s.ServiceAddress) - listener, err := net.ListenUDP("udp", address) + s.listener, err = net.ListenUDP("udp", address) if err != nil { log.Fatalf("ERROR: ListenUDP - %s", err) } - defer listener.Close() - log.Println("Statsd listener listening on: ", listener.LocalAddr().String()) + defer s.listener.Close() + log.Println("Statsd listener listening on: ", s.listener.LocalAddr().String()) for { select { @@ -260,9 +263,10 @@ func (s *Statsd) udpListen() error { return nil default: buf := make([]byte, s.UDPPacketSize) - n, _, err := listener.ReadFromUDP(buf) + n, _, err := s.listener.ReadFromUDP(buf) if err != nil { - log.Printf("ERROR: %s\n", err.Error()) + log.Printf("ERROR READ: %s\n", err.Error()) + continue } select { @@ -557,6 +561,7 @@ func (s *Statsd) Stop() { s.Lock() defer s.Unlock() log.Println("Stopping the statsd service") + s.listener.Close() close(s.done) close(s.in) } From 8e7284de5a7cce30e428cbdbe0293d575a23063c Mon Sep 17 00:00:00 2001 From: HUANG Wei Date: Thu, 17 Mar 2016 16:38:09 +0800 Subject: [PATCH 162/287] fixup! Close the UDP connection in Stop() of statsd input plugin. --- plugins/inputs/statsd/statsd.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index f237ac783..ba605baa4 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -66,7 +66,7 @@ type Statsd struct { // bucket -> influx templates Templates []string - listener *net.UDPConn + listener *net.UDPConn } func NewStatsd() *Statsd { @@ -248,7 +248,7 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error { // udpListen starts listening for udp packets on the configured port. func (s *Statsd) udpListen() error { - var err error + var err error address, _ := net.ResolveUDPAddr("udp", s.ServiceAddress) s.listener, err = net.ListenUDP("udp", address) if err != nil { @@ -266,7 +266,7 @@ func (s *Statsd) udpListen() error { n, _, err := s.listener.ReadFromUDP(buf) if err != nil { log.Printf("ERROR READ: %s\n", err.Error()) - continue + continue } select { @@ -561,7 +561,7 @@ func (s *Statsd) Stop() { s.Lock() defer s.Unlock() log.Println("Stopping the statsd service") - s.listener.Close() + s.listener.Close() close(s.done) close(s.in) } From b5a431624b3e918c97ec4aa033a00406c96f7254 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 17 Mar 2016 10:16:12 -0600 Subject: [PATCH 163/287] Close UDP listener in udp_listener plugin also adding waitgroups to udp_listener and statsd plugins to verify that all goroutines have been cleaned up before Stop() exits. closes #869 --- plugins/inputs/statsd/statsd.go | 10 ++++++--- plugins/inputs/udp_listener/udp_listener.go | 21 ++++++++++++++----- .../inputs/udp_listener/udp_listener_test.go | 4 ++++ 3 files changed, 27 insertions(+), 8 deletions(-) diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index ba605baa4..943188353 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -50,6 +50,7 @@ type Statsd struct { UDPPacketSize int `toml:"udp_packet_size"` sync.Mutex + wg sync.WaitGroup // Channel for all incoming statsd packets in chan []byte @@ -238,6 +239,7 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error { s.sets = make(map[string]cachedset) s.timings = make(map[string]cachedtimings) + s.wg.Add(2) // Start the UDP listener go s.udpListen() // Start the line parser @@ -248,13 +250,13 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error { // udpListen starts listening for udp packets on the configured port. func (s *Statsd) udpListen() error { + defer s.wg.Done() var err error address, _ := net.ResolveUDPAddr("udp", s.ServiceAddress) s.listener, err = net.ListenUDP("udp", address) if err != nil { log.Fatalf("ERROR: ListenUDP - %s", err) } - defer s.listener.Close() log.Println("Statsd listener listening on: ", s.listener.LocalAddr().String()) for { @@ -264,7 +266,7 @@ func (s *Statsd) udpListen() error { default: buf := make([]byte, s.UDPPacketSize) n, _, err := s.listener.ReadFromUDP(buf) - if err != nil { + if err != nil && !strings.Contains(err.Error(), "closed network") { log.Printf("ERROR READ: %s\n", err.Error()) continue } @@ -282,6 +284,7 @@ func (s *Statsd) udpListen() error { // packet into statsd strings and then calls parseStatsdLine, which parses a // single statsd metric into a struct. func (s *Statsd) parser() error { + defer s.wg.Done() for { select { case <-s.done: @@ -561,8 +564,9 @@ func (s *Statsd) Stop() { s.Lock() defer s.Unlock() log.Println("Stopping the statsd service") - s.listener.Close() close(s.done) + s.listener.Close() + s.wg.Wait() close(s.in) } diff --git a/plugins/inputs/udp_listener/udp_listener.go b/plugins/inputs/udp_listener/udp_listener.go index 7aac3160c..4b362c478 100644 --- a/plugins/inputs/udp_listener/udp_listener.go +++ b/plugins/inputs/udp_listener/udp_listener.go @@ -3,6 +3,7 @@ package udp_listener import ( "log" "net" + "strings" "sync" "github.com/influxdata/telegraf" @@ -14,7 +15,9 @@ type UdpListener struct { ServiceAddress string UDPPacketSize int `toml:"udp_packet_size"` AllowedPendingMessages int + sync.Mutex + wg sync.WaitGroup in chan []byte done chan struct{} @@ -23,6 +26,8 @@ type UdpListener struct { // Keep the accumulator in this struct acc telegraf.Accumulator + + listener *net.UDPConn } const UDP_PACKET_SIZE int = 1500 @@ -76,6 +81,7 @@ func (u *UdpListener) Start(acc telegraf.Accumulator) error { u.in = make(chan []byte, u.AllowedPendingMessages) u.done = make(chan struct{}) + u.wg.Add(2) go u.udpListen() go u.udpParser() @@ -87,18 +93,21 @@ func (u *UdpListener) Stop() { u.Lock() defer u.Unlock() close(u.done) + u.listener.Close() + u.wg.Wait() close(u.in) log.Println("Stopped UDP listener service on ", u.ServiceAddress) } func (u *UdpListener) udpListen() error { + defer u.wg.Done() + var err error address, _ := net.ResolveUDPAddr("udp", u.ServiceAddress) - listener, err := net.ListenUDP("udp", address) + u.listener, err = net.ListenUDP("udp", address) if err != nil { log.Fatalf("ERROR: ListenUDP - %s", err) } - defer listener.Close() - log.Println("UDP server listening on: ", listener.LocalAddr().String()) + log.Println("UDP server listening on: ", u.listener.LocalAddr().String()) for { select { @@ -106,9 +115,10 @@ func (u *UdpListener) udpListen() error { return nil default: buf := make([]byte, u.UDPPacketSize) - n, _, err := listener.ReadFromUDP(buf) - if err != nil { + n, _, err := u.listener.ReadFromUDP(buf) + if err != nil && !strings.Contains(err.Error(), "closed network") { log.Printf("ERROR: %s\n", err.Error()) + continue } select { @@ -121,6 +131,7 @@ func (u *UdpListener) udpListen() error { } func (u *UdpListener) udpParser() error { + defer u.wg.Done() for { select { case <-u.done: diff --git a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go index 2f0f6fae5..bdbab318b 100644 --- a/plugins/inputs/udp_listener/udp_listener_test.go +++ b/plugins/inputs/udp_listener/udp_listener_test.go @@ -32,6 +32,7 @@ func TestRunParser(t *testing.T) { defer close(listener.done) listener.parser, _ = parsers.NewInfluxParser() + listener.wg.Add(1) go listener.udpParser() in <- testmsg @@ -58,6 +59,7 @@ func TestRunParserInvalidMsg(t *testing.T) { defer close(listener.done) listener.parser, _ = parsers.NewInfluxParser() + listener.wg.Add(1) go listener.udpParser() in <- testmsg @@ -78,6 +80,7 @@ func TestRunParserGraphiteMsg(t *testing.T) { defer close(listener.done) listener.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) + listener.wg.Add(1) go listener.udpParser() in <- testmsg @@ -98,6 +101,7 @@ func TestRunParserJSONMsg(t *testing.T) { defer close(listener.done) listener.parser, _ = parsers.NewJSONParser("udp_json_test", []string{}, nil) + listener.wg.Add(1) go listener.udpParser() in <- testmsg From d6b5f3efe63cd41df23cebe245212bf7877280e9 Mon Sep 17 00:00:00 2001 From: Jonathan Chauncey Date: Thu, 17 Mar 2016 15:17:48 -0400 Subject: [PATCH 164/287] fix(prometheus): Add support for bearer token to prometheus input plugin closes #864 merges #880 --- CHANGELOG.md | 1 + plugins/inputs/prometheus/prometheus.go | 51 +++++++++++++++++++++---- 2 files changed, 45 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9308f9390..6672034ab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ### Features - [#863](https://github.com/influxdata/telegraf/pull/863): AMQP output: allow external auth. Thanks @ekini! - [#707](https://github.com/influxdata/telegraf/pull/707): Improved prometheus plugin. Thanks @titilambert! +- [#880](https://github.com/influxdata/telegraf/pull/880): Add the ability to specify the bearer token to the prometheus plugin. Thanks @jchauncey! ### Bugfixes diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 05149f332..0281cc24a 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -1,11 +1,13 @@ package prometheus import ( + "crypto/tls" "errors" "fmt" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" "io/ioutil" + "net" "net/http" "sync" "time" @@ -13,18 +15,28 @@ import ( type Prometheus struct { Urls []string + + // Use SSL but skip chain & host verification + InsecureSkipVerify bool + // Bearer Token authorization file path + BearerToken string `toml:"bearer_token"` } var sampleConfig = ` ## An array of urls to scrape metrics from. urls = ["http://localhost:9100/metrics"] + + ### Use SSL but skip chain & host verification + # insecure_skip_verify = false + ### Use bearer token for authorization + # bearer_token = /path/to/bearer/token ` -func (r *Prometheus) SampleConfig() string { +func (p *Prometheus) SampleConfig() string { return sampleConfig } -func (r *Prometheus) Description() string { +func (p *Prometheus) Description() string { return "Read metrics from one or many prometheus clients" } @@ -32,16 +44,16 @@ var ErrProtocolError = errors.New("prometheus protocol error") // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). -func (g *Prometheus) Gather(acc telegraf.Accumulator) error { +func (p *Prometheus) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup var outerr error - for _, serv := range g.Urls { + for _, serv := range p.Urls { wg.Add(1) go func(serv string) { defer wg.Done() - outerr = g.gatherURL(serv, acc) + outerr = p.gatherURL(serv, acc) }(serv) } @@ -59,9 +71,34 @@ var client = &http.Client{ Timeout: time.Duration(4 * time.Second), } -func (g *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error { +func (p *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error { collectDate := time.Now() - resp, err := client.Get(url) + var req, err = http.NewRequest("GET", url, nil) + req.Header = make(http.Header) + var token []byte + var resp *http.Response + + var rt http.RoundTripper = &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 10 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: p.InsecureSkipVerify, + }, + ResponseHeaderTimeout: time.Duration(3 * time.Second), + } + + if p.BearerToken != "" { + token, err = ioutil.ReadFile(p.BearerToken) + if err != nil { + return err + } + req.Header.Set("Authorization", "Bearer "+string(token)) + } + + resp, err = rt.RoundTrip(req) if err != nil { return fmt.Errorf("error making HTTP request to %s: %s", url, err) } From d66d66e74b7817ffd0ff85d254087c0fef16dc4f Mon Sep 17 00:00:00 2001 From: Marcelo Salazar Date: Thu, 17 Mar 2016 14:50:39 -0300 Subject: [PATCH 165/287] added json serializer closes #878 --- CHANGELOG.md | 2 + docs/DATA_FORMATS_OUTPUT.md | 38 +++++++++++- plugins/serializers/json/json.go | 27 +++++++++ plugins/serializers/json/json_test.go | 87 +++++++++++++++++++++++++++ plugins/serializers/registry.go | 7 +++ 5 files changed, 159 insertions(+), 2 deletions(-) create mode 100644 plugins/serializers/json/json.go create mode 100644 plugins/serializers/json/json_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 6672034ab..eb343ccdf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,8 @@ - [#863](https://github.com/influxdata/telegraf/pull/863): AMQP output: allow external auth. Thanks @ekini! - [#707](https://github.com/influxdata/telegraf/pull/707): Improved prometheus plugin. Thanks @titilambert! - [#880](https://github.com/influxdata/telegraf/pull/880): Add the ability to specify the bearer token to the prometheus plugin. Thanks @jchauncey! +- [#878](https://github.com/influxdata/telegraf/pull/878): Added json serializer. Thanks @ch3lo! +- [#880](https://github.com/influxdata/telegraf/pull/880): Add the ability to specify the bearer token to the prometheus plugin. Thanks @jchauncey! ### Bugfixes diff --git a/docs/DATA_FORMATS_OUTPUT.md b/docs/DATA_FORMATS_OUTPUT.md index 524ec6d66..a75816a71 100644 --- a/docs/DATA_FORMATS_OUTPUT.md +++ b/docs/DATA_FORMATS_OUTPUT.md @@ -53,7 +53,7 @@ metrics are serialized directly into InfluxDB line-protocol. ## Files to write to, "stdout" is a specially handled file. files = ["stdout", "/tmp/metrics.out"] - ## Data format to output. This can be "influx" or "graphite" + ## Data format to output. This can be "influx", "json" or "graphite" ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md @@ -87,7 +87,7 @@ tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690 ## Files to write to, "stdout" is a specially handled file. files = ["stdout", "/tmp/metrics.out"] - ## Data format to output. This can be "influx" or "graphite" + ## Data format to output. This can be "influx", "json" or "graphite" ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md @@ -95,3 +95,37 @@ tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690 prefix = "telegraf" ``` + +## Json: + +The Json data format serialized Telegraf metrics in json format. The format is: + +```json +{ + "fields":{ + "field_1":30, + "field_2":4, + "field_N":59, + "n_images":660 + }, + "name":"docker", + "tags":{ + "host":"raynor" + }, + "timestamp":1458229140 +} +``` + +#### Json Configuration: + +```toml +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["stdout", "/tmp/metrics.out"] + + ## Data format to output. This can be "influx", "json" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "json" +``` diff --git a/plugins/serializers/json/json.go b/plugins/serializers/json/json.go new file mode 100644 index 000000000..e27aa400f --- /dev/null +++ b/plugins/serializers/json/json.go @@ -0,0 +1,27 @@ +package json + +import ( + ejson "encoding/json" + + "github.com/influxdata/telegraf" +) + +type JsonSerializer struct { +} + +func (s *JsonSerializer) Serialize(metric telegraf.Metric) ([]string, error) { + out := []string{} + + m := make(map[string]interface{}) + m["tags"] = metric.Tags() + m["fields"] = metric.Fields() + m["name"] = metric.Name() + m["timestamp"] = metric.UnixNano() / 1000000000 + serialized, err := ejson.Marshal(m) + if err != nil { + return []string{}, err + } + out = append(out, string(serialized)) + + return out, nil +} diff --git a/plugins/serializers/json/json_test.go b/plugins/serializers/json/json_test.go new file mode 100644 index 000000000..127bf237a --- /dev/null +++ b/plugins/serializers/json/json_test.go @@ -0,0 +1,87 @@ +package json + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/influxdata/telegraf" +) + +func TestSerializeMetricFloat(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + m, err := telegraf.NewMetric("cpu", tags, fields, now) + assert.NoError(t, err) + + s := JsonSerializer{} + mS, err := s.Serialize(m) + assert.NoError(t, err) + expS := []string{fmt.Sprintf("{\"fields\":{\"usage_idle\":91.5},\"name\":\"cpu\",\"tags\":{\"cpu\":\"cpu0\"},\"timestamp\":%d}", now.Unix())} + assert.Equal(t, expS, mS) +} + +func TestSerializeMetricInt(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": int64(90), + } + m, err := telegraf.NewMetric("cpu", tags, fields, now) + assert.NoError(t, err) + + s := JsonSerializer{} + mS, err := s.Serialize(m) + assert.NoError(t, err) + + expS := []string{fmt.Sprintf("{\"fields\":{\"usage_idle\":90},\"name\":\"cpu\",\"tags\":{\"cpu\":\"cpu0\"},\"timestamp\":%d}", now.Unix())} + assert.Equal(t, expS, mS) +} + +func TestSerializeMetricString(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": "foobar", + } + m, err := telegraf.NewMetric("cpu", tags, fields, now) + assert.NoError(t, err) + + s := JsonSerializer{} + mS, err := s.Serialize(m) + assert.NoError(t, err) + + expS := []string{fmt.Sprintf("{\"fields\":{\"usage_idle\":\"foobar\"},\"name\":\"cpu\",\"tags\":{\"cpu\":\"cpu0\"},\"timestamp\":%d}", now.Unix())} + assert.Equal(t, expS, mS) +} + +func TestSerializeMultiFields(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": int64(90), + "usage_total": 8559615, + } + m, err := telegraf.NewMetric("cpu", tags, fields, now) + assert.NoError(t, err) + + s := JsonSerializer{} + mS, err := s.Serialize(m) + assert.NoError(t, err) + + expS := []string{fmt.Sprintf("{\"fields\":{\"usage_idle\":90,\"usage_total\":8559615},\"name\":\"cpu\",\"tags\":{\"cpu\":\"cpu0\"},\"timestamp\":%d}", now.Unix())} + assert.Equal(t, expS, mS) +} diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index 2fedfbeaf..ebf79bc59 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -5,6 +5,7 @@ import ( "github.com/influxdata/telegraf/plugins/serializers/graphite" "github.com/influxdata/telegraf/plugins/serializers/influx" + "github.com/influxdata/telegraf/plugins/serializers/json" ) // SerializerOutput is an interface for output plugins that are able to @@ -40,10 +41,16 @@ func NewSerializer(config *Config) (Serializer, error) { serializer, err = NewInfluxSerializer() case "graphite": serializer, err = NewGraphiteSerializer(config.Prefix) + case "json": + serializer, err = NewJsonSerializer() } return serializer, err } +func NewJsonSerializer() (Serializer, error) { + return &json.JsonSerializer{}, nil +} + func NewInfluxSerializer() (Serializer, error) { return &influx.InfluxSerializer{}, nil } From ba06533c3ecf0dee88105cab452e508698cd88d6 Mon Sep 17 00:00:00 2001 From: Balakrishnan Date: Thu, 17 Mar 2016 18:01:19 -0400 Subject: [PATCH 166/287] Fixed SQL Server Plugin issues #881 --- plugins/inputs/sqlserver/sqlserver.go | 97 ++++++++++++++++++++------- 1 file changed, 71 insertions(+), 26 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 3b29a32c1..58d61705f 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -283,30 +283,75 @@ EXEC sp_executesql @DynamicPivotQuery; const sqlMemoryClerk string = `SET NOCOUNT ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; -DECLARE @w TABLE (ClerkCategory nvarchar(64) NOT NULL, UsedPercent decimal(9,2), UsedBytes bigint) -INSERT @w (ClerkCategory, UsedPercent, UsedBytes) -SELECT ClerkCategory -, UsedPercent = SUM(UsedPercent) -, UsedBytes = SUM(UsedBytes) -FROM -( -SELECT ClerkCategory = CASE MC.[type] - WHEN 'MEMORYCLERK_SQLBUFFERPOOL' THEN 'Buffer pool' - WHEN 'CACHESTORE_SQLCP' THEN 'Cache (sql plans)' - WHEN 'CACHESTORE_OBJCP' THEN 'Cache (objects)' - ELSE 'Other' END -, SUM(pages_kb * 1024) AS UsedBytes -, Cast(100 * Sum(pages_kb)*1.0/(Select Sum(pages_kb) From sys.dm_os_memory_clerks) as Decimal(7, 4)) UsedPercent -FROM sys.dm_os_memory_clerks MC -WHERE pages_kb > 0 -GROUP BY CASE MC.[type] - WHEN 'MEMORYCLERK_SQLBUFFERPOOL' THEN 'Buffer pool' - WHEN 'CACHESTORE_SQLCP' THEN 'Cache (sql plans)' - WHEN 'CACHESTORE_OBJCP' THEN 'Cache (objects)' - ELSE 'Other' END -) as T -GROUP BY ClerkCategory +DECLARE @sqlVers numeric(4,2) +SELECT @sqlVers = LEFT(CAST(SERVERPROPERTY('productversion') as varchar), 4) +IF OBJECT_ID('tempdb..#clerk') IS NOT NULL + DROP TABLE #clerk; + +CREATE TABLE #clerk ( + ClerkCategory nvarchar(64) NOT NULL, + UsedPercent decimal(9,2), + UsedBytes bigint +); + +DECLARE @DynamicClerkQuery AS NVARCHAR(MAX) + +IF @sqlVers < 11 +BEGIN + SET @DynamicClerkQuery = N' + INSERT #clerk (ClerkCategory, UsedPercent, UsedBytes) + SELECT ClerkCategory + , UsedPercent = SUM(UsedPercent) + , UsedBytes = SUM(UsedBytes) + FROM + ( + SELECT ClerkCategory = CASE MC.[type] + WHEN ''MEMORYCLERK_SQLBUFFERPOOL'' THEN ''Buffer pool'' + WHEN ''CACHESTORE_SQLCP'' THEN ''Cache (sql plans)'' + WHEN ''CACHESTORE_OBJCP'' THEN ''Cache (objects)'' + ELSE ''Other'' END + , SUM((single_pages_kb + multi_pages_kb) * 1024) AS UsedBytes + , Cast(100 * Sum((single_pages_kb + multi_pages_kb))*1.0/(Select Sum((single_pages_kb + multi_pages_kb)) From sys.dm_os_memory_clerks) as Decimal(7, 4)) UsedPercent + FROM sys.dm_os_memory_clerks MC + WHERE (single_pages_kb + multi_pages_kb) > 0 + GROUP BY CASE MC.[type] + WHEN ''MEMORYCLERK_SQLBUFFERPOOL'' THEN ''Buffer pool'' + WHEN ''CACHESTORE_SQLCP'' THEN ''Cache (sql plans)'' + WHEN ''CACHESTORE_OBJCP'' THEN ''Cache (objects)'' + ELSE ''Other'' END + ) as T + GROUP BY ClerkCategory; + ' +END +ELSE +BEGIN + SET @DynamicClerkQuery = N' + INSERT #clerk (ClerkCategory, UsedPercent, UsedBytes) + SELECT ClerkCategory + , UsedPercent = SUM(UsedPercent) + , UsedBytes = SUM(UsedBytes) + FROM + ( + SELECT ClerkCategory = CASE MC.[type] + WHEN ''MEMORYCLERK_SQLBUFFERPOOL'' THEN ''Buffer pool'' + WHEN ''CACHESTORE_SQLCP'' THEN ''Cache (sql plans)'' + WHEN ''CACHESTORE_OBJCP'' THEN ''Cache (objects)'' + ELSE ''Other'' END + , SUM(pages_kb * 1024) AS UsedBytes + , Cast(100 * Sum(pages_kb)*1.0/(Select Sum(pages_kb) From sys.dm_os_memory_clerks) as Decimal(7, 4)) UsedPercent + FROM sys.dm_os_memory_clerks MC + WHERE pages_kb > 0 + GROUP BY CASE MC.[type] + WHEN ''MEMORYCLERK_SQLBUFFERPOOL'' THEN ''Buffer pool'' + WHEN ''CACHESTORE_SQLCP'' THEN ''Cache (sql plans)'' + WHEN ''CACHESTORE_OBJCP'' THEN ''Cache (objects)'' + ELSE ''Other'' END + ) as T + GROUP BY ClerkCategory; + ' +END +EXEC sp_executesql @DynamicClerkQuery; SELECT -- measurement measurement @@ -325,7 +370,7 @@ SELECT measurement = 'Memory breakdown (%)' , [Cache (objects)] = ISNULL(ROUND([Cache (objects)], 1), 0) , [Cache (sql plans)] = ISNULL(ROUND([Cache (sql plans)], 1), 0) , [Other] = ISNULL(ROUND([Other], 1), 0) -FROM (SELECT ClerkCategory, UsedPercent FROM @w) as G1 +FROM (SELECT ClerkCategory, UsedPercent FROM #clerk) as G1 PIVOT ( SUM(UsedPercent) @@ -339,7 +384,7 @@ SELECT measurement = 'Memory breakdown (bytes)' , [Cache (objects)] = ISNULL(ROUND([Cache (objects)], 1), 0) , [Cache (sql plans)] = ISNULL(ROUND([Cache (sql plans)], 1), 0) , [Other] = ISNULL(ROUND([Other], 1), 0) -FROM (SELECT ClerkCategory, UsedBytes FROM @w) as G2 +FROM (SELECT ClerkCategory, UsedBytes FROM #clerk) as G2 PIVOT ( SUM(UsedBytes) @@ -698,7 +743,7 @@ IF OBJECT_ID('tempdb..#Databases') IS NOT NULL CREATE TABLE #Databases ( Measurement nvarchar(64) NOT NULL, - DatabaseName nvarchar(64) NOT NULL, + DatabaseName nvarchar(128) NOT NULL, Value tinyint NOT NULL Primary Key(DatabaseName, Measurement) ); From 7aa55371b531fcad0f3c52589e32d8068dab60ab Mon Sep 17 00:00:00 2001 From: Lukasz Jagiello Date: Thu, 17 Mar 2016 15:54:22 -0700 Subject: [PATCH 167/287] Duplicate line --- CHANGELOG.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index eb343ccdf..d4b481868 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,7 +3,6 @@ ### Features - [#863](https://github.com/influxdata/telegraf/pull/863): AMQP output: allow external auth. Thanks @ekini! - [#707](https://github.com/influxdata/telegraf/pull/707): Improved prometheus plugin. Thanks @titilambert! -- [#880](https://github.com/influxdata/telegraf/pull/880): Add the ability to specify the bearer token to the prometheus plugin. Thanks @jchauncey! - [#878](https://github.com/influxdata/telegraf/pull/878): Added json serializer. Thanks @ch3lo! - [#880](https://github.com/influxdata/telegraf/pull/880): Add the ability to specify the bearer token to the prometheus plugin. Thanks @jchauncey! From c356e565221a008ee022491ee1f21405c82fef09 Mon Sep 17 00:00:00 2001 From: Balakrishnan Date: Thu, 17 Mar 2016 19:56:39 -0400 Subject: [PATCH 168/287] Updated Change log #881 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index eb343ccdf..38271811c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ - [#880](https://github.com/influxdata/telegraf/pull/880): Add the ability to specify the bearer token to the prometheus plugin. Thanks @jchauncey! - [#878](https://github.com/influxdata/telegraf/pull/878): Added json serializer. Thanks @ch3lo! - [#880](https://github.com/influxdata/telegraf/pull/880): Add the ability to specify the bearer token to the prometheus plugin. Thanks @jchauncey! +- [#882](https://github.com/influxdata/telegraf/pull/882): Fixed SQL Server Plugin issues ### Bugfixes From 63410491b724c4f2404153a640a53e2bfbb44178 Mon Sep 17 00:00:00 2001 From: HUANG Wei Date: Fri, 18 Mar 2016 10:06:44 +0800 Subject: [PATCH 169/287] Fix typo, should be input instead of output. --- cmd/telegraf/telegraf.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 436d1a38e..be591829b 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -32,7 +32,7 @@ var fPidfile = flag.String("pidfile", "", "file to write our pid to") var fInputFilters = flag.String("input-filter", "", "filter the inputs to enable, separator is :") var fInputList = flag.Bool("input-list", false, - "print available output plugins.") + "print available input plugins.") var fOutputFilters = flag.String("output-filter", "", "filter the outputs to enable, separator is :") var fOutputList = flag.Bool("output-list", false, From 5c1b635229435d2f1c032b7342ca9e8d877707fe Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 17 Mar 2016 18:01:01 -0600 Subject: [PATCH 170/287] Value parser, for parsing a single value into a metric closes #849 --- docs/DATA_FORMATS_INPUT.md | 39 +++++ internal/config/config.go | 9 + plugins/parsers/registry.go | 23 ++- plugins/parsers/value/parser.go | 68 ++++++++ plugins/parsers/value/parser_test.go | 238 +++++++++++++++++++++++++++ 5 files changed, 375 insertions(+), 2 deletions(-) create mode 100644 plugins/parsers/value/parser.go create mode 100644 plugins/parsers/value/parser_test.go diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index 79528a962..12c4d4cde 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -1,5 +1,12 @@ # Telegraf Input Data Formats +Telegraf is able to parse the following input data formats into metrics: + +1. InfluxDB Line Protocol +1. JSON +1. Graphite +1. Value, ie 45 or "booyah" + Telegraf metrics, like InfluxDB [points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/), are a combination of four basic parts: @@ -134,6 +141,38 @@ Your Telegraf metrics would get tagged with "my_tag_1" exec_mycollector,my_tag_1=foo a=5,b_c=6 ``` +## Value: + +The "value" data format translates single values into Telegraf metrics. This +is done by assigning a measurement name (which can be overridden using the +`name_override` config option), and setting a single field ("value") as the +parsed metric. + +#### Value Configuration: + +You can tell Telegraf what type of metric to collect by using the `data_type` +configuration option. + +It is also recommended that you set `name_override` to a measurement name that +makes sense for your metric, otherwise it will just be set to the name of the +plugin. + +```toml +[[inputs.exec]] + ## Commands array + commands = ["cat /proc/sys/kernel/random/entropy_avail"] + + ## override the default metric name of "exec" + name_override = "entropy_available" + + ## Data format to consume. This can be "json", "value", influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "value" + data_type = "integer" +``` + ## Graphite: The Graphite data format translates graphite _dot_ buckets directly into diff --git a/internal/config/config.go b/internal/config/config.go index f64e0a56a..6990b2db7 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -701,12 +701,21 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { } } + if node, ok := tbl.Fields["data_type"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.DataType = str.Value + } + } + } + c.MetricName = name delete(tbl.Fields, "data_format") delete(tbl.Fields, "separator") delete(tbl.Fields, "templates") delete(tbl.Fields, "tag_keys") + delete(tbl.Fields, "data_type") return parsers.NewParser(c) } diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index 982b6bb80..b86b61c18 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -8,6 +8,7 @@ import ( "github.com/influxdata/telegraf/plugins/parsers/graphite" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/parsers/json" + "github.com/influxdata/telegraf/plugins/parsers/value" ) // ParserInput is an interface for input plugins that are able to parse @@ -38,7 +39,7 @@ type Parser interface { // Config is a struct that covers the data types needed for all parser types, // and can be used to instantiate _any_ of the parsers. type Config struct { - // Dataformat can be one of: json, influx, graphite + // Dataformat can be one of: json, influx, graphite, value DataFormat string // Separator only applied to Graphite data. @@ -48,9 +49,12 @@ type Config struct { // TagKeys only apply to JSON data TagKeys []string - // MetricName only applies to JSON data. This will be the name of the measurement. + // MetricName applies to JSON & value. This will be the name of the measurement. MetricName string + // DataType only applies to value, this will be the type to parse value to + DataType string + // DefaultTags are the default tags that will be added to all parsed metrics. DefaultTags map[string]string } @@ -63,6 +67,9 @@ func NewParser(config *Config) (Parser, error) { case "json": parser, err = NewJSONParser(config.MetricName, config.TagKeys, config.DefaultTags) + case "value": + parser, err = NewValueParser(config.MetricName, + config.DataType, config.DefaultTags) case "influx": parser, err = NewInfluxParser() case "graphite": @@ -98,3 +105,15 @@ func NewGraphiteParser( ) (Parser, error) { return graphite.NewGraphiteParser(separator, templates, defaultTags) } + +func NewValueParser( + metricName string, + dataType string, + defaultTags map[string]string, +) (Parser, error) { + return &value.ValueParser{ + MetricName: metricName, + DataType: dataType, + DefaultTags: defaultTags, + }, nil +} diff --git a/plugins/parsers/value/parser.go b/plugins/parsers/value/parser.go new file mode 100644 index 000000000..00673eced --- /dev/null +++ b/plugins/parsers/value/parser.go @@ -0,0 +1,68 @@ +package value + +import ( + "bytes" + "fmt" + "strconv" + "time" + + "github.com/influxdata/telegraf" +) + +type ValueParser struct { + MetricName string + DataType string + DefaultTags map[string]string +} + +func (v *ValueParser) Parse(buf []byte) ([]telegraf.Metric, error) { + // separate out any fields in the buffer, ignore anything but the last. + values := bytes.Fields(buf) + if len(values) < 1 { + return []telegraf.Metric{}, nil + } + valueStr := string(values[len(values)-1]) + + var value interface{} + var err error + switch v.DataType { + case "", "int", "integer": + value, err = strconv.Atoi(valueStr) + case "float", "long": + value, err = strconv.ParseFloat(valueStr, 64) + case "str", "string": + value = valueStr + case "bool", "boolean": + value, err = strconv.ParseBool(valueStr) + } + if err != nil { + return nil, err + } + + fields := map[string]interface{}{"value": value} + metric, err := telegraf.NewMetric(v.MetricName, v.DefaultTags, + fields, time.Now().UTC()) + if err != nil { + return nil, err + } + + return []telegraf.Metric{metric}, nil +} + +func (v *ValueParser) ParseLine(line string) (telegraf.Metric, error) { + metrics, err := v.Parse([]byte(line)) + + if err != nil { + return nil, err + } + + if len(metrics) < 1 { + return nil, fmt.Errorf("Can not parse the line: %s, for data format: value", line) + } + + return metrics[0], nil +} + +func (v *ValueParser) SetDefaultTags(tags map[string]string) { + v.DefaultTags = tags +} diff --git a/plugins/parsers/value/parser_test.go b/plugins/parsers/value/parser_test.go new file mode 100644 index 000000000..f60787491 --- /dev/null +++ b/plugins/parsers/value/parser_test.go @@ -0,0 +1,238 @@ +package value + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseValidValues(t *testing.T) { + parser := ValueParser{ + MetricName: "value_test", + DataType: "integer", + } + metrics, err := parser.Parse([]byte("55")) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + assert.Equal(t, "value_test", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "value": int64(55), + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{}, metrics[0].Tags()) + + parser = ValueParser{ + MetricName: "value_test", + DataType: "float", + } + metrics, err = parser.Parse([]byte("64")) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + assert.Equal(t, "value_test", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "value": float64(64), + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{}, metrics[0].Tags()) + + parser = ValueParser{ + MetricName: "value_test", + DataType: "string", + } + metrics, err = parser.Parse([]byte("foobar")) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + assert.Equal(t, "value_test", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "value": "foobar", + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{}, metrics[0].Tags()) + + parser = ValueParser{ + MetricName: "value_test", + DataType: "boolean", + } + metrics, err = parser.Parse([]byte("true")) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + assert.Equal(t, "value_test", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "value": true, + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{}, metrics[0].Tags()) +} + +func TestParseMultipleValues(t *testing.T) { + parser := ValueParser{ + MetricName: "value_test", + DataType: "integer", + } + metrics, err := parser.Parse([]byte(`55 +45 +223 +12 +999 +`)) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + assert.Equal(t, "value_test", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "value": int64(999), + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{}, metrics[0].Tags()) +} + +func TestParseLineValidValues(t *testing.T) { + parser := ValueParser{ + MetricName: "value_test", + DataType: "integer", + } + metric, err := parser.ParseLine("55") + assert.NoError(t, err) + assert.Equal(t, "value_test", metric.Name()) + assert.Equal(t, map[string]interface{}{ + "value": int64(55), + }, metric.Fields()) + assert.Equal(t, map[string]string{}, metric.Tags()) + + parser = ValueParser{ + MetricName: "value_test", + DataType: "float", + } + metric, err = parser.ParseLine("64") + assert.NoError(t, err) + assert.Equal(t, "value_test", metric.Name()) + assert.Equal(t, map[string]interface{}{ + "value": float64(64), + }, metric.Fields()) + assert.Equal(t, map[string]string{}, metric.Tags()) + + parser = ValueParser{ + MetricName: "value_test", + DataType: "string", + } + metric, err = parser.ParseLine("foobar") + assert.NoError(t, err) + assert.Equal(t, "value_test", metric.Name()) + assert.Equal(t, map[string]interface{}{ + "value": "foobar", + }, metric.Fields()) + assert.Equal(t, map[string]string{}, metric.Tags()) + + parser = ValueParser{ + MetricName: "value_test", + DataType: "boolean", + } + metric, err = parser.ParseLine("true") + assert.NoError(t, err) + assert.Equal(t, "value_test", metric.Name()) + assert.Equal(t, map[string]interface{}{ + "value": true, + }, metric.Fields()) + assert.Equal(t, map[string]string{}, metric.Tags()) +} + +func TestParseInvalidValues(t *testing.T) { + parser := ValueParser{ + MetricName: "value_test", + DataType: "integer", + } + metrics, err := parser.Parse([]byte("55.0")) + assert.Error(t, err) + assert.Len(t, metrics, 0) + + parser = ValueParser{ + MetricName: "value_test", + DataType: "float", + } + metrics, err = parser.Parse([]byte("foobar")) + assert.Error(t, err) + assert.Len(t, metrics, 0) + + parser = ValueParser{ + MetricName: "value_test", + DataType: "boolean", + } + metrics, err = parser.Parse([]byte("213")) + assert.Error(t, err) + assert.Len(t, metrics, 0) +} + +func TestParseLineInvalidValues(t *testing.T) { + parser := ValueParser{ + MetricName: "value_test", + DataType: "integer", + } + _, err := parser.ParseLine("55.0") + assert.Error(t, err) + + parser = ValueParser{ + MetricName: "value_test", + DataType: "float", + } + _, err = parser.ParseLine("foobar") + assert.Error(t, err) + + parser = ValueParser{ + MetricName: "value_test", + DataType: "boolean", + } + _, err = parser.ParseLine("213") + assert.Error(t, err) +} + +func TestParseValidValuesDefaultTags(t *testing.T) { + parser := ValueParser{ + MetricName: "value_test", + DataType: "integer", + } + parser.SetDefaultTags(map[string]string{"test": "tag"}) + metrics, err := parser.Parse([]byte("55")) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + assert.Equal(t, "value_test", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "value": int64(55), + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags()) + + parser = ValueParser{ + MetricName: "value_test", + DataType: "float", + } + parser.SetDefaultTags(map[string]string{"test": "tag"}) + metrics, err = parser.Parse([]byte("64")) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + assert.Equal(t, "value_test", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "value": float64(64), + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags()) + + parser = ValueParser{ + MetricName: "value_test", + DataType: "string", + } + parser.SetDefaultTags(map[string]string{"test": "tag"}) + metrics, err = parser.Parse([]byte("foobar")) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + assert.Equal(t, "value_test", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "value": "foobar", + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags()) + + parser = ValueParser{ + MetricName: "value_test", + DataType: "boolean", + } + parser.SetDefaultTags(map[string]string{"test": "tag"}) + metrics, err = parser.Parse([]byte("true")) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + assert.Equal(t, "value_test", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "value": true, + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags()) +} From fe7b884cc9cf97febce4080cce1c30341fecef78 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 17 Mar 2016 20:40:22 -0600 Subject: [PATCH 171/287] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 684e1343f..3d2187913 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ - [#878](https://github.com/influxdata/telegraf/pull/878): Added json serializer. Thanks @ch3lo! - [#880](https://github.com/influxdata/telegraf/pull/880): Add the ability to specify the bearer token to the prometheus plugin. Thanks @jchauncey! - [#882](https://github.com/influxdata/telegraf/pull/882): Fixed SQL Server Plugin issues +- [#849](https://github.com/influxdata/telegraf/issues/849): Adding ability to parse single values as an input data type. ### Bugfixes From 5b0c3951f64a9d6b1d298c9bc01c9df73635c28e Mon Sep 17 00:00:00 2001 From: JP Date: Fri, 18 Mar 2016 11:25:51 -0500 Subject: [PATCH 172/287] replace @ character with - for librato --- plugins/outputs/librato/librato.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index ed15350fc..406f45361 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -159,7 +159,10 @@ func (l *Librato) buildGaugeName(m telegraf.Metric, fieldName string) string { serializedMetric := graphiteSerializer.SerializeBucketName(m, fieldName) // Deal with slash characters: - return strings.Replace(serializedMetric, "/", "-", -1) + replacedString := strings.Replace(serializedMetric, "/", "-", -1) + // Deal with @ characters: + replacedString = strings.Replace(replacedString, "@", "-", -1) + return replacedString } func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) { From 061b74904169bec8b4e9c010c81b9b71d078e24a Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 18 Mar 2016 10:24:44 -0600 Subject: [PATCH 173/287] TLS config: if only given ssl_ca, create tls config anyways fixes #890 --- CHANGELOG.md | 1 + internal/internal.go | 43 ++++++++++++++++++++++--------------------- 2 files changed, 23 insertions(+), 21 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3d2187913..1e2a4856f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ - [#849](https://github.com/influxdata/telegraf/issues/849): Adding ability to parse single values as an input data type. ### Bugfixes +[#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided. ## v0.11.1 [2016-03-17] diff --git a/internal/internal.go b/internal/internal.go index 9c3696c3d..8a427909e 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -86,15 +86,15 @@ func GetTLSConfig( SSLCert, SSLKey, SSLCA string, InsecureSkipVerify bool, ) (*tls.Config, error) { - t := &tls.Config{} - if SSLCert != "" && SSLKey != "" && SSLCA != "" { - cert, err := tls.LoadX509KeyPair(SSLCert, SSLKey) - if err != nil { - return nil, errors.New(fmt.Sprintf( - "Could not load TLS client key/certificate: %s", - err)) - } + if SSLCert == "" && SSLKey == "" && SSLCA == "" && !InsecureSkipVerify { + return nil, nil + } + t := &tls.Config{ + InsecureSkipVerify: InsecureSkipVerify, + } + + if SSLCA != "" { caCert, err := ioutil.ReadFile(SSLCA) if err != nil { return nil, errors.New(fmt.Sprintf("Could not load TLS CA: %s", @@ -103,20 +103,21 @@ func GetTLSConfig( caCertPool := x509.NewCertPool() caCertPool.AppendCertsFromPEM(caCert) - - t = &tls.Config{ - Certificates: []tls.Certificate{cert}, - RootCAs: caCertPool, - InsecureSkipVerify: InsecureSkipVerify, - } - t.BuildNameToCertificate() - } else { - if InsecureSkipVerify { - t.InsecureSkipVerify = true - } else { - return nil, nil - } + t.RootCAs = caCertPool } + + if SSLCert != "" && SSLKey != "" { + cert, err := tls.LoadX509KeyPair(SSLCert, SSLKey) + if err != nil { + return nil, errors.New(fmt.Sprintf( + "Could not load TLS client key/certificate: %s", + err)) + } + + t.Certificates = []tls.Certificate{cert} + t.BuildNameToCertificate() + } + // will be nil by default if nothing is provided return t, nil } From 77dcbe95c0d6610654de3b01cbce0d16def71cf2 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 18 Mar 2016 10:51:14 -0600 Subject: [PATCH 174/287] Do not write metrics if there are 0 to write closes #884 --- CHANGELOG.md | 1 + internal/models/running_output.go | 3 +++ 2 files changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1e2a4856f..93a8950e5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ ### Bugfixes [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided. +[#884](https://github.com/influxdata/telegraf/issues/884): Do not call write method if there are 0 metrics to write. ## v0.11.1 [2016-03-17] diff --git a/internal/models/running_output.go b/internal/models/running_output.go index 33fa4e120..1e3d44a61 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -121,6 +121,9 @@ func (ro *RunningOutput) Write() error { } func (ro *RunningOutput) write(metrics []telegraf.Metric) error { + if len(metrics) == 0 { + return nil + } start := time.Now() err := ro.Output.Write(metrics) elapsed := time.Since(start) From 18f4afb388fe97e8ac653030d971a73f2803a75c Mon Sep 17 00:00:00 2001 From: HUANG Wei Date: Fri, 18 Mar 2016 10:21:09 +0800 Subject: [PATCH 175/287] Inherit previous instance's stats in statsd plugin. This way, after a reload, the stats wont restart again at least for the counter type. closes #887 --- plugins/inputs/statsd/statsd.go | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index 943188353..5e1e85667 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -26,6 +26,8 @@ const ( var dropwarn = "ERROR: Message queue full. Discarding line [%s] " + "You may want to increase allowed_pending_messages in the config\n" +var prevInstance *Statsd + type Statsd struct { // Address & Port to serve from ServiceAddress string @@ -234,10 +236,18 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error { // Make data structures s.done = make(chan struct{}) s.in = make(chan []byte, s.AllowedPendingMessages) - s.gauges = make(map[string]cachedgauge) - s.counters = make(map[string]cachedcounter) - s.sets = make(map[string]cachedset) - s.timings = make(map[string]cachedtimings) + + if prevInstance == nil { + s.gauges = make(map[string]cachedgauge) + s.counters = make(map[string]cachedcounter) + s.sets = make(map[string]cachedset) + s.timings = make(map[string]cachedtimings) + } else { + s.gauges = prevInstance.gauges + s.counters = prevInstance.counters + s.sets = prevInstance.sets + s.timings = prevInstance.timings + } s.wg.Add(2) // Start the UDP listener @@ -245,6 +255,7 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error { // Start the line parser go s.parser() log.Printf("Started the statsd service on %s\n", s.ServiceAddress) + prevInstance = s return nil } From b371ec5cf642df5c20fadf18673b4d45fe215b21 Mon Sep 17 00:00:00 2001 From: Thomas Menard Date: Mon, 14 Mar 2016 10:27:07 +0100 Subject: [PATCH 176/287] Add the postgresql_extensible plugin MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This plugin is intended to add an extended support of Postgresql compared to the legacy postgres plugin. Basically, the plugin don’t have any metrics defined and it’s up to the user to define it in the telegraph.conf (as a toml structure). Each query can have it’s specific tags, and can be written specifically using a where clause in order to eventually filter per database name. To be more generic, a minimum postgresql version has been defined per query in case you have 2 different version of Postgresql running on the same host. --- plugins/inputs/all/all.go | 1 + .../inputs/postgresql_extensible/README.md | 59 ++++ .../postgresql_extensible.go | 271 ++++++++++++++++++ .../postgresql_extensible_test.go | 98 +++++++ 4 files changed, 429 insertions(+) create mode 100644 plugins/inputs/postgresql_extensible/README.md create mode 100644 plugins/inputs/postgresql_extensible/postgresql_extensible.go create mode 100644 plugins/inputs/postgresql_extensible/postgresql_extensible_test.go diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index a3300df66..db36cfbec 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -34,6 +34,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/phpfpm" _ "github.com/influxdata/telegraf/plugins/inputs/ping" _ "github.com/influxdata/telegraf/plugins/inputs/postgresql" + _ "github.com/influxdata/telegraf/plugins/inputs/postgresql_extensible" _ "github.com/influxdata/telegraf/plugins/inputs/powerdns" _ "github.com/influxdata/telegraf/plugins/inputs/procstat" _ "github.com/influxdata/telegraf/plugins/inputs/prometheus" diff --git a/plugins/inputs/postgresql_extensible/README.md b/plugins/inputs/postgresql_extensible/README.md new file mode 100644 index 000000000..f44c66596 --- /dev/null +++ b/plugins/inputs/postgresql_extensible/README.md @@ -0,0 +1,59 @@ +# PostgreSQL plugin + +This postgresql plugin provides metrics for your postgres database. It has been designed to parse ithe sql queries in the plugin section of your telegraf.conf. + +For now only two queries are specified and it's up to you to add more; some per query parameters have been added : + +* The SQl query itself +* The minimum version supported (here in numeric display visible in pg_settings) +* A boolean to define if the query have to be run against some specific variables (defined in the databaes variable of the plugin section) +* The list of the column that have to be defined has tags + +``` + # specify address via a url matching: + # postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] + # or a simple string: + # host=localhost user=pqotest password=... sslmode=... dbname=app_production + # + # All connection parameters are optional. # + # Without the dbname parameter, the driver will default to a database + # with the same name as the user. This dbname is just for instantiating a + # connection with the server and doesn't restrict the databases we are trying + # to grab metrics for. + # + address = "host=localhost user=postgres sslmode=disable" + # A list of databases to pull metrics about. If not specified, metrics for all + # databases are gathered. + # databases = ["app_production", "testing"] + # + # Define the toml config where the sql queries are stored + # New queries can be added, if the withdbname is set to true and there is no databases defined + # in the 'databases field', the sql query is ended by a 'is not null' in order to make the query + # succeed. + # Be careful that the sqlquery must contain the where clause with a part of the filtering, the plugin will + # add a 'IN (dbname list)' clause if the withdbname is set to true + # Example : + # The sqlquery : "SELECT * FROM pg_stat_database where datname" become "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" + # because the databases variable was set to ['postgres', 'pgbench' ] and the withdbname was true. + # Be careful that if the withdbname is set to false you d'ont have to define the where clause (aka with the dbname) + # the tagvalue field is used to define custom tags (separated by comas) + # + # Structure : + # [[inputs.postgresql_extensible.query]] + # sqlquery string + # version string + # withdbname boolean + # tagvalue string (coma separated) + [[inputs.postgresql_extensible.query]] + sqlquery="SELECT * FROM pg_stat_database where datname" + version=901 + withdbname=false + tagvalue="" + [[inputs.postgresql_extensible.query]] + sqlquery="SELECT * FROM pg_stat_bgwriter" + version=901 + withdbname=false + tagvalue="" +``` + +The system can be easily extended using homemade metrics collection tools or using postgreql extensions ([pg_stat_statements](http://www.postgresql.org/docs/current/static/pgstatstatements.html), [pg_proctab](https://github.com/markwkm/pg_proctab), [powa](http://dalibo.github.io/powa/)...) diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go new file mode 100644 index 000000000..44c452e0b --- /dev/null +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -0,0 +1,271 @@ +package postgresql_extensible + +import ( + "bytes" + "database/sql" + "fmt" + "regexp" + "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + + "github.com/lib/pq" +) + +type Postgresql struct { + Address string + Databases []string + OrderedColumns []string + AllColumns []string + AdditionalTags []string + sanitizedAddress string + Query []struct { + Sqlquery string + Version int + Withdbname bool + Tagvalue string + } +} + +type query []struct { + Sqlquery string + Version int + Withdbname bool + Tagvalue string +} + +var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true} + +var sampleConfig = ` + # specify address via a url matching: + # postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] + # or a simple string: + # host=localhost user=pqotest password=... sslmode=... dbname=app_production + # + # All connection parameters are optional. # + # Without the dbname parameter, the driver will default to a database + # with the same name as the user. This dbname is just for instantiating a + # connection with the server and doesn't restrict the databases we are trying + # to grab metrics for. + # + address = "host=localhost user=postgres sslmode=disable" + # A list of databases to pull metrics about. If not specified, metrics for all + # databases are gathered. + # databases = ["app_production", "testing"] + # + # Define the toml config where the sql queries are stored + # New queries can be added, if the withdbname is set to true and there is no databases defined + # in the 'databases field', the sql query is ended by a 'is not null' in order to make the query + # succeed. + # Example : + # The sqlquery : "SELECT * FROM pg_stat_database where datname" become "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" + # because the databases variable was set to ['postgres', 'pgbench' ] and the withdbname was true. + # Be careful that if the withdbname is set to false you d'ont have to define the where clause (aka with the dbname) + # the tagvalue field is used to define custom tags (separated by comas) + # + # Structure : + # [[inputs.postgresql_extensible.query]] + # sqlquery string + # version string + # withdbname boolean + # tagvalue string (coma separated) + [[inputs.postgresql_extensible.query]] + sqlquery="SELECT * FROM pg_stat_database" + version=901 + withdbname=false + tagvalue="" + [[inputs.postgresql_extensible.query]] + sqlquery="SELECT * FROM pg_stat_bgwriter" + version=901 + withdbname=false + tagvalue="" +` + +func (p *Postgresql) SampleConfig() string { + return sampleConfig +} + +func (p *Postgresql) Description() string { + return "Read metrics from one or many postgresql servers" +} + +func (p *Postgresql) IgnoredColumns() map[string]bool { + return ignoredColumns +} + +var localhost = "host=localhost sslmode=disable" + +func (p *Postgresql) Gather(acc telegraf.Accumulator) error { + + var sql_query string + var query_addon string + var db_version int + var query string + var tag_value string + + if p.Address == "" || p.Address == "localhost" { + p.Address = localhost + } + + db, err := sql.Open("postgres", p.Address) + if err != nil { + return err + } + + defer db.Close() + + // Retreiving the database version + + query = `select substring(setting from 1 for 3) as version from pg_settings where name='server_version_num'` + err = db.QueryRow(query).Scan(&db_version) + if err != nil { + return err + } + // We loop in order to process each query + // Query is not run if Database version does not match the query version. + + for i := range p.Query { + sql_query = p.Query[i].Sqlquery + tag_value = p.Query[i].Tagvalue + + if p.Query[i].Withdbname { + if len(p.Databases) != 0 { + query_addon = fmt.Sprintf(` IN ('%s')`, + strings.Join(p.Databases, "','")) + } else { + query_addon = " is not null" + } + } else { + query_addon = "" + } + sql_query += query_addon + + if p.Query[i].Version <= db_version { + rows, err := db.Query(sql_query) + if err != nil { + return err + } + + defer rows.Close() + + // grab the column information from the result + p.OrderedColumns, err = rows.Columns() + if err != nil { + return err + } else { + for _, v := range p.OrderedColumns { + p.AllColumns = append(p.AllColumns, v) + } + } + p.AdditionalTags = nil + if tag_value != "" { + tag_list := strings.Split(tag_value, ",") + for t := range tag_list { + p.AdditionalTags = append(p.AdditionalTags, tag_list[t]) + } + } + + for rows.Next() { + err = p.accRow(rows, acc) + if err != nil { + return err + } + } + } + } + return nil +} + +type scanner interface { + Scan(dest ...interface{}) error +} + +var passwordKVMatcher, _ = regexp.Compile("password=\\S+ ?") + +func (p *Postgresql) SanitizedAddress() (_ string, err error) { + var canonicalizedAddress string + if strings.HasPrefix(p.Address, "postgres://") || strings.HasPrefix(p.Address, "postgresql://") { + canonicalizedAddress, err = pq.ParseURL(p.Address) + if err != nil { + return p.sanitizedAddress, err + } + } else { + canonicalizedAddress = p.Address + } + p.sanitizedAddress = passwordKVMatcher.ReplaceAllString(canonicalizedAddress, "") + + return p.sanitizedAddress, err +} + +func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator) error { + var columnVars []interface{} + var dbname bytes.Buffer + + // this is where we'll store the column name with its *interface{} + columnMap := make(map[string]*interface{}) + + for _, column := range p.OrderedColumns { + columnMap[column] = new(interface{}) + } + + // populate the array of interface{} with the pointers in the right order + for i := 0; i < len(columnMap); i++ { + columnVars = append(columnVars, columnMap[p.OrderedColumns[i]]) + } + + // deconstruct array of variables and send to Scan + err := row.Scan(columnVars...) + + if err != nil { + return err + } + if columnMap["datname"] != nil { + // extract the database name from the column map + dbnameChars := (*columnMap["datname"]).([]uint8) + for i := 0; i < len(dbnameChars); i++ { + dbname.WriteString(string(dbnameChars[i])) + } + } else { + dbname.WriteString("postgres") + } + + var tagAddress string + tagAddress, err = p.SanitizedAddress() + if err != nil { + return err + } + + // Process the additional tags + + tags := map[string]string{} + tags["server"] = tagAddress + tags["db"] = dbname.String() + + fields := make(map[string]interface{}) + for col, val := range columnMap { + _, ignore := ignoredColumns[col] + //if !ignore && *val != "" { + if !ignore { + for tag := range p.AdditionalTags { + if col == p.AdditionalTags[tag] { + value_type_p := fmt.Sprintf(`%T`, *val) + if value_type_p == "[]uint8" { + tags[col] = fmt.Sprintf(`%s`, *val) + } else if value_type_p == "int64" { + tags[col] = fmt.Sprintf(`%v`, *val) + } + } + } + fields[col] = *val + } + } + acc.AddFields("postgresql", fields, tags) + return nil +} + +func init() { + inputs.Add("postgresql_extensible", func() telegraf.Input { + return &Postgresql{} + }) +} diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go new file mode 100644 index 000000000..7fd907102 --- /dev/null +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go @@ -0,0 +1,98 @@ +package postgresql_extensible + +import ( + "fmt" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPostgresqlGeneratesMetrics(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + p := &Postgresql{ + Address: fmt.Sprintf("host=%s user=postgres sslmode=disable", + testutil.GetLocalHost()), + Databases: []string{"postgres"}, + Query: query{ + {Sqlquery: "select * from pg_stat_database", + Version: 901, + Withdbname: false, + Tagvalue: ""}, + }, + } + var acc testutil.Accumulator + err := p.Gather(&acc) + require.NoError(t, err) + + availableColumns := make(map[string]bool) + for _, col := range p.AllColumns { + availableColumns[col] = true + } + intMetrics := []string{ + "xact_commit", + "xact_rollback", + "blks_read", + "blks_hit", + "tup_returned", + "tup_fetched", + "tup_inserted", + "tup_updated", + "tup_deleted", + "conflicts", + "temp_files", + "temp_bytes", + "deadlocks", + "numbackends", + } + + floatMetrics := []string{ + "blk_read_time", + "blk_write_time", + } + + metricsCounted := 0 + + for _, metric := range intMetrics { + _, ok := availableColumns[metric] + if ok { + assert.True(t, acc.HasIntField("postgresql", metric)) + metricsCounted++ + } + } + + for _, metric := range floatMetrics { + _, ok := availableColumns[metric] + if ok { + assert.True(t, acc.HasFloatField("postgresql", metric)) + metricsCounted++ + } + } + + assert.True(t, metricsCounted > 0) + assert.Equal(t, len(availableColumns)-len(p.IgnoredColumns()), metricsCounted) +} + +func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + p := &Postgresql{ + Address: fmt.Sprintf("host=%s user=postgres sslmode=disable", + testutil.GetLocalHost()), + } + + var acc testutil.Accumulator + + err := p.Gather(&acc) + require.NoError(t, err) + + for col := range p.IgnoredColumns() { + assert.False(t, acc.HasMeasurement(col)) + } +} From 36446bcbc2501be3fb18a30a880fd2e75a445748 Mon Sep 17 00:00:00 2001 From: Thomas Menard Date: Thu, 17 Mar 2016 15:01:08 +0100 Subject: [PATCH 177/287] Remove the columns used as tag closes #844 --- CHANGELOG.md | 1 + .../inputs/postgresql_extensible/README.md | 198 ++++++++++++++++-- .../postgresql_extensible.go | 8 +- 3 files changed, 192 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 93a8950e5..ed574daf3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ - [#880](https://github.com/influxdata/telegraf/pull/880): Add the ability to specify the bearer token to the prometheus plugin. Thanks @jchauncey! - [#882](https://github.com/influxdata/telegraf/pull/882): Fixed SQL Server Plugin issues - [#849](https://github.com/influxdata/telegraf/issues/849): Adding ability to parse single values as an input data type. +- [#844](https://github.com/influxdata/telegraf/pull/844): postgres_extensible plugin added. Thanks @menardorama! ### Bugfixes [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided. diff --git a/plugins/inputs/postgresql_extensible/README.md b/plugins/inputs/postgresql_extensible/README.md index f44c66596..e9fbc571c 100644 --- a/plugins/inputs/postgresql_extensible/README.md +++ b/plugins/inputs/postgresql_extensible/README.md @@ -1,17 +1,21 @@ # PostgreSQL plugin -This postgresql plugin provides metrics for your postgres database. It has been designed to parse ithe sql queries in the plugin section of your telegraf.conf. +This postgresql plugin provides metrics for your postgres database. It has been +designed to parse ithe sql queries in the plugin section of your telegraf.conf. -For now only two queries are specified and it's up to you to add more; some per query parameters have been added : +For now only two queries are specified and it's up to you to add more; some per +query parameters have been added : * The SQl query itself * The minimum version supported (here in numeric display visible in pg_settings) -* A boolean to define if the query have to be run against some specific variables (defined in the databaes variable of the plugin section) +* A boolean to define if the query have to be run against some specific +* variables (defined in the databaes variable of the plugin section) * The list of the column that have to be defined has tags ``` +[[inputs.postgresql_extensible]] # specify address via a url matching: - # postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] + # postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=... # or a simple string: # host=localhost user=pqotest password=... sslmode=... dbname=app_production # @@ -27,15 +31,19 @@ For now only two queries are specified and it's up to you to add more; some per # databases = ["app_production", "testing"] # # Define the toml config where the sql queries are stored - # New queries can be added, if the withdbname is set to true and there is no databases defined - # in the 'databases field', the sql query is ended by a 'is not null' in order to make the query - # succeed. - # Be careful that the sqlquery must contain the where clause with a part of the filtering, the plugin will - # add a 'IN (dbname list)' clause if the withdbname is set to true + # New queries can be added, if the withdbname is set to true and there is no + # databases defined in the 'databases field', the sql query is ended by a 'is + # not null' in order to make the query succeed. + # Be careful that the sqlquery must contain the where clause with a part of + # the filtering, the plugin will add a 'IN (dbname list)' clause if the + # withdbname is set to true # Example : - # The sqlquery : "SELECT * FROM pg_stat_database where datname" become "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" - # because the databases variable was set to ['postgres', 'pgbench' ] and the withdbname was true. - # Be careful that if the withdbname is set to false you d'ont have to define the where clause (aka with the dbname) + # The sqlquery : "SELECT * FROM pg_stat_database where datname" become + # "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" + # because the databases variable was set to ['postgres', 'pgbench' ] and the + # withdbname was true. + # Be careful that if the withdbname is set to false you d'ont have to define + # the where clause (aka with the dbname) # the tagvalue field is used to define custom tags (separated by comas) # # Structure : @@ -56,4 +64,168 @@ For now only two queries are specified and it's up to you to add more; some per tagvalue="" ``` -The system can be easily extended using homemade metrics collection tools or using postgreql extensions ([pg_stat_statements](http://www.postgresql.org/docs/current/static/pgstatstatements.html), [pg_proctab](https://github.com/markwkm/pg_proctab), [powa](http://dalibo.github.io/powa/)...) +The system can be easily extended using homemade metrics collection tools or +using postgreql extensions ([pg_stat_statements](http://www.postgresql.org/docs/current/static/pgstatstatements.html), [pg_proctab](https://github.com/markwkm/pg_proctab),[powa](http://dalibo.github.io/powa/)...) + +# Sample Queries : +- telegraf.conf postgresql_extensible queries (assuming that you have configured + correctly your connection) +``` +[[inputs.postgresql_extensible.query]] + sqlquery="SELECT * FROM pg_stat_database" + version=901 + withdbname=false + tagvalue="" +[[inputs.postgresql_extensible.query]] + sqlquery="SELECT * FROM pg_stat_bgwriter" + version=901 + withdbname=false + tagvalue="" +[[inputs.postgresql_extensible.query]] + sqlquery="select * from sessions" + version=901 + withdbname=false + tagvalue="db,username,state" +[[inputs.postgresql_extensible.query]] + sqlquery="select setting as max_connections from pg_settings where \ + name='max_connections'" + version=801 + withdbname=false + tagvalue="" +[[inputs.postgresql_extensible.query]] + sqlquery="select * from pg_stat_kcache" + version=901 + withdbname=false + tagvalue="" +[[inputs.postgresql_extensible.query]] + sqlquery="select setting as shared_buffers from pg_settings where \ + name='shared_buffers'" + version=801 + withdbname=false + tagvalue="" +[[inputs.postgresql_extensible.query]] + sqlquery="SELECT db, count( distinct blocking_pid ) AS num_blocking_sessions,\ + count( distinct blocked_pid) AS num_blocked_sessions FROM \ + public.blocking_procs group by db" + version=901 + withdbname=false + tagvalue="db" +``` + +# Postgresql Side +postgresql.conf : +``` +shared_preload_libraries = 'pg_stat_statements,pg_stat_kcache' +``` + +Please follow the requirements to setup those extensions. + +In the database (can be a specific monitoring db) +``` +create extension pg_stat_statements; +create extension pg_stat_kcache; +create extension pg_proctab; +``` +(assuming that the extension is installed on the OS Layer) + + - pg_stat_kcache is available on the postgresql.org yum repo + - pg_proctab is available at : https://github.com/markwkm/pg_proctab + + ##Views + - Blocking sessions +``` +CREATE OR REPLACE VIEW public.blocking_procs AS + SELECT a.datname AS db, + kl.pid AS blocking_pid, + ka.usename AS blocking_user, + ka.query AS blocking_query, + bl.pid AS blocked_pid, + a.usename AS blocked_user, + a.query AS blocked_query, + to_char(age(now(), a.query_start), 'HH24h:MIm:SSs'::text) AS age + FROM pg_locks bl + JOIN pg_stat_activity a ON bl.pid = a.pid + JOIN pg_locks kl ON bl.locktype = kl.locktype AND NOT bl.database IS + DISTINCT FROM kl.database AND NOT bl.relation IS DISTINCT FROM kl.relation + AND NOT bl.page IS DISTINCT FROM kl.page AND NOT bl.tuple IS DISTINCT FROM + kl.tuple AND NOT bl.virtualxid IS DISTINCT FROM kl.virtualxid AND NOT + bl.transactionid IS DISTINCT FROM kl.transactionid AND NOT bl.classid IS + DISTINCT FROM kl.classid AND NOT bl.objid IS DISTINCT FROM kl.objid AND + NOT bl.objsubid IS DISTINCT FROM kl.objsubid AND bl.pid <> kl.pid + JOIN pg_stat_activity ka ON kl.pid = ka.pid + WHERE kl.granted AND NOT bl.granted + ORDER BY a.query_start; +``` + - Sessions Statistics +``` +CREATE OR REPLACE VIEW public.sessions AS + WITH proctab AS ( + SELECT pg_proctab.pid, + CASE + WHEN pg_proctab.state::text = 'R'::bpchar::text + THEN 'running'::text + WHEN pg_proctab.state::text = 'D'::bpchar::text + THEN 'sleep-io'::text + WHEN pg_proctab.state::text = 'S'::bpchar::text + THEN 'sleep-waiting'::text + WHEN pg_proctab.state::text = 'Z'::bpchar::text + THEN 'zombie'::text + WHEN pg_proctab.state::text = 'T'::bpchar::text + THEN 'stopped'::text + ELSE NULL::text + END AS proc_state, + pg_proctab.ppid, + pg_proctab.utime, + pg_proctab.stime, + pg_proctab.vsize, + pg_proctab.rss, + pg_proctab.processor, + pg_proctab.rchar, + pg_proctab.wchar, + pg_proctab.syscr, + pg_proctab.syscw, + pg_proctab.reads, + pg_proctab.writes, + pg_proctab.cwrites + FROM pg_proctab() pg_proctab(pid, comm, fullcomm, state, ppid, pgrp, + session, tty_nr, tpgid, flags, minflt, cminflt, majflt, cmajflt, + utime, stime, cutime, cstime, priority, nice, num_threads, + itrealvalue, starttime, vsize, rss, exit_signal, processor, + rt_priority, policy, delayacct_blkio_ticks, uid, username, rchar, + wchar, syscr, syscw, reads, writes, cwrites) + ), stat_activity AS ( + SELECT pg_stat_activity.datname, + pg_stat_activity.pid, + pg_stat_activity.usename, + CASE + WHEN pg_stat_activity.query IS NULL THEN 'no query'::text + WHEN pg_stat_activity.query IS NOT NULL AND + pg_stat_activity.state = 'idle'::text THEN 'no query'::text + ELSE regexp_replace(pg_stat_activity.query, '[\n\r]+'::text, + ' '::text, 'g'::text) + END AS query + FROM pg_stat_activity + ) + SELECT stat.datname::name AS db, + stat.usename::name AS username, + stat.pid, + proc.proc_state::text AS state, +('"'::text || stat.query) || '"'::text AS query, + (proc.utime/1000)::bigint AS session_usertime, + (proc.stime/1000)::bigint AS session_systemtime, + proc.vsize AS session_virtual_memory_size, + proc.rss AS session_resident_memory_size, + proc.processor AS session_processor_number, + proc.rchar AS session_bytes_read, + proc.rchar-proc.reads AS session_logical_bytes_read, + proc.wchar AS session_bytes_written, + proc.wchar-proc.writes AS session_logical_bytes_writes, + proc.syscr AS session_read_io, + proc.syscw AS session_write_io, + proc.reads AS session_physical_reads, + proc.writes AS session_physical_writes, + proc.cwrites AS session_cancel_writes + FROM proctab proc, + stat_activity stat + WHERE proc.pid = stat.pid; +``` diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index 44c452e0b..67097db4b 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -241,14 +241,16 @@ func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator) error { tags := map[string]string{} tags["server"] = tagAddress tags["db"] = dbname.String() - + var isATag int fields := make(map[string]interface{}) for col, val := range columnMap { _, ignore := ignoredColumns[col] //if !ignore && *val != "" { if !ignore { + isATag = 0 for tag := range p.AdditionalTags { if col == p.AdditionalTags[tag] { + isATag = 1 value_type_p := fmt.Sprintf(`%T`, *val) if value_type_p == "[]uint8" { tags[col] = fmt.Sprintf(`%s`, *val) @@ -257,7 +259,9 @@ func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator) error { } } } - fields[col] = *val + if isATag == 0 { + fields[col] = *val + } } } acc.AddFields("postgresql", fields, tags) From 2634cc408a9e6a0ab8e5ef990297b1a20d511946 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 18 Mar 2016 11:26:05 -0600 Subject: [PATCH 178/287] Update README --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 3ee6a9c09..70da27fd8 100644 --- a/README.md +++ b/README.md @@ -210,6 +210,7 @@ Currently implemented sources: * phusion passenger * ping * postgresql +* postgresql_extensible * powerdns * procstat * prometheus From e940f99646d46a3e22d206a1067ea3ea4ab4426e Mon Sep 17 00:00:00 2001 From: JP Date: Mon, 21 Mar 2016 09:50:21 -0500 Subject: [PATCH 179/287] sanitize known issue characters from graphite tag name --- plugins/outputs/librato/librato.go | 9 +-------- plugins/serializers/graphite/graphite.go | 4 +++- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index 406f45361..910ac8b4e 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -7,7 +7,6 @@ import ( "io/ioutil" "log" "net/http" - "strings" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" @@ -156,13 +155,7 @@ func (l *Librato) Description() string { func (l *Librato) buildGaugeName(m telegraf.Metric, fieldName string) string { // Use the GraphiteSerializer graphiteSerializer := graphite.GraphiteSerializer{} - serializedMetric := graphiteSerializer.SerializeBucketName(m, fieldName) - - // Deal with slash characters: - replacedString := strings.Replace(serializedMetric, "/", "-", -1) - // Deal with @ characters: - replacedString = strings.Replace(replacedString, "@", "-", -1) - return replacedString + return graphiteSerializer.SerializeBucketName(m, fieldName) } func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) { diff --git a/plugins/serializers/graphite/graphite.go b/plugins/serializers/graphite/graphite.go index 908dce8fa..7a7fec2f1 100644 --- a/plugins/serializers/graphite/graphite.go +++ b/plugins/serializers/graphite/graphite.go @@ -12,6 +12,8 @@ type GraphiteSerializer struct { Prefix string } +var sanitizedChars = strings.NewReplacer("/", "-", "@", "-", " ", "_") + func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]string, error) { out := []string{} @@ -85,5 +87,5 @@ func buildTags(metric telegraf.Metric) string { tag_str += "." + tag_value } } - return tag_str + return sanitizedChars.Replace(tag_str) } From a043664dc40d3472881ce7d69cefc866cc338dbb Mon Sep 17 00:00:00 2001 From: Vebjorn Ljosa Date: Wed, 16 Mar 2016 17:27:59 -0400 Subject: [PATCH 180/287] Couchbase input plugin --- Godeps | 3 + plugins/inputs/all/all.go | 1 + plugins/inputs/couchbase/README.md | 46 ++++++++++++++ plugins/inputs/couchbase/couchbase.go | 88 +++++++++++++++++++++++++++ 4 files changed, 138 insertions(+) create mode 100644 plugins/inputs/couchbase/README.md create mode 100644 plugins/inputs/couchbase/couchbase.go diff --git a/Godeps b/Godeps index 089860ed5..cbd46c524 100644 --- a/Godeps +++ b/Godeps @@ -5,6 +5,9 @@ github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687 github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857 github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4 github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99 +github.com/couchbase/go-couchbase cb664315a324d87d19c879d9cc67fda6be8c2ac1 +github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2 +github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6 github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3 diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index db36cfbec..55a932df2 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -4,6 +4,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/aerospike" _ "github.com/influxdata/telegraf/plugins/inputs/apache" _ "github.com/influxdata/telegraf/plugins/inputs/bcache" + _ "github.com/influxdata/telegraf/plugins/inputs/couchbase" _ "github.com/influxdata/telegraf/plugins/inputs/couchdb" _ "github.com/influxdata/telegraf/plugins/inputs/disque" _ "github.com/influxdata/telegraf/plugins/inputs/dns_query" diff --git a/plugins/inputs/couchbase/README.md b/plugins/inputs/couchbase/README.md new file mode 100644 index 000000000..fc142e3b2 --- /dev/null +++ b/plugins/inputs/couchbase/README.md @@ -0,0 +1,46 @@ +# Telegraf Plugin: Couchbase + +### Configuration: + +``` +# Read per-node and per-bucket metrics from Couchbase +[[inputs.couchbase]] + ## specify servers via a url matching: + ## [protocol://][:password]@address[:port] + ## e.g. + ## http://couchbase-0.example.com/ + ## http://admin:secret@couchbase-0.example.com:8091/ + ## + ## If no servers are specified, then localhost is used as the host. + ## If no protocol is specifed, HTTP is used. + ## If no port is specified, 8091 is used. + servers = ["http://localhost:8091"] +``` + +## Measurements: + +### Per-node measurements + +Meta: +- units: bytes +- tags: `cluster`, `hostname` + +Measurement names: +- memory_free +- memory_total + +### Per-bucket measurements + +Meta: +- units: varies +- tags: `cluster`, `bucket` + +Measurement names: +- quotaPercentUsed (unit: percent) +- opsPerSec (unit: count) +- diskFetches (unit: count) +- itemCount (unit: count) +- diskUsed (unit: bytes) +- dataUsed (unit: bytes) +- memUsed (unit: bytes) + diff --git a/plugins/inputs/couchbase/couchbase.go b/plugins/inputs/couchbase/couchbase.go new file mode 100644 index 000000000..2a3e687ea --- /dev/null +++ b/plugins/inputs/couchbase/couchbase.go @@ -0,0 +1,88 @@ +package couchbase + +import ( + couchbase "github.com/couchbase/go-couchbase" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "sync" +) + +type Couchbase struct { + Servers []string +} + +var sampleConfig = ` + ## specify servers via a url matching: + ## [protocol://][:password]@address[:port] + ## e.g. + ## http://couchbase-0.example.com/ + ## http://admin:secret@couchbase-0.example.com:8091/ + ## + ## If no servers are specified, then localhost is used as the host. + ## If no protocol is specifed, HTTP is used. + ## If no port is specified, 8091 is used. + servers = ["http://localhost:8091"] +` + +func (r *Couchbase) SampleConfig() string { + return sampleConfig +} + +func (r *Couchbase) Description() string { + return "Read metrics from one or many couchbase clusters" +} + +// Reads stats from all configured clusters. Accumulates stats. +// Returns one of the errors encountered while gathering stats (if any). +func (r *Couchbase) Gather(acc telegraf.Accumulator) error { + if len(r.Servers) == 0 { + r.gatherServer("http://localhost:8091/", acc) + return nil + } + + var wg sync.WaitGroup + + var outerr error + + for _, serv := range r.Servers { + wg.Add(1) + go func(serv string) { + defer wg.Done() + outerr = r.gatherServer(serv, acc) + }(serv) + } + + wg.Wait() + + return outerr +} + +func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator) error { + client, err := couchbase.Connect(addr) + if err != nil { + return err + } + pool, err := client.GetPool("default") + if err != nil { + return err + } + for i := 0; i < len(pool.Nodes); i++ { + node := pool.Nodes[i] + tags := map[string]string{"cluster": addr, "hostname": node.Hostname} + fields := make(map[string]interface{}) + fields["memory_free"] = node.MemoryFree + fields["memory_total"] = node.MemoryTotal + acc.AddFields("couchbase_node", fields, tags) + } + for bucketName, bucket := range pool.BucketMap { + tags := map[string]string{"cluster": addr, "bucket": bucketName} + acc.AddFields("couchbase_bucket", bucket.BasicStats, tags) + } + return nil +} + +func init() { + inputs.Add("couchbase", func() telegraf.Input { + return &Couchbase{} + }) +} From 681e6951704b8292a03ca37736871217797e0e79 Mon Sep 17 00:00:00 2001 From: Vebjorn Ljosa Date: Wed, 16 Mar 2016 17:52:38 -0400 Subject: [PATCH 181/287] Don't copy lock when `range`ing over map Make `go vet` happy. --- plugins/inputs/couchbase/couchbase.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/couchbase/couchbase.go b/plugins/inputs/couchbase/couchbase.go index 2a3e687ea..aff551b2e 100644 --- a/plugins/inputs/couchbase/couchbase.go +++ b/plugins/inputs/couchbase/couchbase.go @@ -74,7 +74,8 @@ func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator) error { fields["memory_total"] = node.MemoryTotal acc.AddFields("couchbase_node", fields, tags) } - for bucketName, bucket := range pool.BucketMap { + for bucketName, _ := range pool.BucketMap { + bucket := pool.BucketMap[bucketName] tags := map[string]string{"cluster": addr, "bucket": bucketName} acc.AddFields("couchbase_bucket", bucket.BasicStats, tags) } From 2ddda6457ffc74e14671baefdc5aca133d62805c Mon Sep 17 00:00:00 2001 From: Vebjorn Ljosa Date: Thu, 17 Mar 2016 11:38:17 -0400 Subject: [PATCH 182/287] Convert measurement names to snake_case --- plugins/inputs/couchbase/couchbase.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/couchbase/couchbase.go b/plugins/inputs/couchbase/couchbase.go index aff551b2e..7135be5a5 100644 --- a/plugins/inputs/couchbase/couchbase.go +++ b/plugins/inputs/couchbase/couchbase.go @@ -75,9 +75,17 @@ func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator) error { acc.AddFields("couchbase_node", fields, tags) } for bucketName, _ := range pool.BucketMap { - bucket := pool.BucketMap[bucketName] tags := map[string]string{"cluster": addr, "bucket": bucketName} - acc.AddFields("couchbase_bucket", bucket.BasicStats, tags) + bs := pool.BucketMap[bucketName].BasicStats + fields := make(map[string]interface{}) + fields["quota_percent_used"] = bs["quotaPercentUsed"] + fields["ops_per_sec"] = bs["opsPerSec"] + fields["disk_fetches"] = bs["diskFetches"] + fields["item_count"] = bs["itemCount"] + fields["disk_used"] = bs["diskUsed"] + fields["data_used"] = bs["dataUsed"] + fields["mem_used"] = bs["memUsed"] + acc.AddFields("couchbase_bucket", fields, tags) } return nil } From 483942dc415a84dba7afee1dbbb08eb7fc2fcdff Mon Sep 17 00:00:00 2001 From: Vebjorn Ljosa Date: Thu, 17 Mar 2016 11:41:04 -0400 Subject: [PATCH 183/287] Comment on `default` pool name --- plugins/inputs/couchbase/couchbase.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/plugins/inputs/couchbase/couchbase.go b/plugins/inputs/couchbase/couchbase.go index 7135be5a5..c16692471 100644 --- a/plugins/inputs/couchbase/couchbase.go +++ b/plugins/inputs/couchbase/couchbase.go @@ -62,6 +62,9 @@ func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator) error { if err != nil { return err } + // `default` is the only possible pool name. It's a + // placeholder for a possible future Couchbase feature. See + // http://stackoverflow.com/a/16990911/17498. pool, err := client.GetPool("default") if err != nil { return err From 9b5a90e3b94f3fe2d62e100f75ccaa8dc033a0d9 Mon Sep 17 00:00:00 2001 From: Vebjorn Ljosa Date: Fri, 18 Mar 2016 18:37:57 -0400 Subject: [PATCH 184/287] Unit test couchbase input plugin --- plugins/inputs/couchbase/couchbase.go | 30 ++++++++++--------- plugins/inputs/couchbase/couchbase_test.go | 34 ++++++++++++++++++++++ 2 files changed, 51 insertions(+), 13 deletions(-) create mode 100644 plugins/inputs/couchbase/couchbase_test.go diff --git a/plugins/inputs/couchbase/couchbase.go b/plugins/inputs/couchbase/couchbase.go index c16692471..48e0c1a75 100644 --- a/plugins/inputs/couchbase/couchbase.go +++ b/plugins/inputs/couchbase/couchbase.go @@ -36,7 +36,7 @@ func (r *Couchbase) Description() string { // Returns one of the errors encountered while gathering stats (if any). func (r *Couchbase) Gather(acc telegraf.Accumulator) error { if len(r.Servers) == 0 { - r.gatherServer("http://localhost:8091/", acc) + r.gatherServer("http://localhost:8091/", acc, nil) return nil } @@ -48,7 +48,7 @@ func (r *Couchbase) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(serv string) { defer wg.Done() - outerr = r.gatherServer(serv, acc) + outerr = r.gatherServer(serv, acc, nil) }(serv) } @@ -57,17 +57,21 @@ func (r *Couchbase) Gather(acc telegraf.Accumulator) error { return outerr } -func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator) error { - client, err := couchbase.Connect(addr) - if err != nil { - return err - } - // `default` is the only possible pool name. It's a - // placeholder for a possible future Couchbase feature. See - // http://stackoverflow.com/a/16990911/17498. - pool, err := client.GetPool("default") - if err != nil { - return err +func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *couchbase.Pool) error { + if pool == nil { + client, err := couchbase.Connect(addr) + if err != nil { + return err + } + + // `default` is the only possible pool name. It's a + // placeholder for a possible future Couchbase feature. See + // http://stackoverflow.com/a/16990911/17498. + p, err := client.GetPool("default") + if err != nil { + return err + } + pool = &p } for i := 0; i < len(pool.Nodes); i++ { node := pool.Nodes[i] diff --git a/plugins/inputs/couchbase/couchbase_test.go b/plugins/inputs/couchbase/couchbase_test.go new file mode 100644 index 000000000..c7e692002 --- /dev/null +++ b/plugins/inputs/couchbase/couchbase_test.go @@ -0,0 +1,34 @@ +package couchbase + +import ( + "encoding/json" + couchbase "github.com/couchbase/go-couchbase" + "github.com/influxdata/telegraf/testutil" + "testing" +) + +func TestGatherServer(t *testing.T) { + var pool couchbase.Pool + if err := json.Unmarshal([]byte(poolsDefaultResponse), &pool); err != nil { + t.Fatal("parse poolsDefaultResponse", err) + } + var bucket couchbase.Bucket + if err := json.Unmarshal([]byte(bucketResponse), &bucket); err != nil { + t.Fatal("parse bucketResponse", err) + } + pool.BucketMap = make(map[string]couchbase.Bucket) + pool.BucketMap[bucket.Name] = bucket + var cb Couchbase + var acc testutil.Accumulator + cb.gatherServer("mycluster", &acc, &pool) + acc.AssertContainsTaggedFields(t, "couchbase_node", map[string]interface{}{"memory_free": 23181365248.0, "memory_total": 64424656896.0}, map[string]string{"cluster": "mycluster", "hostname": "172.16.10.187:8091"}) + acc.AssertContainsTaggedFields(t, "couchbase_node", map[string]interface{}{"memory_free": 23665811456.0, "memory_total": 64424656896.0}, map[string]string{"cluster": "mycluster", "hostname": "172.16.10.65:8091"}) + acc.AssertContainsTaggedFields(t, "couchbase_bucket", map[string]interface{}{"quota_percent_used": 68.85424936294555, "ops_per_sec": 5686.789686789687, "disk_fetches": 0.0, "item_count": 943239752.0, "disk_used": 409178772321.0, "data_used": 212179309111.0, "mem_used": 202156957464.0}, map[string]string{"cluster": "mycluster", "bucket": "blastro-df"}) + +} + +// From `/pools/default` on a real cluster +var poolsDefaultResponse string = `{"storageTotals":{"ram":{"total":450972598272,"quotaTotal":360777252864,"quotaUsed":360777252864,"used":446826622976,"usedByData":255061495696,"quotaUsedPerNode":51539607552,"quotaTotalPerNode":51539607552},"hdd":{"total":1108766539776,"quotaTotal":1108766539776,"used":559135126484,"usedByData":515767865143,"free":498944942902}},"serverGroupsUri":"/pools/default/serverGroups?v=98656394","name":"default","alerts":["Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.148\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.65\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.173\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.75\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.105\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.127\" is taken up by keys and metadata.","Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.120\" is taken up by keys and metadata.","Metadata overhead warning. Over 66% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.187\" is taken up by keys and metadata."],"alertsSilenceURL":"/controller/resetAlerts?token=2814&uuid=2bec87861652b990cf6aa5c7ee58c253","nodes":[{"systemStats":{"cpu_utilization_rate":35.43307086614173,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23181365248},"interestingStats":{"cmd_get":17.98201798201798,"couch_docs_actual_disk_size":68506048063,"couch_docs_data_size":38718796110,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140158886,"curr_items_tot":279374646,"ep_bg_fetched":0.999000999000999,"get_hits":10.98901098901099,"mem_used":36497390640,"ops":829.1708291708292,"vb_replica_curr_items":139215760},"uptime":"341236","memoryTotal":64424656896,"memoryFree":23181365248,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.187:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.38255033557047,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23665811456},"interestingStats":{"cmd_get":172.8271728271728,"couch_docs_actual_disk_size":79360565405,"couch_docs_data_size":38736382876,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140174377,"curr_items_tot":279383025,"ep_bg_fetched":0.999000999000999,"get_hits":167.8321678321678,"mem_used":36650059656,"ops":1685.314685314685,"vb_replica_curr_items":139208648},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23665811456,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.65:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":25.5586592178771,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23726600192},"interestingStats":{"cmd_get":63.06306306306306,"couch_docs_actual_disk_size":79345105217,"couch_docs_data_size":38728086130,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139195268,"curr_items_tot":279349113,"ep_bg_fetched":0,"get_hits":53.05305305305306,"mem_used":36476665576,"ops":1878.878878878879,"vb_replica_curr_items":140153845},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23726600192,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.105:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":26.45803698435277,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23854841856},"interestingStats":{"cmd_get":51.05105105105105,"couch_docs_actual_disk_size":74465931949,"couch_docs_data_size":38723830730,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139209869,"curr_items_tot":279380019,"ep_bg_fetched":0,"get_hits":47.04704704704704,"mem_used":36471784896,"ops":1831.831831831832,"vb_replica_curr_items":140170150},"uptime":"340526","memoryTotal":64424656896,"memoryFree":23854841856,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.173:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.31034482758621,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23773573120},"interestingStats":{"cmd_get":77.07707707707708,"couch_docs_actual_disk_size":74743093945,"couch_docs_data_size":38594660087,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139215932,"curr_items_tot":278427644,"ep_bg_fetched":0,"get_hits":53.05305305305305,"mem_used":36306500344,"ops":1981.981981981982,"vb_replica_curr_items":139211712},"uptime":"340495","memoryTotal":64424656896,"memoryFree":23773573120,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.15.120:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":17.60660247592847,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23662190592},"interestingStats":{"cmd_get":146.8531468531468,"couch_docs_actual_disk_size":72932847344,"couch_docs_data_size":38581771457,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139226879,"curr_items_tot":278436540,"ep_bg_fetched":0,"get_hits":144.8551448551448,"mem_used":36421860496,"ops":1495.504495504495,"vb_replica_curr_items":139209661},"uptime":"337174","memoryTotal":64424656896,"memoryFree":23662190592,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.127:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":21.68831168831169,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":24049729536},"interestingStats":{"cmd_get":11.98801198801199,"couch_docs_actual_disk_size":66414273220,"couch_docs_data_size":38587642702,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139193759,"curr_items_tot":278398926,"ep_bg_fetched":0,"get_hits":9.990009990009991,"mem_used":36237234088,"ops":883.1168831168832,"vb_replica_curr_items":139205167},"uptime":"341228","memoryTotal":64424656896,"memoryFree":24049729536,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.148:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"buckets":{"uri":"/pools/default/buckets?v=74117050&uuid=2bec87861652b990cf6aa5c7ee58c253","terseBucketsBase":"/pools/default/b/","terseStreamingBucketsBase":"/pools/default/bs/"},"remoteClusters":{"uri":"/pools/default/remoteClusters?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/pools/default/remoteClusters?just_validate=1"},"controllers":{"addNode":{"uri":"/controller/addNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"rebalance":{"uri":"/controller/rebalance?uuid=2bec87861652b990cf6aa5c7ee58c253"},"failOver":{"uri":"/controller/failOver?uuid=2bec87861652b990cf6aa5c7ee58c253"},"startGracefulFailover":{"uri":"/controller/startGracefulFailover?uuid=2bec87861652b990cf6aa5c7ee58c253"},"reAddNode":{"uri":"/controller/reAddNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"reFailOver":{"uri":"/controller/reFailOver?uuid=2bec87861652b990cf6aa5c7ee58c253"},"ejectNode":{"uri":"/controller/ejectNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"setRecoveryType":{"uri":"/controller/setRecoveryType?uuid=2bec87861652b990cf6aa5c7ee58c253"},"setAutoCompaction":{"uri":"/controller/setAutoCompaction?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/setAutoCompaction?just_validate=1"},"clusterLogsCollection":{"startURI":"/controller/startLogsCollection?uuid=2bec87861652b990cf6aa5c7ee58c253","cancelURI":"/controller/cancelLogsCollection?uuid=2bec87861652b990cf6aa5c7ee58c253"},"replication":{"createURI":"/controller/createReplication?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/createReplication?just_validate=1"},"setFastWarmup":{"uri":"/controller/setFastWarmup?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/setFastWarmup?just_validate=1"}},"rebalanceStatus":"none","rebalanceProgressUri":"/pools/default/rebalanceProgress","stopRebalanceUri":"/controller/stopRebalance?uuid=2bec87861652b990cf6aa5c7ee58c253","nodeStatusesUri":"/nodeStatuses","maxBucketCount":10,"autoCompactionSettings":{"parallelDBAndViewCompaction":false,"databaseFragmentationThreshold":{"percentage":50,"size":"undefined"},"viewFragmentationThreshold":{"percentage":50,"size":"undefined"}},"fastWarmupSettings":{"fastWarmupEnabled":true,"minMemoryThreshold":10,"minItemsThreshold":10},"tasks":{"uri":"/pools/default/tasks?v=97479372"},"visualSettingsUri":"/internalSettings/visual?v=7111573","counters":{"rebalance_success":4,"rebalance_start":6,"rebalance_stop":2}}` + +// From `/pools/default/buckets/blastro-df` on a real cluster +var bucketResponse string = `{"name":"blastro-df","bucketType":"membase","authType":"sasl","saslPassword":"","proxyPort":0,"replicaIndex":false,"uri":"/pools/default/buckets/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","streamingUri":"/pools/default/bucketsStreaming/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","localRandomKeyUri":"/pools/default/buckets/blastro-df/localRandomKey","controllers":{"compactAll":"/pools/default/buckets/blastro-df/controller/compactBucket","compactDB":"/pools/default/buckets/default/controller/compactDatabases","purgeDeletes":"/pools/default/buckets/blastro-df/controller/unsafePurgeBucket","startRecovery":"/pools/default/buckets/blastro-df/controller/startRecovery"},"nodes":[{"couchApiBase":"http://172.16.8.148:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":18.39557399723375,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23791935488},"interestingStats":{"cmd_get":10.98901098901099,"couch_docs_actual_disk_size":79525832077,"couch_docs_data_size":38633186946,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139229304,"curr_items_tot":278470058,"ep_bg_fetched":0,"get_hits":5.994005994005994,"mem_used":36284362960,"ops":1275.724275724276,"vb_replica_curr_items":139240754},"uptime":"343968","memoryTotal":64424656896,"memoryFree":23791935488,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.8.127:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.97183098591549,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23533023232},"interestingStats":{"cmd_get":39.96003996003996,"couch_docs_actual_disk_size":63322357663,"couch_docs_data_size":38603481061,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139262616,"curr_items_tot":278508069,"ep_bg_fetched":0.999000999000999,"get_hits":30.96903096903097,"mem_used":36475078736,"ops":1370.629370629371,"vb_replica_curr_items":139245453},"uptime":"339914","memoryTotal":64424656896,"memoryFree":23533023232,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.15.120:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":23.38028169014084,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23672963072},"interestingStats":{"cmd_get":88.08808808808809,"couch_docs_actual_disk_size":80260594761,"couch_docs_data_size":38632863189,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139251563,"curr_items_tot":278498913,"ep_bg_fetched":0,"get_hits":74.07407407407408,"mem_used":36348663000,"ops":1707.707707707708,"vb_replica_curr_items":139247350},"uptime":"343235","memoryTotal":64424656896,"memoryFree":23672963072,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.173:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":22.15988779803646,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23818825728},"interestingStats":{"cmd_get":103.1031031031031,"couch_docs_actual_disk_size":68247785524,"couch_docs_data_size":38747583467,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139245453,"curr_items_tot":279451313,"ep_bg_fetched":1.001001001001001,"get_hits":86.08608608608608,"mem_used":36524715864,"ops":1749.74974974975,"vb_replica_curr_items":140205860},"uptime":"343266","memoryTotal":64424656896,"memoryFree":23818825728,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.105:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.94444444444444,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23721426944},"interestingStats":{"cmd_get":113.1131131131131,"couch_docs_actual_disk_size":68102832275,"couch_docs_data_size":38747477407,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139230887,"curr_items_tot":279420530,"ep_bg_fetched":0,"get_hits":106.1061061061061,"mem_used":36524887624,"ops":1799.7997997998,"vb_replica_curr_items":140189643},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23721426944,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.65:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":60.62176165803109,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23618203648},"interestingStats":{"cmd_get":30.96903096903097,"couch_docs_actual_disk_size":69052175561,"couch_docs_data_size":38755695030,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140210194,"curr_items_tot":279454253,"ep_bg_fetched":0,"get_hits":26.97302697302698,"mem_used":36543072472,"ops":1337.662337662338,"vb_replica_curr_items":139244059},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23618203648,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.187:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.83588317107093,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23062269952},"interestingStats":{"cmd_get":33.03303303303304,"couch_docs_actual_disk_size":74422029546,"couch_docs_data_size":38758172837,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140194321,"curr_items_tot":279445526,"ep_bg_fetched":0,"get_hits":21.02102102102102,"mem_used":36527676832,"ops":1088.088088088088,"vb_replica_curr_items":139251205},"uptime":"343971","memoryTotal":64424656896,"memoryFree":23062269952,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"stats":{"uri":"/pools/default/buckets/blastro-df/stats","directoryURI":"/pools/default/buckets/blastro-df/statsDirectory","nodeStatsListURI":"/pools/default/buckets/blastro-df/nodes"},"ddocs":{"uri":"/pools/default/buckets/blastro-df/ddocs"},"nodeLocator":"vbucket","fastWarmupSettings":false,"autoCompactionSettings":false,"uuid":"2e6b9dc4c278300ce3a4f27ad540323f","vBucketServerMap":{"hashAlgorithm":"CRC","numReplicas":1,"serverList":["172.16.10.187:11210","172.16.10.65:11210","172.16.13.105:11210","172.16.13.173:11210","172.16.15.120:11210","172.16.8.127:11210","172.16.8.148:11210"],"vBucketMap":[[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,6],[0,6],[0,6],[0,6],[0,6],[1,3],[1,3],[1,3],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[2,3],[2,3],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[2,5],[2,5],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,6],[3,6],[3,6],[3,6],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[5,3],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[0,3],[0,3],[0,3],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,5],[4,5],[4,5],[4,5],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[2,6],[2,6],[3,2],[3,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[2,0],[2,0],[2,0],[2,0],[2,0],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,2],[4,3],[4,3],[4,3],[4,5],[4,5],[4,5],[4,5],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[5,4],[5,4],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[6,5],[6,5],[6,5],[6,5],[6,5],[4,0],[4,0],[4,0],[4,0],[4,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,6],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[4,6],[4,6],[4,6],[4,6],[4,6],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[5,0],[5,0],[5,0],[2,0],[2,0],[3,0],[3,0],[3,0],[5,3],[5,3],[5,3],[5,3],[5,3],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,5],[4,5],[1,0],[3,0],[3,1],[3,1],[3,1],[3,1],[5,4],[5,4],[5,4],[5,4],[5,4],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[5,6],[5,6],[5,6],[6,2],[6,2],[6,3],[6,3],[6,3],[4,0],[4,0],[4,0],[4,0],[4,0],[4,1],[4,1],[4,1],[5,6],[5,6],[5,6],[5,6],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[0,5],[0,5],[0,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,1],[0,1],[4,6],[4,6],[4,6],[4,6],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,6],[2,0],[2,0],[5,2],[5,3],[5,3],[5,3],[5,3],[5,1],[5,1],[5,1],[5,1],[5,1],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,1],[4,1],[4,1],[5,3],[5,3],[5,3],[5,3],[5,3],[2,0],[5,2],[5,2],[5,2],[5,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,2],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,1],[4,1],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,1],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,2],[0,2],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[0,2],[0,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,1],[4,1],[4,2],[4,2],[4,2],[6,3],[6,3],[6,3],[6,3],[6,3],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,2],[6,2],[6,2],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,1],[2,3],[2,3],[1,2],[1,2],[1,2],[1,3],[1,3],[1,3],[1,3],[1,3],[3,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[3,1],[3,1],[3,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[6,3],[6,3],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[5,1],[5,1],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[5,2],[6,2],[6,2],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,3],[1,3],[1,3],[6,2],[6,2],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[1,3],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,5],[6,5],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,2]]},"replicaNumber":1,"threadsNumber":3,"quota":{"ram":293601280000,"rawRAM":41943040000},"basicStats":{"quotaPercentUsed":68.85424936294555,"opsPerSec":5686.789686789687,"diskFetches":0,"itemCount":943239752,"diskUsed":409178772321,"dataUsed":212179309111,"memUsed":202156957464},"evictionPolicy":"valueOnly","bucketCapabilitiesVer":"","bucketCapabilities":["cbhello","touch","couchapi","cccp","xdcrCheckpointing","nodesExt"]}` From 2ae9316f48f3bae4637bcdd0ca81a7f3669145ea Mon Sep 17 00:00:00 2001 From: Vebjorn Ljosa Date: Fri, 18 Mar 2016 18:40:46 -0400 Subject: [PATCH 185/287] Add examples in documentation for couchbase input plugin --- plugins/inputs/couchbase/README.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/couchbase/README.md b/plugins/inputs/couchbase/README.md index fc142e3b2..1e7516285 100644 --- a/plugins/inputs/couchbase/README.md +++ b/plugins/inputs/couchbase/README.md @@ -26,8 +26,8 @@ Meta: - tags: `cluster`, `hostname` Measurement names: -- memory_free -- memory_total +- memory_free (example: 23181365248.0) +- memory_total (example: 64424656896.0) ### Per-bucket measurements @@ -36,11 +36,11 @@ Meta: - tags: `cluster`, `bucket` Measurement names: -- quotaPercentUsed (unit: percent) -- opsPerSec (unit: count) -- diskFetches (unit: count) -- itemCount (unit: count) -- diskUsed (unit: bytes) -- dataUsed (unit: bytes) -- memUsed (unit: bytes) +- quotaPercentUsed (unit: percent, example: 68.85424936294555) +- opsPerSec (unit: count, example: 5686.789686789687) +- diskFetches (unit: count, example: 0.0) +- itemCount (unit: count, example: 943239752.0) +- diskUsed (unit: bytes, example: 409178772321.0) +- dataUsed (unit: bytes, example: 212179309111.0) +- memUsed (unit: bytes, example: 202156957464.0) From 077fa2e6b95a3bd8089c4e90cb3e4038ffd1369e Mon Sep 17 00:00:00 2001 From: Vebjorn Ljosa Date: Sat, 19 Mar 2016 05:56:57 -0400 Subject: [PATCH 186/287] Improve README for couchabase input plugin Proper terminology and case. Exmaples for tags. Example output. --- plugins/inputs/couchbase/README.md | 57 +++++++++++++++++++----------- 1 file changed, 37 insertions(+), 20 deletions(-) diff --git a/plugins/inputs/couchbase/README.md b/plugins/inputs/couchbase/README.md index 1e7516285..6d654a0e2 100644 --- a/plugins/inputs/couchbase/README.md +++ b/plugins/inputs/couchbase/README.md @@ -1,6 +1,6 @@ # Telegraf Plugin: Couchbase -### Configuration: +## Configuration: ``` # Read per-node and per-bucket metrics from Couchbase @@ -19,28 +19,45 @@ ## Measurements: -### Per-node measurements +### couchbase_node -Meta: -- units: bytes -- tags: `cluster`, `hostname` +Tags: +- cluster: whatever you called it in `servers` in the configuration, e.g.: `http://couchbase-0.example.com/` +- hostname: Couchbase's name for the node and port, e.g., `172.16.10.187:8091` -Measurement names: -- memory_free (example: 23181365248.0) -- memory_total (example: 64424656896.0) +Fields: +- memory_free (unit: bytes, example: 23181365248.0) +- memory_total (unit: bytes, example: 64424656896.0) -### Per-bucket measurements +### couchbase_bucket -Meta: -- units: varies -- tags: `cluster`, `bucket` +Tags: +- cluster: whatever you called it in `servers` in the configuration, e.g.: `http://couchbase-0.example.com/`) +- bucket: the name of the couchbase bucket, e.g., `blastro-df` -Measurement names: -- quotaPercentUsed (unit: percent, example: 68.85424936294555) -- opsPerSec (unit: count, example: 5686.789686789687) -- diskFetches (unit: count, example: 0.0) -- itemCount (unit: count, example: 943239752.0) -- diskUsed (unit: bytes, example: 409178772321.0) -- dataUsed (unit: bytes, example: 212179309111.0) -- memUsed (unit: bytes, example: 202156957464.0) +Fields: +- quota_percent_used (unit: percent, example: 68.85424936294555) +- ops_per_sec (unit: count, example: 5686.789686789687) +- disk_fetches (unit: count, example: 0.0) +- item_count (unit: count, example: 943239752.0) +- disk_used (unit: bytes, example: 409178772321.0) +- data_used (unit: bytes, example: 212179309111.0) +- mem_used (unit: bytes, example: 202156957464.0) + +## Example output + +``` +$ telegraf -config telegraf.conf -input-filter couchbase -test +* Plugin: couchbase, Collection 1 +> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.10.187:8091 memory_free=22927384576,memory_total=64424656896 1458381183695864929 +> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.10.65:8091 memory_free=23520161792,memory_total=64424656896 1458381183695972112 +> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.13.105:8091 memory_free=23531704320,memory_total=64424656896 1458381183695995259 +> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.13.173:8091 memory_free=23628767232,memory_total=64424656896 1458381183696010870 +> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.15.120:8091 memory_free=23616692224,memory_total=64424656896 1458381183696027406 +> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.8.127:8091 memory_free=23431770112,memory_total=64424656896 1458381183696041040 +> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.8.148:8091 memory_free=23811371008,memory_total=64424656896 1458381183696059060 +> couchbase_bucket,bucket=default,cluster=https://couchbase-0.example.com/ data_used=25743360,disk_fetches=0,disk_used=31744886,item_count=0,mem_used=77729224,ops_per_sec=0,quota_percent_used=10.58976636614118 1458381183696210074 +> couchbase_bucket,bucket=demoncat,cluster=https://couchbase-0.example.com/ data_used=38157584951,disk_fetches=0,disk_used=62730302441,item_count=14662532,mem_used=24015304256,ops_per_sec=1207.753207753208,quota_percent_used=79.87855353525707 1458381183696242695 +> couchbase_bucket,bucket=blastro-df,cluster=https://couchbase-0.example.com/ data_used=212552491622,disk_fetches=0,disk_used=413323157621,item_count=944655680,mem_used=202421103760,ops_per_sec=1692.176692176692,quota_percent_used=68.9442170551845 1458381183696272206 +``` From 9de4a8efcfa63b082bc5d466e41d6ba09a3b0447 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 21 Mar 2016 12:12:23 -0600 Subject: [PATCH 187/287] Update readme, changelog for couchbase plugin closes #866 closes #482 --- CHANGELOG.md | 5 +++-- README.md | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ed574daf3..1b7e6fd16 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,10 +8,11 @@ - [#882](https://github.com/influxdata/telegraf/pull/882): Fixed SQL Server Plugin issues - [#849](https://github.com/influxdata/telegraf/issues/849): Adding ability to parse single values as an input data type. - [#844](https://github.com/influxdata/telegraf/pull/844): postgres_extensible plugin added. Thanks @menardorama! +- [#866](https://github.com/influxdata/telegraf/pull/866): couchbase input plugin. Thanks @ljosa! ### Bugfixes -[#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided. -[#884](https://github.com/influxdata/telegraf/issues/884): Do not call write method if there are 0 metrics to write. +- [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided. +- [#884](https://github.com/influxdata/telegraf/issues/884): Do not call write method if there are 0 metrics to write. ## v0.11.1 [2016-03-17] diff --git a/README.md b/README.md index 70da27fd8..2e9239d9a 100644 --- a/README.md +++ b/README.md @@ -184,6 +184,7 @@ Currently implemented sources: * aerospike * apache * bcache +* couchbase * couchdb * disque * dns query time From 20b4e8c779ef81caee1dd25fd974dc81b4271b0d Mon Sep 17 00:00:00 2001 From: "Chris H (CruftMaster)" Date: Thu, 3 Mar 2016 17:26:14 +0000 Subject: [PATCH 188/287] GREEDY field templates for the graphite parser, and support for multiple specific field names closes #789 --- CHANGELOG.md | 1 + docs/DATA_FORMATS_INPUT.md | 21 ++++++-- plugins/parsers/graphite/parser.go | 27 +++++++--- plugins/parsers/graphite/parser_test.go | 65 ++++++++++++++++++++++--- 4 files changed, 99 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b7e6fd16..0332a4eda 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ - [#849](https://github.com/influxdata/telegraf/issues/849): Adding ability to parse single values as an input data type. - [#844](https://github.com/influxdata/telegraf/pull/844): postgres_extensible plugin added. Thanks @menardorama! - [#866](https://github.com/influxdata/telegraf/pull/866): couchbase input plugin. Thanks @ljosa! +- [#789](https://github.com/influxdata/telegraf/pull/789): Support multiple field specification and `field*` in graphite templates. Thanks @chrusty! ### Bugfixes - [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided. diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index 12c4d4cde..fd8ef8538 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -220,17 +220,32 @@ So the following template: ```toml templates = [ - "measurement.measurement.field.region" + "measurement.measurement.field.field.region" ] ``` would result in the following Graphite -> Telegraf transformation. ``` -cpu.usage.idle.us-west 100 -=> cpu_usage,region=us-west idle=100 +cpu.usage.idle.percent.us-west 100 +=> cpu_usage,region=us-west idle_percent=100 ``` +The field key can also be derived from the second "half" of the input metric-name by specifying ```field*```: +```toml +templates = [ + "measurement.measurement.region.field*" +] +``` + +would result in the following Graphite -> Telegraf transformation. + +``` +cpu.usage.us-west.idle.percentage 100 +=> cpu_usage,region=us-west idle_percentage=100 +``` +(This cannot be used in conjunction with "measurement*"!) + #### Filter Templates: Users can also filter the template(s) to use based on the name of the bucket, diff --git a/plugins/parsers/graphite/parser.go b/plugins/parsers/graphite/parser.go index 5e8815064..8c31cd760 100644 --- a/plugins/parsers/graphite/parser.go +++ b/plugins/parsers/graphite/parser.go @@ -231,6 +231,7 @@ func (p *GraphiteParser) ApplyTemplate(line string) (string, map[string]string, type template struct { tags []string defaultTags map[string]string + greedyField bool greedyMeasurement bool separator string } @@ -248,6 +249,8 @@ func NewTemplate(pattern string, defaultTags map[string]string, separator string } if tag == "measurement*" { template.greedyMeasurement = true + } else if tag == "field*" { + template.greedyField = true } } @@ -265,7 +268,7 @@ func (t *template) Apply(line string) (string, map[string]string, string, error) var ( measurement []string tags = make(map[string]string) - field string + field []string ) // Set any default tags @@ -273,6 +276,18 @@ func (t *template) Apply(line string) (string, map[string]string, string, error) tags[k] = v } + // See if an invalid combination has been specified in the template: + for _, tag := range t.tags { + if tag == "measurement*" { + t.greedyMeasurement = true + } else if tag == "field*" { + t.greedyField = true + } + } + if t.greedyField && t.greedyMeasurement { + return "", nil, "", fmt.Errorf("either 'field*' or 'measurement*' can be used in each template (but not both together): %q", strings.Join(t.tags, t.separator)) + } + for i, tag := range t.tags { if i >= len(fields) { continue @@ -281,10 +296,10 @@ func (t *template) Apply(line string) (string, map[string]string, string, error) if tag == "measurement" { measurement = append(measurement, fields[i]) } else if tag == "field" { - if len(field) != 0 { - return "", nil, "", fmt.Errorf("'field' can only be used once in each template: %q", line) - } - field = fields[i] + field = append(field, fields[i]) + } else if tag == "field*" { + field = append(field, fields[i:]...) + break } else if tag == "measurement*" { measurement = append(measurement, fields[i:]...) break @@ -293,7 +308,7 @@ func (t *template) Apply(line string) (string, map[string]string, string, error) } } - return strings.Join(measurement, t.separator), tags, field, nil + return strings.Join(measurement, t.separator), tags, strings.Join(field, t.separator), nil } // matcher determines which template should be applied to a given metric diff --git a/plugins/parsers/graphite/parser_test.go b/plugins/parsers/graphite/parser_test.go index ccf478c7a..5200cfbdd 100644 --- a/plugins/parsers/graphite/parser_test.go +++ b/plugins/parsers/graphite/parser_test.go @@ -94,6 +94,20 @@ func TestTemplateApply(t *testing.T) { measurement: "cpu.load", tags: map[string]string{"zone": "us-west"}, }, + { + test: "conjoined fields", + input: "prod.us-west.server01.cpu.util.idle.percent", + template: "env.zone.host.measurement.measurement.field*", + measurement: "cpu.util", + tags: map[string]string{"env": "prod", "zone": "us-west", "host": "server01"}, + }, + { + test: "multiple fields", + input: "prod.us-west.server01.cpu.util.idle.percent.free", + template: "env.zone.host.measurement.measurement.field.field.reading", + measurement: "cpu.util", + tags: map[string]string{"env": "prod", "zone": "us-west", "host": "server01", "reading": "free"}, + }, } for _, test := range tests { @@ -187,6 +201,12 @@ func TestParse(t *testing.T) { template: "measurement", err: `field "cpu" time: strconv.ParseFloat: parsing "14199724z57825": invalid syntax`, }, + { + test: "measurement* and field* (invalid)", + input: `prod.us-west.server01.cpu.util.idle.percent 99.99 1419972457825`, + template: "env.zone.host.measurement*.field*", + err: `either 'field*' or 'measurement*' can be used in each template (but not both together): "env.zone.host.measurement*.field*"`, + }, } for _, test := range tests { @@ -574,15 +594,48 @@ func TestApplyTemplateField(t *testing.T) { } } -func TestApplyTemplateFieldError(t *testing.T) { +func TestApplyTemplateMultipleFieldsTogether(t *testing.T) { p, err := NewGraphiteParser("_", - []string{"current.* measurement.field.field"}, nil) + []string{"current.* measurement.measurement.field.field"}, nil) assert.NoError(t, err) - _, _, _, err = p.ApplyTemplate("current.users.logged_in") - if err == nil { - t.Errorf("Parser.ApplyTemplate unexpected result. got %s, exp %s", err, - "'field' can only be used once in each template: current.users.logged_in") + measurement, _, field, err := p.ApplyTemplate("current.users.logged_in.ssh") + + assert.Equal(t, "current_users", measurement) + + if field != "logged_in_ssh" { + t.Errorf("Parser.ApplyTemplate unexpected result. got %s, exp %s", + field, "logged_in_ssh") + } +} + +func TestApplyTemplateMultipleFieldsApart(t *testing.T) { + p, err := NewGraphiteParser("_", + []string{"current.* measurement.measurement.field.method.field"}, nil) + assert.NoError(t, err) + + measurement, _, field, err := p.ApplyTemplate("current.users.logged_in.ssh.total") + + assert.Equal(t, "current_users", measurement) + + if field != "logged_in_total" { + t.Errorf("Parser.ApplyTemplate unexpected result. got %s, exp %s", + field, "logged_in_total") + } +} + +func TestApplyTemplateGreedyField(t *testing.T) { + p, err := NewGraphiteParser("_", + []string{"current.* measurement.measurement.field*"}, nil) + assert.NoError(t, err) + + measurement, _, field, err := p.ApplyTemplate("current.users.logged_in") + + assert.Equal(t, "current_users", measurement) + + if field != "logged_in" { + t.Errorf("Parser.ApplyTemplate unexpected result. got %s, exp %s", + field, "logged_in") } } From 8f09aadfdf792f8827f548fc6d4e8af2b791cc7b Mon Sep 17 00:00:00 2001 From: Thibault Cohen Date: Wed, 24 Feb 2016 23:32:22 -0500 Subject: [PATCH 189/287] Add nagios parser for exec input plugin closes #762 --- CHANGELOG.md | 1 + README.md | 2 +- docs/DATA_FORMATS_INPUT.md | 24 ++++++ plugins/inputs/exec/exec.go | 42 +++++++++-- plugins/inputs/exec/exec_test.go | 3 +- plugins/parsers/nagios/parser.go | 102 ++++++++++++++++++++++++++ plugins/parsers/nagios/parser_test.go | 89 ++++++++++++++++++++++ plugins/parsers/registry.go | 9 ++- 8 files changed, 264 insertions(+), 8 deletions(-) create mode 100644 plugins/parsers/nagios/parser.go create mode 100644 plugins/parsers/nagios/parser_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 0332a4eda..44c969bf5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ - [#844](https://github.com/influxdata/telegraf/pull/844): postgres_extensible plugin added. Thanks @menardorama! - [#866](https://github.com/influxdata/telegraf/pull/866): couchbase input plugin. Thanks @ljosa! - [#789](https://github.com/influxdata/telegraf/pull/789): Support multiple field specification and `field*` in graphite templates. Thanks @chrusty! +- [#762](https://github.com/influxdata/telegraf/pull/762): Nagios parser for the exec plugin. Thanks @titilambert! ### Bugfixes - [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided. diff --git a/README.md b/README.md index 2e9239d9a..9f3a19ed9 100644 --- a/README.md +++ b/README.md @@ -191,7 +191,7 @@ Currently implemented sources: * docker * dovecot * elasticsearch -* exec (generic executable plugin, support JSON, influx and graphite) +* exec (generic executable plugin, support JSON, influx, graphite and nagios) * haproxy * httpjson (generic JSON-emitting http service plugin) * influxdb diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index fd8ef8538..589db53a3 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -326,3 +326,27 @@ There are many more options available, "measurement*" ] ``` + +## Nagios: + +There are no additional configuration options for Nagios line-protocol. The +metrics are parsed directly into Telegraf metrics. + +Note: Nagios Input Data Formats is only supported in `exec` input plugin. + +#### Nagios Configuration: + +```toml +[[inputs.exec]] + ## Commands array + commands = ["/usr/lib/nagios/plugins/check_load", "-w 5,6,7 -c 7,8,9"] + + ## measurement name suffix (for separating different commands) + name_suffix = "_mycollector" + + ## Data format to consume. This can be "json", "influx", "graphite" or "nagios" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "nagios" +``` diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index 5231fd013..9fd9491ca 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -5,12 +5,14 @@ import ( "fmt" "os/exec" "sync" + "syscall" "github.com/gonuts/go-shellquote" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/nagios" ) const sampleConfig = ` @@ -20,7 +22,7 @@ const sampleConfig = ` ## measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ## Data format to consume. This can be "json", "influx" or "graphite" + ## Data format to consume. This can be "json", "influx", "graphite" or "nagios ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md @@ -46,12 +48,32 @@ func NewExec() *Exec { } type Runner interface { - Run(*Exec, string) ([]byte, error) + Run(*Exec, string, telegraf.Accumulator) ([]byte, error) } type CommandRunner struct{} -func (c CommandRunner) Run(e *Exec, command string) ([]byte, error) { +func AddNagiosState(exitCode error, acc telegraf.Accumulator) error { + nagiosState := 0 + if exitCode != nil { + exiterr, ok := exitCode.(*exec.ExitError) + if ok { + status, ok := exiterr.Sys().(syscall.WaitStatus) + if ok { + nagiosState = status.ExitStatus() + } else { + return fmt.Errorf("exec: unable to get nagios plugin exit code") + } + } else { + return fmt.Errorf("exec: unable to get nagios plugin exit code") + } + } + fields := map[string]interface{}{"state": nagiosState} + acc.AddFields("nagios_state", fields, nil) + return nil +} + +func (c CommandRunner) Run(e *Exec, command string, acc telegraf.Accumulator) ([]byte, error) { split_cmd, err := shellquote.Split(command) if err != nil || len(split_cmd) == 0 { return nil, fmt.Errorf("exec: unable to parse command, %s", err) @@ -63,7 +85,17 @@ func (c CommandRunner) Run(e *Exec, command string) ([]byte, error) { cmd.Stdout = &out if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("exec: %s for command '%s'", err, command) + switch e.parser.(type) { + case *nagios.NagiosParser: + AddNagiosState(err, acc) + default: + return nil, fmt.Errorf("exec: %s for command '%s'", err, command) + } + } else { + switch e.parser.(type) { + case *nagios.NagiosParser: + AddNagiosState(nil, acc) + } } return out.Bytes(), nil @@ -72,7 +104,7 @@ func (c CommandRunner) Run(e *Exec, command string) ([]byte, error) { func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator) { defer e.wg.Done() - out, err := e.runner.Run(e, command) + out, err := e.runner.Run(e, command, acc) if err != nil { e.errChan <- err return diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index da55ef9d3..9c75857cf 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" @@ -57,7 +58,7 @@ func newRunnerMock(out []byte, err error) Runner { } } -func (r runnerMock) Run(e *Exec, command string) ([]byte, error) { +func (r runnerMock) Run(e *Exec, command string, acc telegraf.Accumulator) ([]byte, error) { if r.err != nil { return nil, r.err } diff --git a/plugins/parsers/nagios/parser.go b/plugins/parsers/nagios/parser.go new file mode 100644 index 000000000..305c3af11 --- /dev/null +++ b/plugins/parsers/nagios/parser.go @@ -0,0 +1,102 @@ +package nagios + +import ( + "regexp" + "strings" + "time" + + "github.com/influxdata/telegraf" +) + +type NagiosParser struct { + MetricName string + DefaultTags map[string]string +} + +// Got from Alignak +// https://github.com/Alignak-monitoring/alignak/blob/develop/alignak/misc/perfdata.py +var perfSplitRegExp, _ = regexp.Compile(`([^=]+=\S+)`) +var nagiosRegExp, _ = regexp.Compile(`^([^=]+)=([\d\.\-\+eE]+)([\w\/%]*);?([\d\.\-\+eE:~@]+)?;?([\d\.\-\+eE:~@]+)?;?([\d\.\-\+eE]+)?;?([\d\.\-\+eE]+)?;?\s*`) + +func (p *NagiosParser) ParseLine(line string) (telegraf.Metric, error) { + metrics, err := p.Parse([]byte(line)) + return metrics[0], err +} + +func (p *NagiosParser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} + +//> rta,host=absol,unit=ms critical=6000,min=0,value=0.332,warning=4000 1456374625003628099 +//> pl,host=absol,unit=% critical=90,min=0,value=0,warning=80 1456374625003693967 + +func (p *NagiosParser) Parse(buf []byte) ([]telegraf.Metric, error) { + metrics := make([]telegraf.Metric, 0) + // Convert to string + out := string(buf) + // Prepare output for splitting + // Delete escaped pipes + out = strings.Replace(out, `\|`, "___PROTECT_PIPE___", -1) + // Split lines and get the first one + lines := strings.Split(out, "\n") + // Split output and perfdatas + data_splitted := strings.Split(lines[0], "|") + if len(data_splitted) <= 1 { + // No pipe == no perf data + return nil, nil + } + // Get perfdatas + perfdatas := data_splitted[1] + // Add escaped pipes + perfdatas = strings.Replace(perfdatas, "___PROTECT_PIPE___", `\|`, -1) + // Split perfs + unParsedPerfs := perfSplitRegExp.FindAllSubmatch([]byte(perfdatas), -1) + // Iterate on all perfs + for _, unParsedPerfs := range unParsedPerfs { + // Get metrics + // Trim perf + trimedPerf := strings.Trim(string(unParsedPerfs[0]), " ") + // Parse perf + perf := nagiosRegExp.FindAllSubmatch([]byte(trimedPerf), -1) + // Bad string + if len(perf) == 0 { + continue + } + if len(perf[0]) <= 2 { + continue + } + if perf[0][1] == nil || perf[0][2] == nil { + continue + } + fieldName := string(perf[0][1]) + tags := make(map[string]string) + if perf[0][3] != nil { + tags["unit"] = string(perf[0][3]) + } + fields := make(map[string]interface{}) + fields["value"] = perf[0][2] + // TODO should we set empty field + // if metric if there is no data ? + if perf[0][4] != nil { + fields["warning"] = perf[0][4] + } + if perf[0][5] != nil { + fields["critical"] = perf[0][5] + } + if perf[0][6] != nil { + fields["min"] = perf[0][6] + } + if perf[0][7] != nil { + fields["max"] = perf[0][7] + } + // Create metric + metric, err := telegraf.NewMetric(fieldName, tags, fields, time.Now().UTC()) + if err != nil { + return nil, err + } + // Add Metric + metrics = append(metrics, metric) + } + + return metrics, nil +} diff --git a/plugins/parsers/nagios/parser_test.go b/plugins/parsers/nagios/parser_test.go new file mode 100644 index 000000000..49502a021 --- /dev/null +++ b/plugins/parsers/nagios/parser_test.go @@ -0,0 +1,89 @@ +package nagios + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const validOutput1 = `PING OK - Packet loss = 0%, RTA = 0.30 ms|rta=0.298000ms;4000.000000;6000.000000;0.000000 pl=0%;80;90;0;100 +This is a long output +with three lines +` +const validOutput2 = "TCP OK - 0.008 second response time on port 80|time=0.008457s;;;0.000000;10.000000" +const validOutput3 = "TCP OK - 0.008 second response time on port 80|time=0.008457" +const invalidOutput3 = "PING OK - Packet loss = 0%, RTA = 0.30 ms" +const invalidOutput4 = "PING OK - Packet loss = 0%, RTA = 0.30 ms| =3;;;; dgasdg =;;;; sff=;;;;" + +func TestParseValidOutput(t *testing.T) { + parser := NagiosParser{ + MetricName: "nagios_test", + } + + // Output1 + metrics, err := parser.Parse([]byte(validOutput1)) + require.NoError(t, err) + assert.Len(t, metrics, 2) + // rta + assert.Equal(t, "rta", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "value": float64(0.298), + "warning": float64(4000), + "critical": float64(6000), + "min": float64(0), + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{"unit": "ms"}, metrics[0].Tags()) + // pl + assert.Equal(t, "pl", metrics[1].Name()) + assert.Equal(t, map[string]interface{}{ + "value": float64(0), + "warning": float64(80), + "critical": float64(90), + "min": float64(0), + "max": float64(100), + }, metrics[1].Fields()) + assert.Equal(t, map[string]string{"unit": "%"}, metrics[1].Tags()) + + // Output2 + metrics, err = parser.Parse([]byte(validOutput2)) + require.NoError(t, err) + assert.Len(t, metrics, 1) + // time + assert.Equal(t, "time", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "value": float64(0.008457), + "min": float64(0), + "max": float64(10), + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{"unit": "s"}, metrics[0].Tags()) + + // Output3 + metrics, err = parser.Parse([]byte(validOutput3)) + require.NoError(t, err) + assert.Len(t, metrics, 1) + // time + assert.Equal(t, "time", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "value": float64(0.008457), + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{}, metrics[0].Tags()) + +} + +func TestParseInvalidOutput(t *testing.T) { + parser := NagiosParser{ + MetricName: "nagios_test", + } + + // invalidOutput3 + metrics, err := parser.Parse([]byte(invalidOutput3)) + require.NoError(t, err) + assert.Len(t, metrics, 0) + + // invalidOutput4 + metrics, err = parser.Parse([]byte(invalidOutput4)) + require.NoError(t, err) + assert.Len(t, metrics, 0) + +} diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index b86b61c18..360d795bc 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -8,6 +8,7 @@ import ( "github.com/influxdata/telegraf/plugins/parsers/graphite" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/parsers/json" + "github.com/influxdata/telegraf/plugins/parsers/nagios" "github.com/influxdata/telegraf/plugins/parsers/value" ) @@ -39,7 +40,7 @@ type Parser interface { // Config is a struct that covers the data types needed for all parser types, // and can be used to instantiate _any_ of the parsers. type Config struct { - // Dataformat can be one of: json, influx, graphite, value + // Dataformat can be one of: json, influx, graphite, value, nagios DataFormat string // Separator only applied to Graphite data. @@ -72,6 +73,8 @@ func NewParser(config *Config) (Parser, error) { config.DataType, config.DefaultTags) case "influx": parser, err = NewInfluxParser() + case "nagios": + parser, err = NewNagiosParser() case "graphite": parser, err = NewGraphiteParser(config.Separator, config.Templates, config.DefaultTags) @@ -94,6 +97,10 @@ func NewJSONParser( return parser, nil } +func NewNagiosParser() (Parser, error) { + return &nagios.NagiosParser{}, nil +} + func NewInfluxParser() (Parser, error) { return &influx.InfluxParser{}, nil } From 31c323c09753af4b1dc47582e1a432108cf05f36 Mon Sep 17 00:00:00 2001 From: david birdsong Date: Fri, 4 Mar 2016 20:05:10 +0000 Subject: [PATCH 190/287] fix prometheus output if i understand the prometheus data model correctly, the current output for this plugin is unusable prometheus only accepts a single value per measurement. prior to this change, the range loop causes a measurement to end up w/ a random value for instance: net,dc=sjc1,grp_dashboard=1,grp_home=1,grp_hwy_fetcher=1,grp_web_admin=1,host=sjc1-b4-8,hw=app,interface=docker0,state=live bytes_recv=477596i,bytes_sent=152963303i,drop_in=0i,drop_out=0i,err_in=0i,err_out=0i,packets_recv=7231i,packets_sent=11460i 1457121990003778992 this 'net' measurent would have all it's tags copied to prometheus labels, but any of 152963303, or 0, or 7231 as a value for 'net' depending on which field is last in the map iteration this change expands the fields into new measurements by appending the field name to the influxdb measurement name. ie, the above example results with 'net' dropped and new measurements to take it's place: net_bytes_recv net_bytes_sent net_drop_in net_err_in net_packets_recv net_packets_sent i hope this can be merged, i love telegraf's composability of tags and filtering --- .../prometheus_client/prometheus_client.go | 35 ++++++++++--------- .../prometheus_client_test.go | 8 ++--- 2 files changed, 22 insertions(+), 21 deletions(-) diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index df546c192..50a9224cd 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -73,42 +73,43 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { } } - if _, ok := p.metrics[key]; !ok { - p.metrics[key] = prometheus.NewUntypedVec( - prometheus.UntypedOpts{ - Name: key, - Help: fmt.Sprintf("Telegraf collected point '%s'", key), - }, - labels, - ) - prometheus.MustRegister(p.metrics[key]) - } - l := prometheus.Labels{} for tk, tv := range point.Tags() { l[tk] = tv } - for _, val := range point.Fields() { + for n, val := range point.Fields() { + mname := fmt.Sprintf("%s_%s", key, n) + if _, ok := p.metrics[mname]; !ok { + p.metrics[mname] = prometheus.NewUntypedVec( + prometheus.UntypedOpts{ + Name: mname, + Help: fmt.Sprintf("Telegraf collected point '%s'", mname), + }, + labels, + ) + prometheus.MustRegister(p.metrics[mname]) + } + switch val := val.(type) { default: log.Printf("Prometheus output, unsupported type. key: %s, type: %T\n", - key, val) + mname, val) case int64: - m, err := p.metrics[key].GetMetricWith(l) + m, err := p.metrics[mname].GetMetricWith(l) if err != nil { log.Printf("ERROR Getting metric in Prometheus output, "+ "key: %s, labels: %v,\nerr: %s\n", - key, l, err.Error()) + mname, l, err.Error()) continue } m.Set(float64(val)) case float64: - m, err := p.metrics[key].GetMetricWith(l) + m, err := p.metrics[mname].GetMetricWith(l) if err != nil { log.Printf("ERROR Getting metric in Prometheus output, "+ "key: %s, labels: %v,\nerr: %s\n", - key, l, err.Error()) + mname, l, err.Error()) continue } m.Set(val) diff --git a/plugins/outputs/prometheus_client/prometheus_client_test.go b/plugins/outputs/prometheus_client/prometheus_client_test.go index 15ed7b7e4..13b6beff5 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_test.go @@ -46,8 +46,8 @@ func TestPrometheusWritePointEmptyTag(t *testing.T) { value float64 tags map[string]string }{ - {"test_point_1", 0.0, tags}, - {"test_point_2", 1.0, tags}, + {"test_point_1_value", 0.0, tags}, + {"test_point_2_value", 1.0, tags}, } var acc testutil.Accumulator @@ -78,8 +78,8 @@ func TestPrometheusWritePointEmptyTag(t *testing.T) { name string value float64 }{ - {"test_point_3", 0.0}, - {"test_point_4", 1.0}, + {"test_point_3_value", 0.0}, + {"test_point_4_value", 1.0}, } require.NoError(t, p.Gather(&acc)) From d09bb13cb6961ac28e2aaf169407e0d9cd407892 Mon Sep 17 00:00:00 2001 From: david birdsong Date: Tue, 8 Mar 2016 18:33:57 +0000 Subject: [PATCH 191/287] special case 'value' it usually connotes a single value type metric, appending just clutters closes #793 --- plugins/outputs/prometheus_client/prometheus_client.go | 7 ++++++- .../outputs/prometheus_client/prometheus_client_test.go | 8 ++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 50a9224cd..f13fe726c 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -79,7 +79,12 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { } for n, val := range point.Fields() { - mname := fmt.Sprintf("%s_%s", key, n) + var mname string + if n == "value" { + mname = key + } else { + mname = fmt.Sprintf("%s_%s", key, n) + } if _, ok := p.metrics[mname]; !ok { p.metrics[mname] = prometheus.NewUntypedVec( prometheus.UntypedOpts{ diff --git a/plugins/outputs/prometheus_client/prometheus_client_test.go b/plugins/outputs/prometheus_client/prometheus_client_test.go index 13b6beff5..15ed7b7e4 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_test.go @@ -46,8 +46,8 @@ func TestPrometheusWritePointEmptyTag(t *testing.T) { value float64 tags map[string]string }{ - {"test_point_1_value", 0.0, tags}, - {"test_point_2_value", 1.0, tags}, + {"test_point_1", 0.0, tags}, + {"test_point_2", 1.0, tags}, } var acc testutil.Accumulator @@ -78,8 +78,8 @@ func TestPrometheusWritePointEmptyTag(t *testing.T) { name string value float64 }{ - {"test_point_3_value", 0.0}, - {"test_point_4_value", 1.0}, + {"test_point_3", 0.0}, + {"test_point_4", 1.0}, } require.NoError(t, p.Gather(&acc)) From fbe1664214eb1ecfdded4a85ba29c1fbeac23af3 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 21 Mar 2016 14:30:59 -0600 Subject: [PATCH 192/287] README cleanup and update --- README.md | 33 ++++----------------------------- 1 file changed, 4 insertions(+), 29 deletions(-) diff --git a/README.md b/README.md index 9f3a19ed9..440af37cf 100644 --- a/README.md +++ b/README.md @@ -17,13 +17,6 @@ new plugins. ## Installation: -NOTE: Telegraf 0.10.x is **not** backwards-compatible with previous versions -of telegraf, both in the database layout and the configuration file. 0.2.x -will continue to be supported, see below for download links. - -For more details on the differences between Telegraf 0.2.x and 0.10.x, see -the [release blog post](https://influxdata.com/blog/announcing-telegraf-0-10-0/). - ### Linux deb and rpm Packages: Latest: @@ -34,10 +27,6 @@ Latest (arm): * http://get.influxdb.org/telegraf/telegraf_0.11.1-1_armhf.deb * http://get.influxdb.org/telegraf/telegraf-0.11.1-1.armhf.rpm -0.2.x: -* http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb -* http://get.influxdb.org/telegraf/telegraf-0.2.4-1.x86_64.rpm - ##### Package Instructions: * Telegraf binary is installed in `/usr/bin/telegraf` @@ -50,8 +39,9 @@ controlled via `systemctl [action] telegraf` ### yum/apt Repositories: There is a yum/apt repo available for the whole InfluxData stack, see -[here](https://docs.influxdata.com/influxdb/v0.9/introduction/installation/#installation) -for instructions, replacing the `influxdb` package name with `telegraf`. +[here](https://docs.influxdata.com/influxdb/v0.10/introduction/installation/#installation) +for instructions on setting up the repo. Once it is configured, you will be able +to use this repo to install & update telegraf. ### Linux tarballs: @@ -60,11 +50,6 @@ Latest: * http://get.influxdb.org/telegraf/telegraf-0.11.1-1_linux_i386.tar.gz * http://get.influxdb.org/telegraf/telegraf-0.11.1-1_linux_armhf.tar.gz -0.2.x: -* http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz -* http://get.influxdb.org/telegraf/telegraf_linux_386_0.2.4.tar.gz -* http://get.influxdb.org/telegraf/telegraf_linux_arm_0.2.4.tar.gz - ##### tarball Instructions: To install the full directory structure with config file, run: @@ -86,17 +71,7 @@ Latest: ##### tarball Instructions: -To install the full directory structure with config file, run: - -``` -sudo tar -C / -zxvf ./telegraf-0.11.1-1_freebsd_amd64.tar.gz -``` - -To extract only the binary, run: - -``` -tar -zxvf telegraf-0.11.1-1_freebsd_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf -``` +See linux instructions above. ### Ansible Role: From 5917a429974ae67ef92d11e70170b07aeddf7c3f Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 21 Mar 2016 14:35:58 -0600 Subject: [PATCH 193/287] influxdb output: quote the database name closes #898 --- CHANGELOG.md | 1 + plugins/outputs/influxdb/influxdb.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 44c969bf5..be3b75a77 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ ### Bugfixes - [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided. - [#884](https://github.com/influxdata/telegraf/issues/884): Do not call write method if there are 0 metrics to write. +- [#898](https://github.com/influxdata/telegraf/issues/898): Put database name in quotes, fixes special characters in the database name. ## v0.11.1 [2016-03-17] diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index d72a07754..626635a3b 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -127,7 +127,7 @@ func (i *InfluxDB) Connect() error { // Create Database if it doesn't exist _, e := c.Query(client.Query{ - Command: fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s", i.Database), + Command: fmt.Sprintf("CREATE DATABASE IF NOT EXISTS \"%s\"", i.Database), }) if e != nil { From f543dbb42f23a43f594bff88de879b8761206ce3 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 21 Mar 2016 15:33:19 -0600 Subject: [PATCH 194/287] Allow users to tell telegraf Agent not to include host tag closes #848 --- CHANGELOG.md | 1 + agent/agent.go | 16 +++++++++------- agent/agent_test.go | 11 ++++++++++- internal/config/config.go | 7 +++++-- 4 files changed, 25 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index be3b75a77..60dbb2b33 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ - [#866](https://github.com/influxdata/telegraf/pull/866): couchbase input plugin. Thanks @ljosa! - [#789](https://github.com/influxdata/telegraf/pull/789): Support multiple field specification and `field*` in graphite templates. Thanks @chrusty! - [#762](https://github.com/influxdata/telegraf/pull/762): Nagios parser for the exec plugin. Thanks @titilambert! +- [#848](https://github.com/influxdata/telegraf/issues/848): Provide option to omit host tag from telegraf agent. ### Bugfixes - [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided. diff --git a/agent/agent.go b/agent/agent.go index 8a8800cc2..fdd17a267 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -27,17 +27,19 @@ func NewAgent(config *config.Config) (*Agent, error) { Config: config, } - if a.Config.Agent.Hostname == "" { - hostname, err := os.Hostname() - if err != nil { - return nil, err + if !a.Config.Agent.OmitHostname { + if a.Config.Agent.Hostname == "" { + hostname, err := os.Hostname() + if err != nil { + return nil, err + } + + a.Config.Agent.Hostname = hostname } - a.Config.Agent.Hostname = hostname + config.Tags["host"] = a.Config.Agent.Hostname } - config.Tags["host"] = a.Config.Agent.Hostname - return a, nil } diff --git a/agent/agent_test.go b/agent/agent_test.go index 8bf8a150b..adbde9a13 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -1,7 +1,6 @@ package agent import ( - "github.com/stretchr/testify/assert" "testing" "time" @@ -11,8 +10,18 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/all" // needing to load the outputs _ "github.com/influxdata/telegraf/plugins/outputs/all" + + "github.com/stretchr/testify/assert" ) +func TestAgent_OmitHostname(t *testing.T) { + c := config.NewConfig() + c.Agent.OmitHostname = true + _, err := NewAgent(c) + assert.NoError(t, err) + assert.NotContains(t, c.Tags, "host") +} + func TestAgent_LoadPlugin(t *testing.T) { c := config.NewConfig() c.InputFilters = []string{"mysql"} diff --git a/internal/config/config.go b/internal/config/config.go index 6990b2db7..b15c5e651 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -97,8 +97,9 @@ type AgentConfig struct { Debug bool // Quiet is the option for running in quiet mode - Quiet bool - Hostname string + Quiet bool + Hostname string + OmitHostname bool } // Inputs returns a list of strings of the configured inputs. @@ -183,6 +184,8 @@ var header = `# Telegraf Configuration quiet = false ## Override default hostname, if empty use os.Hostname() hostname = "" + ## If set to true, do no set the "host" tag in the telegraf agent. + omit_hostname = false # From 4f5f6761f3b57d32d7c010e019eb09d2a7a58f04 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 21 Mar 2016 15:59:31 -0600 Subject: [PATCH 195/287] Update gopsutil dependency closes #656 --- CHANGELOG.md | 1 + Godeps | 6 +---- circle.yml | 6 ++--- plugins/inputs/couchbase/couchbase_test.go | 30 +++++++++++++++++----- plugins/inputs/zfs/zfs_test.go | 14 +++++----- 5 files changed, 35 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 60dbb2b33..d400b139c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ - [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided. - [#884](https://github.com/influxdata/telegraf/issues/884): Do not call write method if there are 0 metrics to write. - [#898](https://github.com/influxdata/telegraf/issues/898): Put database name in quotes, fixes special characters in the database name. +- [#656](https://github.com/influxdata/telegraf/issues/656): No longer run `lsof` on linux to get netstat data, fixes permissions issue. ## v0.11.1 [2016-03-17] diff --git a/Godeps b/Godeps index cbd46c524..75cb813ba 100644 --- a/Godeps +++ b/Godeps @@ -13,7 +13,6 @@ github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3 github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367 github.com/fsouza/go-dockerclient a49c8269a6899cae30da1f8a4b82e0ce945f9967 -github.com/go-ini/ini 776aa739ce9373377cd16f526cdf06cb4c89b40f github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032 github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7 @@ -24,7 +23,6 @@ github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478 github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da github.com/influxdata/influxdb e3fef5593c21644f2b43af55d6e17e70910b0e48 github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0 -github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74 github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720 github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36 github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453 @@ -34,16 +32,14 @@ github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3 github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980 -github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2 github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831 github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37 github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8 github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f -github.com/shirou/gopsutil 1de1357e7737a536c7f4ff6be7bd27977db4d2cb +github.com/shirou/gopsutil 1f32ce1bb380845be7f5d174ac641a2c592c0c42 github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744 -github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94 github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3 github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8 diff --git a/circle.yml b/circle.yml index 8fd255a78..e7b711f9d 100644 --- a/circle.yml +++ b/circle.yml @@ -4,9 +4,9 @@ machine: post: - sudo service zookeeper stop - go version - - go version | grep 1.5.3 || sudo rm -rf /usr/local/go - - wget https://storage.googleapis.com/golang/go1.5.3.linux-amd64.tar.gz - - sudo tar -C /usr/local -xzf go1.5.3.linux-amd64.tar.gz + - go version | grep 1.6 || sudo rm -rf /usr/local/go + - wget https://storage.googleapis.com/golang/go1.6.linux-amd64.tar.gz + - sudo tar -C /usr/local -xzf go1.6.linux-amd64.tar.gz - go version dependencies: diff --git a/plugins/inputs/couchbase/couchbase_test.go b/plugins/inputs/couchbase/couchbase_test.go index c7e692002..8fda04d41 100644 --- a/plugins/inputs/couchbase/couchbase_test.go +++ b/plugins/inputs/couchbase/couchbase_test.go @@ -12,23 +12,39 @@ func TestGatherServer(t *testing.T) { if err := json.Unmarshal([]byte(poolsDefaultResponse), &pool); err != nil { t.Fatal("parse poolsDefaultResponse", err) } + var bucket couchbase.Bucket if err := json.Unmarshal([]byte(bucketResponse), &bucket); err != nil { t.Fatal("parse bucketResponse", err) } - pool.BucketMap = make(map[string]couchbase.Bucket) - pool.BucketMap[bucket.Name] = bucket + pool.BucketMap = map[string]couchbase.Bucket{ + bucket.Name: bucket, + } var cb Couchbase var acc testutil.Accumulator cb.gatherServer("mycluster", &acc, &pool) - acc.AssertContainsTaggedFields(t, "couchbase_node", map[string]interface{}{"memory_free": 23181365248.0, "memory_total": 64424656896.0}, map[string]string{"cluster": "mycluster", "hostname": "172.16.10.187:8091"}) - acc.AssertContainsTaggedFields(t, "couchbase_node", map[string]interface{}{"memory_free": 23665811456.0, "memory_total": 64424656896.0}, map[string]string{"cluster": "mycluster", "hostname": "172.16.10.65:8091"}) - acc.AssertContainsTaggedFields(t, "couchbase_bucket", map[string]interface{}{"quota_percent_used": 68.85424936294555, "ops_per_sec": 5686.789686789687, "disk_fetches": 0.0, "item_count": 943239752.0, "disk_used": 409178772321.0, "data_used": 212179309111.0, "mem_used": 202156957464.0}, map[string]string{"cluster": "mycluster", "bucket": "blastro-df"}) + acc.AssertContainsTaggedFields(t, "couchbase_node", + map[string]interface{}{"memory_free": 23181365248.0, "memory_total": 64424656896.0}, + map[string]string{"cluster": "mycluster", "hostname": "172.16.10.187:8091"}) + acc.AssertContainsTaggedFields(t, "couchbase_node", + map[string]interface{}{"memory_free": 23665811456.0, "memory_total": 64424656896.0}, + map[string]string{"cluster": "mycluster", "hostname": "172.16.10.65:8091"}) + acc.AssertContainsTaggedFields(t, "couchbase_bucket", + map[string]interface{}{ + "quota_percent_used": 68.85424936294555, + "ops_per_sec": 5686.789686789687, + "disk_fetches": 0.0, + "item_count": 943239752.0, + "disk_used": 409178772321.0, + "data_used": 212179309111.0, + "mem_used": 202156957464.0, + }, + map[string]string{"cluster": "mycluster", "bucket": "blastro-df"}) } // From `/pools/default` on a real cluster -var poolsDefaultResponse string = `{"storageTotals":{"ram":{"total":450972598272,"quotaTotal":360777252864,"quotaUsed":360777252864,"used":446826622976,"usedByData":255061495696,"quotaUsedPerNode":51539607552,"quotaTotalPerNode":51539607552},"hdd":{"total":1108766539776,"quotaTotal":1108766539776,"used":559135126484,"usedByData":515767865143,"free":498944942902}},"serverGroupsUri":"/pools/default/serverGroups?v=98656394","name":"default","alerts":["Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.148\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.65\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.173\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.75\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.105\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.127\" is taken up by keys and metadata.","Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.120\" is taken up by keys and metadata.","Metadata overhead warning. Over 66% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.187\" is taken up by keys and metadata."],"alertsSilenceURL":"/controller/resetAlerts?token=2814&uuid=2bec87861652b990cf6aa5c7ee58c253","nodes":[{"systemStats":{"cpu_utilization_rate":35.43307086614173,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23181365248},"interestingStats":{"cmd_get":17.98201798201798,"couch_docs_actual_disk_size":68506048063,"couch_docs_data_size":38718796110,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140158886,"curr_items_tot":279374646,"ep_bg_fetched":0.999000999000999,"get_hits":10.98901098901099,"mem_used":36497390640,"ops":829.1708291708292,"vb_replica_curr_items":139215760},"uptime":"341236","memoryTotal":64424656896,"memoryFree":23181365248,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.187:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.38255033557047,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23665811456},"interestingStats":{"cmd_get":172.8271728271728,"couch_docs_actual_disk_size":79360565405,"couch_docs_data_size":38736382876,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140174377,"curr_items_tot":279383025,"ep_bg_fetched":0.999000999000999,"get_hits":167.8321678321678,"mem_used":36650059656,"ops":1685.314685314685,"vb_replica_curr_items":139208648},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23665811456,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.65:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":25.5586592178771,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23726600192},"interestingStats":{"cmd_get":63.06306306306306,"couch_docs_actual_disk_size":79345105217,"couch_docs_data_size":38728086130,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139195268,"curr_items_tot":279349113,"ep_bg_fetched":0,"get_hits":53.05305305305306,"mem_used":36476665576,"ops":1878.878878878879,"vb_replica_curr_items":140153845},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23726600192,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.105:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":26.45803698435277,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23854841856},"interestingStats":{"cmd_get":51.05105105105105,"couch_docs_actual_disk_size":74465931949,"couch_docs_data_size":38723830730,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139209869,"curr_items_tot":279380019,"ep_bg_fetched":0,"get_hits":47.04704704704704,"mem_used":36471784896,"ops":1831.831831831832,"vb_replica_curr_items":140170150},"uptime":"340526","memoryTotal":64424656896,"memoryFree":23854841856,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.173:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.31034482758621,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23773573120},"interestingStats":{"cmd_get":77.07707707707708,"couch_docs_actual_disk_size":74743093945,"couch_docs_data_size":38594660087,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139215932,"curr_items_tot":278427644,"ep_bg_fetched":0,"get_hits":53.05305305305305,"mem_used":36306500344,"ops":1981.981981981982,"vb_replica_curr_items":139211712},"uptime":"340495","memoryTotal":64424656896,"memoryFree":23773573120,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.15.120:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":17.60660247592847,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23662190592},"interestingStats":{"cmd_get":146.8531468531468,"couch_docs_actual_disk_size":72932847344,"couch_docs_data_size":38581771457,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139226879,"curr_items_tot":278436540,"ep_bg_fetched":0,"get_hits":144.8551448551448,"mem_used":36421860496,"ops":1495.504495504495,"vb_replica_curr_items":139209661},"uptime":"337174","memoryTotal":64424656896,"memoryFree":23662190592,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.127:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":21.68831168831169,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":24049729536},"interestingStats":{"cmd_get":11.98801198801199,"couch_docs_actual_disk_size":66414273220,"couch_docs_data_size":38587642702,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139193759,"curr_items_tot":278398926,"ep_bg_fetched":0,"get_hits":9.990009990009991,"mem_used":36237234088,"ops":883.1168831168832,"vb_replica_curr_items":139205167},"uptime":"341228","memoryTotal":64424656896,"memoryFree":24049729536,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.148:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"buckets":{"uri":"/pools/default/buckets?v=74117050&uuid=2bec87861652b990cf6aa5c7ee58c253","terseBucketsBase":"/pools/default/b/","terseStreamingBucketsBase":"/pools/default/bs/"},"remoteClusters":{"uri":"/pools/default/remoteClusters?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/pools/default/remoteClusters?just_validate=1"},"controllers":{"addNode":{"uri":"/controller/addNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"rebalance":{"uri":"/controller/rebalance?uuid=2bec87861652b990cf6aa5c7ee58c253"},"failOver":{"uri":"/controller/failOver?uuid=2bec87861652b990cf6aa5c7ee58c253"},"startGracefulFailover":{"uri":"/controller/startGracefulFailover?uuid=2bec87861652b990cf6aa5c7ee58c253"},"reAddNode":{"uri":"/controller/reAddNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"reFailOver":{"uri":"/controller/reFailOver?uuid=2bec87861652b990cf6aa5c7ee58c253"},"ejectNode":{"uri":"/controller/ejectNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"setRecoveryType":{"uri":"/controller/setRecoveryType?uuid=2bec87861652b990cf6aa5c7ee58c253"},"setAutoCompaction":{"uri":"/controller/setAutoCompaction?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/setAutoCompaction?just_validate=1"},"clusterLogsCollection":{"startURI":"/controller/startLogsCollection?uuid=2bec87861652b990cf6aa5c7ee58c253","cancelURI":"/controller/cancelLogsCollection?uuid=2bec87861652b990cf6aa5c7ee58c253"},"replication":{"createURI":"/controller/createReplication?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/createReplication?just_validate=1"},"setFastWarmup":{"uri":"/controller/setFastWarmup?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/setFastWarmup?just_validate=1"}},"rebalanceStatus":"none","rebalanceProgressUri":"/pools/default/rebalanceProgress","stopRebalanceUri":"/controller/stopRebalance?uuid=2bec87861652b990cf6aa5c7ee58c253","nodeStatusesUri":"/nodeStatuses","maxBucketCount":10,"autoCompactionSettings":{"parallelDBAndViewCompaction":false,"databaseFragmentationThreshold":{"percentage":50,"size":"undefined"},"viewFragmentationThreshold":{"percentage":50,"size":"undefined"}},"fastWarmupSettings":{"fastWarmupEnabled":true,"minMemoryThreshold":10,"minItemsThreshold":10},"tasks":{"uri":"/pools/default/tasks?v=97479372"},"visualSettingsUri":"/internalSettings/visual?v=7111573","counters":{"rebalance_success":4,"rebalance_start":6,"rebalance_stop":2}}` +const poolsDefaultResponse string = `{"storageTotals":{"ram":{"total":450972598272,"quotaTotal":360777252864,"quotaUsed":360777252864,"used":446826622976,"usedByData":255061495696,"quotaUsedPerNode":51539607552,"quotaTotalPerNode":51539607552},"hdd":{"total":1108766539776,"quotaTotal":1108766539776,"used":559135126484,"usedByData":515767865143,"free":498944942902}},"serverGroupsUri":"/pools/default/serverGroups?v=98656394","name":"default","alerts":["Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.148\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.65\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.173\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.75\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.105\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.127\" is taken up by keys and metadata.","Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.120\" is taken up by keys and metadata.","Metadata overhead warning. Over 66% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.187\" is taken up by keys and metadata."],"alertsSilenceURL":"/controller/resetAlerts?token=2814&uuid=2bec87861652b990cf6aa5c7ee58c253","nodes":[{"systemStats":{"cpu_utilization_rate":35.43307086614173,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23181365248},"interestingStats":{"cmd_get":17.98201798201798,"couch_docs_actual_disk_size":68506048063,"couch_docs_data_size":38718796110,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140158886,"curr_items_tot":279374646,"ep_bg_fetched":0.999000999000999,"get_hits":10.98901098901099,"mem_used":36497390640,"ops":829.1708291708292,"vb_replica_curr_items":139215760},"uptime":"341236","memoryTotal":64424656896,"memoryFree":23181365248,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.187:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.38255033557047,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23665811456},"interestingStats":{"cmd_get":172.8271728271728,"couch_docs_actual_disk_size":79360565405,"couch_docs_data_size":38736382876,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140174377,"curr_items_tot":279383025,"ep_bg_fetched":0.999000999000999,"get_hits":167.8321678321678,"mem_used":36650059656,"ops":1685.314685314685,"vb_replica_curr_items":139208648},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23665811456,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.65:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":25.5586592178771,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23726600192},"interestingStats":{"cmd_get":63.06306306306306,"couch_docs_actual_disk_size":79345105217,"couch_docs_data_size":38728086130,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139195268,"curr_items_tot":279349113,"ep_bg_fetched":0,"get_hits":53.05305305305306,"mem_used":36476665576,"ops":1878.878878878879,"vb_replica_curr_items":140153845},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23726600192,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.105:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":26.45803698435277,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23854841856},"interestingStats":{"cmd_get":51.05105105105105,"couch_docs_actual_disk_size":74465931949,"couch_docs_data_size":38723830730,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139209869,"curr_items_tot":279380019,"ep_bg_fetched":0,"get_hits":47.04704704704704,"mem_used":36471784896,"ops":1831.831831831832,"vb_replica_curr_items":140170150},"uptime":"340526","memoryTotal":64424656896,"memoryFree":23854841856,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.173:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.31034482758621,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23773573120},"interestingStats":{"cmd_get":77.07707707707708,"couch_docs_actual_disk_size":74743093945,"couch_docs_data_size":38594660087,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139215932,"curr_items_tot":278427644,"ep_bg_fetched":0,"get_hits":53.05305305305305,"mem_used":36306500344,"ops":1981.981981981982,"vb_replica_curr_items":139211712},"uptime":"340495","memoryTotal":64424656896,"memoryFree":23773573120,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.15.120:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":17.60660247592847,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23662190592},"interestingStats":{"cmd_get":146.8531468531468,"couch_docs_actual_disk_size":72932847344,"couch_docs_data_size":38581771457,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139226879,"curr_items_tot":278436540,"ep_bg_fetched":0,"get_hits":144.8551448551448,"mem_used":36421860496,"ops":1495.504495504495,"vb_replica_curr_items":139209661},"uptime":"337174","memoryTotal":64424656896,"memoryFree":23662190592,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.127:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":21.68831168831169,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":24049729536},"interestingStats":{"cmd_get":11.98801198801199,"couch_docs_actual_disk_size":66414273220,"couch_docs_data_size":38587642702,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139193759,"curr_items_tot":278398926,"ep_bg_fetched":0,"get_hits":9.990009990009991,"mem_used":36237234088,"ops":883.1168831168832,"vb_replica_curr_items":139205167},"uptime":"341228","memoryTotal":64424656896,"memoryFree":24049729536,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.148:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"buckets":{"uri":"/pools/default/buckets?v=74117050&uuid=2bec87861652b990cf6aa5c7ee58c253","terseBucketsBase":"/pools/default/b/","terseStreamingBucketsBase":"/pools/default/bs/"},"remoteClusters":{"uri":"/pools/default/remoteClusters?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/pools/default/remoteClusters?just_validate=1"},"controllers":{"addNode":{"uri":"/controller/addNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"rebalance":{"uri":"/controller/rebalance?uuid=2bec87861652b990cf6aa5c7ee58c253"},"failOver":{"uri":"/controller/failOver?uuid=2bec87861652b990cf6aa5c7ee58c253"},"startGracefulFailover":{"uri":"/controller/startGracefulFailover?uuid=2bec87861652b990cf6aa5c7ee58c253"},"reAddNode":{"uri":"/controller/reAddNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"reFailOver":{"uri":"/controller/reFailOver?uuid=2bec87861652b990cf6aa5c7ee58c253"},"ejectNode":{"uri":"/controller/ejectNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"setRecoveryType":{"uri":"/controller/setRecoveryType?uuid=2bec87861652b990cf6aa5c7ee58c253"},"setAutoCompaction":{"uri":"/controller/setAutoCompaction?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/setAutoCompaction?just_validate=1"},"clusterLogsCollection":{"startURI":"/controller/startLogsCollection?uuid=2bec87861652b990cf6aa5c7ee58c253","cancelURI":"/controller/cancelLogsCollection?uuid=2bec87861652b990cf6aa5c7ee58c253"},"replication":{"createURI":"/controller/createReplication?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/createReplication?just_validate=1"},"setFastWarmup":{"uri":"/controller/setFastWarmup?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/setFastWarmup?just_validate=1"}},"rebalanceStatus":"none","rebalanceProgressUri":"/pools/default/rebalanceProgress","stopRebalanceUri":"/controller/stopRebalance?uuid=2bec87861652b990cf6aa5c7ee58c253","nodeStatusesUri":"/nodeStatuses","maxBucketCount":10,"autoCompactionSettings":{"parallelDBAndViewCompaction":false,"databaseFragmentationThreshold":{"percentage":50,"size":"undefined"},"viewFragmentationThreshold":{"percentage":50,"size":"undefined"}},"fastWarmupSettings":{"fastWarmupEnabled":true,"minMemoryThreshold":10,"minItemsThreshold":10},"tasks":{"uri":"/pools/default/tasks?v=97479372"},"visualSettingsUri":"/internalSettings/visual?v=7111573","counters":{"rebalance_success":4,"rebalance_start":6,"rebalance_stop":2}}` // From `/pools/default/buckets/blastro-df` on a real cluster -var bucketResponse string = `{"name":"blastro-df","bucketType":"membase","authType":"sasl","saslPassword":"","proxyPort":0,"replicaIndex":false,"uri":"/pools/default/buckets/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","streamingUri":"/pools/default/bucketsStreaming/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","localRandomKeyUri":"/pools/default/buckets/blastro-df/localRandomKey","controllers":{"compactAll":"/pools/default/buckets/blastro-df/controller/compactBucket","compactDB":"/pools/default/buckets/default/controller/compactDatabases","purgeDeletes":"/pools/default/buckets/blastro-df/controller/unsafePurgeBucket","startRecovery":"/pools/default/buckets/blastro-df/controller/startRecovery"},"nodes":[{"couchApiBase":"http://172.16.8.148:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":18.39557399723375,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23791935488},"interestingStats":{"cmd_get":10.98901098901099,"couch_docs_actual_disk_size":79525832077,"couch_docs_data_size":38633186946,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139229304,"curr_items_tot":278470058,"ep_bg_fetched":0,"get_hits":5.994005994005994,"mem_used":36284362960,"ops":1275.724275724276,"vb_replica_curr_items":139240754},"uptime":"343968","memoryTotal":64424656896,"memoryFree":23791935488,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.8.127:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.97183098591549,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23533023232},"interestingStats":{"cmd_get":39.96003996003996,"couch_docs_actual_disk_size":63322357663,"couch_docs_data_size":38603481061,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139262616,"curr_items_tot":278508069,"ep_bg_fetched":0.999000999000999,"get_hits":30.96903096903097,"mem_used":36475078736,"ops":1370.629370629371,"vb_replica_curr_items":139245453},"uptime":"339914","memoryTotal":64424656896,"memoryFree":23533023232,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.15.120:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":23.38028169014084,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23672963072},"interestingStats":{"cmd_get":88.08808808808809,"couch_docs_actual_disk_size":80260594761,"couch_docs_data_size":38632863189,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139251563,"curr_items_tot":278498913,"ep_bg_fetched":0,"get_hits":74.07407407407408,"mem_used":36348663000,"ops":1707.707707707708,"vb_replica_curr_items":139247350},"uptime":"343235","memoryTotal":64424656896,"memoryFree":23672963072,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.173:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":22.15988779803646,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23818825728},"interestingStats":{"cmd_get":103.1031031031031,"couch_docs_actual_disk_size":68247785524,"couch_docs_data_size":38747583467,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139245453,"curr_items_tot":279451313,"ep_bg_fetched":1.001001001001001,"get_hits":86.08608608608608,"mem_used":36524715864,"ops":1749.74974974975,"vb_replica_curr_items":140205860},"uptime":"343266","memoryTotal":64424656896,"memoryFree":23818825728,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.105:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.94444444444444,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23721426944},"interestingStats":{"cmd_get":113.1131131131131,"couch_docs_actual_disk_size":68102832275,"couch_docs_data_size":38747477407,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139230887,"curr_items_tot":279420530,"ep_bg_fetched":0,"get_hits":106.1061061061061,"mem_used":36524887624,"ops":1799.7997997998,"vb_replica_curr_items":140189643},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23721426944,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.65:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":60.62176165803109,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23618203648},"interestingStats":{"cmd_get":30.96903096903097,"couch_docs_actual_disk_size":69052175561,"couch_docs_data_size":38755695030,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140210194,"curr_items_tot":279454253,"ep_bg_fetched":0,"get_hits":26.97302697302698,"mem_used":36543072472,"ops":1337.662337662338,"vb_replica_curr_items":139244059},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23618203648,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.187:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.83588317107093,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23062269952},"interestingStats":{"cmd_get":33.03303303303304,"couch_docs_actual_disk_size":74422029546,"couch_docs_data_size":38758172837,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140194321,"curr_items_tot":279445526,"ep_bg_fetched":0,"get_hits":21.02102102102102,"mem_used":36527676832,"ops":1088.088088088088,"vb_replica_curr_items":139251205},"uptime":"343971","memoryTotal":64424656896,"memoryFree":23062269952,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"stats":{"uri":"/pools/default/buckets/blastro-df/stats","directoryURI":"/pools/default/buckets/blastro-df/statsDirectory","nodeStatsListURI":"/pools/default/buckets/blastro-df/nodes"},"ddocs":{"uri":"/pools/default/buckets/blastro-df/ddocs"},"nodeLocator":"vbucket","fastWarmupSettings":false,"autoCompactionSettings":false,"uuid":"2e6b9dc4c278300ce3a4f27ad540323f","vBucketServerMap":{"hashAlgorithm":"CRC","numReplicas":1,"serverList":["172.16.10.187:11210","172.16.10.65:11210","172.16.13.105:11210","172.16.13.173:11210","172.16.15.120:11210","172.16.8.127:11210","172.16.8.148:11210"],"vBucketMap":[[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,6],[0,6],[0,6],[0,6],[0,6],[1,3],[1,3],[1,3],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[2,3],[2,3],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[2,5],[2,5],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,6],[3,6],[3,6],[3,6],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[5,3],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[0,3],[0,3],[0,3],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,5],[4,5],[4,5],[4,5],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[2,6],[2,6],[3,2],[3,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[2,0],[2,0],[2,0],[2,0],[2,0],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,2],[4,3],[4,3],[4,3],[4,5],[4,5],[4,5],[4,5],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[5,4],[5,4],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[6,5],[6,5],[6,5],[6,5],[6,5],[4,0],[4,0],[4,0],[4,0],[4,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,6],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[4,6],[4,6],[4,6],[4,6],[4,6],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[5,0],[5,0],[5,0],[2,0],[2,0],[3,0],[3,0],[3,0],[5,3],[5,3],[5,3],[5,3],[5,3],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,5],[4,5],[1,0],[3,0],[3,1],[3,1],[3,1],[3,1],[5,4],[5,4],[5,4],[5,4],[5,4],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[5,6],[5,6],[5,6],[6,2],[6,2],[6,3],[6,3],[6,3],[4,0],[4,0],[4,0],[4,0],[4,0],[4,1],[4,1],[4,1],[5,6],[5,6],[5,6],[5,6],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[0,5],[0,5],[0,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,1],[0,1],[4,6],[4,6],[4,6],[4,6],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,6],[2,0],[2,0],[5,2],[5,3],[5,3],[5,3],[5,3],[5,1],[5,1],[5,1],[5,1],[5,1],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,1],[4,1],[4,1],[5,3],[5,3],[5,3],[5,3],[5,3],[2,0],[5,2],[5,2],[5,2],[5,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,2],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,1],[4,1],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,1],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,2],[0,2],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[0,2],[0,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,1],[4,1],[4,2],[4,2],[4,2],[6,3],[6,3],[6,3],[6,3],[6,3],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,2],[6,2],[6,2],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,1],[2,3],[2,3],[1,2],[1,2],[1,2],[1,3],[1,3],[1,3],[1,3],[1,3],[3,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[3,1],[3,1],[3,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[6,3],[6,3],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[5,1],[5,1],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[5,2],[6,2],[6,2],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,3],[1,3],[1,3],[6,2],[6,2],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[1,3],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,5],[6,5],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,2]]},"replicaNumber":1,"threadsNumber":3,"quota":{"ram":293601280000,"rawRAM":41943040000},"basicStats":{"quotaPercentUsed":68.85424936294555,"opsPerSec":5686.789686789687,"diskFetches":0,"itemCount":943239752,"diskUsed":409178772321,"dataUsed":212179309111,"memUsed":202156957464},"evictionPolicy":"valueOnly","bucketCapabilitiesVer":"","bucketCapabilities":["cbhello","touch","couchapi","cccp","xdcrCheckpointing","nodesExt"]}` +const bucketResponse string = `{"name":"blastro-df","bucketType":"membase","authType":"sasl","saslPassword":"","proxyPort":0,"replicaIndex":false,"uri":"/pools/default/buckets/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","streamingUri":"/pools/default/bucketsStreaming/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","localRandomKeyUri":"/pools/default/buckets/blastro-df/localRandomKey","controllers":{"compactAll":"/pools/default/buckets/blastro-df/controller/compactBucket","compactDB":"/pools/default/buckets/default/controller/compactDatabases","purgeDeletes":"/pools/default/buckets/blastro-df/controller/unsafePurgeBucket","startRecovery":"/pools/default/buckets/blastro-df/controller/startRecovery"},"nodes":[{"couchApiBase":"http://172.16.8.148:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":18.39557399723375,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23791935488},"interestingStats":{"cmd_get":10.98901098901099,"couch_docs_actual_disk_size":79525832077,"couch_docs_data_size":38633186946,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139229304,"curr_items_tot":278470058,"ep_bg_fetched":0,"get_hits":5.994005994005994,"mem_used":36284362960,"ops":1275.724275724276,"vb_replica_curr_items":139240754},"uptime":"343968","memoryTotal":64424656896,"memoryFree":23791935488,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.8.127:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.97183098591549,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23533023232},"interestingStats":{"cmd_get":39.96003996003996,"couch_docs_actual_disk_size":63322357663,"couch_docs_data_size":38603481061,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139262616,"curr_items_tot":278508069,"ep_bg_fetched":0.999000999000999,"get_hits":30.96903096903097,"mem_used":36475078736,"ops":1370.629370629371,"vb_replica_curr_items":139245453},"uptime":"339914","memoryTotal":64424656896,"memoryFree":23533023232,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.15.120:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":23.38028169014084,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23672963072},"interestingStats":{"cmd_get":88.08808808808809,"couch_docs_actual_disk_size":80260594761,"couch_docs_data_size":38632863189,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139251563,"curr_items_tot":278498913,"ep_bg_fetched":0,"get_hits":74.07407407407408,"mem_used":36348663000,"ops":1707.707707707708,"vb_replica_curr_items":139247350},"uptime":"343235","memoryTotal":64424656896,"memoryFree":23672963072,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.173:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":22.15988779803646,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23818825728},"interestingStats":{"cmd_get":103.1031031031031,"couch_docs_actual_disk_size":68247785524,"couch_docs_data_size":38747583467,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139245453,"curr_items_tot":279451313,"ep_bg_fetched":1.001001001001001,"get_hits":86.08608608608608,"mem_used":36524715864,"ops":1749.74974974975,"vb_replica_curr_items":140205860},"uptime":"343266","memoryTotal":64424656896,"memoryFree":23818825728,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.105:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.94444444444444,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23721426944},"interestingStats":{"cmd_get":113.1131131131131,"couch_docs_actual_disk_size":68102832275,"couch_docs_data_size":38747477407,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139230887,"curr_items_tot":279420530,"ep_bg_fetched":0,"get_hits":106.1061061061061,"mem_used":36524887624,"ops":1799.7997997998,"vb_replica_curr_items":140189643},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23721426944,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.65:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":60.62176165803109,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23618203648},"interestingStats":{"cmd_get":30.96903096903097,"couch_docs_actual_disk_size":69052175561,"couch_docs_data_size":38755695030,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140210194,"curr_items_tot":279454253,"ep_bg_fetched":0,"get_hits":26.97302697302698,"mem_used":36543072472,"ops":1337.662337662338,"vb_replica_curr_items":139244059},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23618203648,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.187:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.83588317107093,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23062269952},"interestingStats":{"cmd_get":33.03303303303304,"couch_docs_actual_disk_size":74422029546,"couch_docs_data_size":38758172837,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140194321,"curr_items_tot":279445526,"ep_bg_fetched":0,"get_hits":21.02102102102102,"mem_used":36527676832,"ops":1088.088088088088,"vb_replica_curr_items":139251205},"uptime":"343971","memoryTotal":64424656896,"memoryFree":23062269952,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"stats":{"uri":"/pools/default/buckets/blastro-df/stats","directoryURI":"/pools/default/buckets/blastro-df/statsDirectory","nodeStatsListURI":"/pools/default/buckets/blastro-df/nodes"},"ddocs":{"uri":"/pools/default/buckets/blastro-df/ddocs"},"nodeLocator":"vbucket","fastWarmupSettings":false,"autoCompactionSettings":false,"uuid":"2e6b9dc4c278300ce3a4f27ad540323f","vBucketServerMap":{"hashAlgorithm":"CRC","numReplicas":1,"serverList":["172.16.10.187:11210","172.16.10.65:11210","172.16.13.105:11210","172.16.13.173:11210","172.16.15.120:11210","172.16.8.127:11210","172.16.8.148:11210"],"vBucketMap":[[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,6],[0,6],[0,6],[0,6],[0,6],[1,3],[1,3],[1,3],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[2,3],[2,3],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[2,5],[2,5],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,6],[3,6],[3,6],[3,6],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[5,3],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[0,3],[0,3],[0,3],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,5],[4,5],[4,5],[4,5],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[2,6],[2,6],[3,2],[3,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[2,0],[2,0],[2,0],[2,0],[2,0],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,2],[4,3],[4,3],[4,3],[4,5],[4,5],[4,5],[4,5],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[5,4],[5,4],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[6,5],[6,5],[6,5],[6,5],[6,5],[4,0],[4,0],[4,0],[4,0],[4,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,6],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[4,6],[4,6],[4,6],[4,6],[4,6],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[5,0],[5,0],[5,0],[2,0],[2,0],[3,0],[3,0],[3,0],[5,3],[5,3],[5,3],[5,3],[5,3],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,5],[4,5],[1,0],[3,0],[3,1],[3,1],[3,1],[3,1],[5,4],[5,4],[5,4],[5,4],[5,4],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[5,6],[5,6],[5,6],[6,2],[6,2],[6,3],[6,3],[6,3],[4,0],[4,0],[4,0],[4,0],[4,0],[4,1],[4,1],[4,1],[5,6],[5,6],[5,6],[5,6],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[0,5],[0,5],[0,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,1],[0,1],[4,6],[4,6],[4,6],[4,6],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,6],[2,0],[2,0],[5,2],[5,3],[5,3],[5,3],[5,3],[5,1],[5,1],[5,1],[5,1],[5,1],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,1],[4,1],[4,1],[5,3],[5,3],[5,3],[5,3],[5,3],[2,0],[5,2],[5,2],[5,2],[5,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,2],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,1],[4,1],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,1],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,2],[0,2],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[0,2],[0,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,1],[4,1],[4,2],[4,2],[4,2],[6,3],[6,3],[6,3],[6,3],[6,3],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,2],[6,2],[6,2],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,1],[2,3],[2,3],[1,2],[1,2],[1,2],[1,3],[1,3],[1,3],[1,3],[1,3],[3,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[3,1],[3,1],[3,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[6,3],[6,3],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[5,1],[5,1],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[5,2],[6,2],[6,2],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,3],[1,3],[1,3],[6,2],[6,2],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[1,3],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,5],[6,5],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,2]]},"replicaNumber":1,"threadsNumber":3,"quota":{"ram":293601280000,"rawRAM":41943040000},"basicStats":{"quotaPercentUsed":68.85424936294555,"opsPerSec":5686.789686789687,"diskFetches":0,"itemCount":943239752,"diskUsed":409178772321,"dataUsed":212179309111,"memUsed":202156957464},"evictionPolicy":"valueOnly","bucketCapabilitiesVer":"","bucketCapabilities":["cbhello","touch","couchapi","cccp","xdcrCheckpointing","nodesExt"]}` diff --git a/plugins/inputs/zfs/zfs_test.go b/plugins/inputs/zfs/zfs_test.go index 514bad3d4..03179ba59 100644 --- a/plugins/inputs/zfs/zfs_test.go +++ b/plugins/inputs/zfs/zfs_test.go @@ -212,22 +212,22 @@ func TestZfsGeneratesMetrics(t *testing.T) { } z = &Zfs{KstatPath: testKstatPath} - acc = testutil.Accumulator{} - err = z.Gather(&acc) + acc2 := testutil.Accumulator{} + err = z.Gather(&acc2) require.NoError(t, err) - acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags) - acc.Metrics = nil + acc2.AssertContainsTaggedFields(t, "zfs", intMetrics, tags) + acc2.Metrics = nil intMetrics = getKstatMetricsArcOnly() //two pools, one metric z = &Zfs{KstatPath: testKstatPath, KstatMetrics: []string{"arcstats"}} - acc = testutil.Accumulator{} - err = z.Gather(&acc) + acc3 := testutil.Accumulator{} + err = z.Gather(&acc3) require.NoError(t, err) - acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags) + acc3.AssertContainsTaggedFields(t, "zfs", intMetrics, tags) err = os.RemoveAll(os.TempDir() + "/telegraf") require.NoError(t, err) From 7f65ffcb1550de0eb0f570363bbb1ae512ea5621 Mon Sep 17 00:00:00 2001 From: Chris Goller Date: Mon, 21 Mar 2016 18:20:09 -0500 Subject: [PATCH 196/287] Add optional parameters to influxdb output README --- plugins/outputs/influxdb/README.md | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md index f9a8f7217..cfa960b37 100644 --- a/plugins/outputs/influxdb/README.md +++ b/plugins/outputs/influxdb/README.md @@ -2,7 +2,7 @@ This plugin writes to [InfluxDB](https://www.influxdb.com) via HTTP or UDP. -Required parameters: +### Required parameters: * `urls`: List of strings, this is for InfluxDB clustering support. On each flush interval, Telegraf will randomly choose one of the urls @@ -10,3 +10,17 @@ to write to. Each URL should start with either `http://` or `udp://` * `database`: The name of the database to write to. +### Optional parameters: + +* `retention_policy`: Retention policy to write to. +* `precision`: Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". note: using "s" precision greatly improves InfluxDB compression. +* `timeout`: Write timeout (for the InfluxDB client), formatted as a string. If not provided, will default to 5s. 0s means no timeout (not recommended). +* `username`: Username for influxdb +* `password`: Password for influxdb +* `user_agent`: Set the user agent for HTTP POSTs (can be useful for log differentiation) +* `udp_payload`: Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) + ## Optional SSL Config +* `ssl_ca`: SSL CA +* `ssl_cert`: SSL CERT +* `ssl_key`: SSL key +* `insecure_skip_verify`: Use SSL but skip chain & host verification (default: false) From 69606a45e0db6777d4b3620207577d946f82ddee Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 22 Mar 2016 10:34:33 -0600 Subject: [PATCH 197/287] Fix prometheus label names, and dont panic if invalid fixes #907 --- CHANGELOG.md | 1 + plugins/inputs/udp_listener/udp_listener.go | 2 - .../prometheus_client/prometheus_client.go | 46 +++++++++++++++---- 3 files changed, 37 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d400b139c..316a8a311 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ - [#884](https://github.com/influxdata/telegraf/issues/884): Do not call write method if there are 0 metrics to write. - [#898](https://github.com/influxdata/telegraf/issues/898): Put database name in quotes, fixes special characters in the database name. - [#656](https://github.com/influxdata/telegraf/issues/656): No longer run `lsof` on linux to get netstat data, fixes permissions issue. +- [#907](https://github.com/influxdata/telegraf/issues/907): Fix prometheus invalid label/measurement name key. ## v0.11.1 [2016-03-17] diff --git a/plugins/inputs/udp_listener/udp_listener.go b/plugins/inputs/udp_listener/udp_listener.go index 4b362c478..9b0a65d6f 100644 --- a/plugins/inputs/udp_listener/udp_listener.go +++ b/plugins/inputs/udp_listener/udp_listener.go @@ -90,8 +90,6 @@ func (u *UdpListener) Start(acc telegraf.Accumulator) error { } func (u *UdpListener) Stop() { - u.Lock() - defer u.Unlock() close(u.done) u.listener.Close() u.wg.Wait() diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index f13fe726c..79a838304 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -4,12 +4,26 @@ import ( "fmt" "log" "net/http" + "regexp" + "strings" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" "github.com/prometheus/client_golang/prometheus" ) +var ( + sanitizedChars = strings.NewReplacer("/", "_", "@", "_", " ", "_", "-", "_", ".", "_") + + // Prometheus metric names must match this regex + // see https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels + metricName = regexp.MustCompile("^[a-zA-Z_:][a-zA-Z0-9_:]*$") + + // Prometheus labels must match this regex + // see https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels + labelName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") +) + type PrometheusClient struct { Listen string metrics map[string]*prometheus.UntypedVec @@ -64,27 +78,36 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { } for _, point := range metrics { - var labels []string key := point.Name() + key = sanitizedChars.Replace(key) - for k, _ := range point.Tags() { - if len(k) > 0 { - labels = append(labels, k) - } - } - + var labels []string l := prometheus.Labels{} - for tk, tv := range point.Tags() { - l[tk] = tv + for k, v := range point.Tags() { + k = sanitizedChars.Replace(k) + if len(k) == 0 { + continue + } + if !labelName.MatchString(k) { + continue + } + labels = append(labels, k) + l[k] = v } for n, val := range point.Fields() { + n = sanitizedChars.Replace(n) var mname string if n == "value" { mname = key } else { mname = fmt.Sprintf("%s_%s", key, n) } + + if !metricName.MatchString(mname) { + continue + } + if _, ok := p.metrics[mname]; !ok { p.metrics[mname] = prometheus.NewUntypedVec( prometheus.UntypedOpts{ @@ -93,7 +116,10 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { }, labels, ) - prometheus.MustRegister(p.metrics[mname]) + if err := prometheus.Register(p.metrics[mname]); err != nil { + log.Printf("prometheus_client: Metric failed to register with prometheus, %s", err) + continue + } } switch val := val.(type) { From 276e7629bdf96a4700e47c8beab13a273ddc56de Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 21 Mar 2016 16:22:01 -0600 Subject: [PATCH 198/287] memcached unix socket: fix panic. Do not recreate conn inside if closes #841 --- CHANGELOG.md | 1 + plugins/inputs/memcached/memcached.go | 9 +++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 316a8a311..8f605ee95 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ - [#898](https://github.com/influxdata/telegraf/issues/898): Put database name in quotes, fixes special characters in the database name. - [#656](https://github.com/influxdata/telegraf/issues/656): No longer run `lsof` on linux to get netstat data, fixes permissions issue. - [#907](https://github.com/influxdata/telegraf/issues/907): Fix prometheus invalid label/measurement name key. +- [#841](https://github.com/influxdata/telegraf/issues/841): Fix memcached unix socket panic. ## v0.11.1 [2016-03-17] diff --git a/plugins/inputs/memcached/memcached.go b/plugins/inputs/memcached/memcached.go index 24ff09d77..c631a1ed1 100644 --- a/plugins/inputs/memcached/memcached.go +++ b/plugins/inputs/memcached/memcached.go @@ -94,14 +94,15 @@ func (m *Memcached) gatherServer( acc telegraf.Accumulator, ) error { var conn net.Conn + var err error if unix { - conn, err := net.DialTimeout("unix", address, defaultTimeout) + conn, err = net.DialTimeout("unix", address, defaultTimeout) if err != nil { return err } defer conn.Close() } else { - _, _, err := net.SplitHostPort(address) + _, _, err = net.SplitHostPort(address) if err != nil { address = address + ":11211" } @@ -113,6 +114,10 @@ func (m *Memcached) gatherServer( defer conn.Close() } + if conn == nil { + return fmt.Errorf("Failed to create net connection") + } + // Extend connection conn.SetDeadline(time.Now().Add(defaultTimeout)) From 51d772425515fde4068b601a227f68150158b3c3 Mon Sep 17 00:00:00 2001 From: JP Date: Tue, 22 Mar 2016 10:07:01 -0500 Subject: [PATCH 199/287] add verifyValue func for datadog and librato, bail if no good closes #906 --- plugins/outputs/datadog/datadog.go | 11 +++++++++ plugins/outputs/datadog/datadog_test.go | 30 ++++++++++++++++++------- plugins/outputs/librato/librato.go | 11 +++++++++ plugins/outputs/librato/librato_test.go | 11 +++++---- 4 files changed, 49 insertions(+), 14 deletions(-) diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go index 5d6fab165..56fdc38e4 100644 --- a/plugins/outputs/datadog/datadog.go +++ b/plugins/outputs/datadog/datadog.go @@ -139,6 +139,9 @@ func (d *Datadog) authenticatedUrl() string { func buildMetrics(m telegraf.Metric) (map[string]Point, error) { ms := make(map[string]Point) for k, v := range m.Fields() { + if !verifyValue(v) { + continue + } var p Point if err := p.setValue(v); err != nil { return ms, fmt.Errorf("unable to extract value from Fields, %s", err.Error()) @@ -160,6 +163,14 @@ func buildTags(mTags map[string]string) []string { return tags } +func verifyValue(v interface{}) bool { + switch v.(type) { + case string: + return false + } + return true +} + func (p *Point) setValue(v interface{}) error { switch d := v.(type) { case int: diff --git a/plugins/outputs/datadog/datadog_test.go b/plugins/outputs/datadog/datadog_test.go index 30495a044..2d3095be1 100644 --- a/plugins/outputs/datadog/datadog_test.go +++ b/plugins/outputs/datadog/datadog_test.go @@ -152,14 +152,6 @@ func TestBuildPoint(t *testing.T) { }, nil, }, - { - testutil.TestMetric("11234.5", "test7"), - Point{ - float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), - 11234.5, - }, - fmt.Errorf("unable to extract value from Fields, undeterminable type"), - }, } for _, tt := range tagtests { pt, err := buildMetrics(tt.ptIn) @@ -175,3 +167,25 @@ func TestBuildPoint(t *testing.T) { } } } + +func TestVerifyValue(t *testing.T) { + var tagtests = []struct { + ptIn telegraf.Metric + validMetric bool + }{ + { + testutil.TestMetric(float32(11234.5), "test1"), + true, + }, + { + testutil.TestMetric("11234.5", "test2"), + false, + }, + } + for _, tt := range tagtests { + ok := verifyValue(tt.ptIn.Fields()["value"]) + if tt.validMetric != ok { + t.Errorf("%s: verification failed\n", tt.ptIn.Name()) + } + } +} diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index 910ac8b4e..f0f03400e 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -165,6 +165,9 @@ func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) { Name: l.buildGaugeName(m, fieldName), MeasureTime: m.Time().Unix(), } + if !gauge.verifyValue(value) { + continue + } if err := gauge.setValue(value); err != nil { return gauges, fmt.Errorf("unable to extract value from Fields, %s\n", err.Error()) @@ -186,6 +189,14 @@ func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) { return gauges, nil } +func (g *Gauge) verifyValue(v interface{}) bool { + switch v.(type) { + case string: + return false + } + return true +} + func (g *Gauge) setValue(v interface{}) error { switch d := v.(type) { case int: diff --git a/plugins/outputs/librato/librato_test.go b/plugins/outputs/librato/librato_test.go index ae08793e0..3aa5b8748 100644 --- a/plugins/outputs/librato/librato_test.go +++ b/plugins/outputs/librato/librato_test.go @@ -139,12 +139,8 @@ func TestBuildGauge(t *testing.T) { }, { testutil.TestMetric("11234.5", "test7"), - &Gauge{ - Name: "value1.test7.value", - MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), - Value: 11234.5, - }, - fmt.Errorf("unable to extract value from Fields, undeterminable type"), + nil, + nil, }, } @@ -158,6 +154,9 @@ func TestBuildGauge(t *testing.T) { t.Errorf("%s: expected an error (%s) but none returned", gt.ptIn.Name(), gt.err.Error()) } + if len(gauges) != 0 && gt.outGauge == nil { + t.Errorf("%s: unexpected gauge, %+v\n", gt.ptIn.Name(), gt.outGauge) + } if len(gauges) == 0 { continue } From a95710ed0c9c57ea333d76de1e3e4b0d5b25723b Mon Sep 17 00:00:00 2001 From: Thibault Cohen Date: Mon, 21 Mar 2016 20:10:17 -0400 Subject: [PATCH 200/287] SNMP plugin fixes fixes #873 --- CHANGELOG.md | 1 + plugins/inputs/snmp/README.md | 8 ++++---- plugins/inputs/snmp/snmp.go | 10 ++++------ 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8f605ee95..a5e6aba19 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ - [#656](https://github.com/influxdata/telegraf/issues/656): No longer run `lsof` on linux to get netstat data, fixes permissions issue. - [#907](https://github.com/influxdata/telegraf/issues/907): Fix prometheus invalid label/measurement name key. - [#841](https://github.com/influxdata/telegraf/issues/841): Fix memcached unix socket panic. +- [#873](https://github.com/influxdata/telegraf/issues/873): Fix SNMP plugin sometimes not returning metrics. Thanks @titiliambert! ## v0.11.1 [2016-03-17] diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index ee6d17857..bee783228 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -492,12 +492,12 @@ Note: the plugin will add instance name as tag *instance* # oid attribute is useless # SNMP SUBTABLES - [[plugins.snmp.subtable]] + [[inputs.snmp.subtable]] name = "bytes_recv" oid = ".1.3.6.1.2.1.31.1.1.1.6" unit = "octets" - [[plugins.snmp.subtable]] + [[inputs.snmp.subtable]] name = "bytes_send" oid = ".1.3.6.1.2.1.31.1.1.1.10" unit = "octets" @@ -505,10 +505,10 @@ Note: the plugin will add instance name as tag *instance* #### Configuration notes -- In **plugins.snmp.table** section, the `oid` attribute is useless if +- In **inputs.snmp.table** section, the `oid` attribute is useless if the `sub_tables` attributes is defined -- In **plugins.snmp.subtable** section, you can put a name from `snmptranslate_file` +- In **inputs.snmp.subtable** section, you can put a name from `snmptranslate_file` as `oid` attribute instead of a valid OID ### Measurements & Fields: diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index ba270cb1d..a56e53ff7 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -4,7 +4,6 @@ import ( "io/ioutil" "log" "net" - "regexp" "strconv" "strings" "time" @@ -308,11 +307,10 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { return err } else { for _, line := range strings.Split(string(data), "\n") { - oidsRegEx := regexp.MustCompile(`([^\t]*)\t*([^\t]*)`) - oids := oidsRegEx.FindStringSubmatch(string(line)) - if oids[2] != "" { - oid_name := oids[1] - oid := oids[2] + oids := strings.Fields(string(line)) + if len(oids) == 2 && oids[1] != "" { + oid_name := oids[0] + oid := oids[1] fillnode(s.initNode, oid_name, strings.Split(string(oid), ".")) s.nameToOid[oid_name] = oid } From 474d6db42ffc8459f5f380aacfe5fd689369959c Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 23 Mar 2016 08:57:05 -0600 Subject: [PATCH 201/287] Don't log every string metric that prometheus doesnt support --- .../prometheus_client/prometheus_client.go | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 79a838304..d5e3f1ced 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -96,6 +96,15 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { } for n, val := range point.Fields() { + // Ignore string and bool fields. + switch val.(type) { + case string: + continue + case bool: + continue + } + + // sanitize the measurement name n = sanitizedChars.Replace(n) var mname string if n == "value" { @@ -104,15 +113,17 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { mname = fmt.Sprintf("%s_%s", key, n) } + // verify that it is a valid measurement name if !metricName.MatchString(mname) { continue } + // Create a new metric if it hasn't been created yet. if _, ok := p.metrics[mname]; !ok { p.metrics[mname] = prometheus.NewUntypedVec( prometheus.UntypedOpts{ Name: mname, - Help: fmt.Sprintf("Telegraf collected point '%s'", mname), + Help: "Telegraf collected metric", }, labels, ) @@ -123,9 +134,6 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { } switch val := val.(type) { - default: - log.Printf("Prometheus output, unsupported type. key: %s, type: %T\n", - mname, val) case int64: m, err := p.metrics[mname].GetMetricWith(l) if err != nil { @@ -144,6 +152,8 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { continue } m.Set(val) + default: + continue } } } From 59085f072a0b5317901d869374af5830cbacb040 Mon Sep 17 00:00:00 2001 From: Adam Argo Date: Thu, 24 Mar 2016 14:31:23 -0700 Subject: [PATCH 202/287] adds ability to parse datadog-formatted tags in the statsd input --- plugins/inputs/statsd/README.md | 3 ++ plugins/inputs/statsd/statsd.go | 50 ++++++++++++++++++ plugins/inputs/statsd/statsd_test.go | 78 ++++++++++++++++++++++++++++ 3 files changed, 131 insertions(+) diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md index 5bb18657c..5156f90df 100644 --- a/plugins/inputs/statsd/README.md +++ b/plugins/inputs/statsd/README.md @@ -21,6 +21,9 @@ ## convert measurement names, "." to "_" and "-" to "__" convert_names = true + ## parses tags in the datadog statsd format + parse_data_dog_tags = false + ## Statsd data translation templates, more info can be read here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite # templates = [ diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index 5e1e85667..b113faa6d 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -47,6 +47,10 @@ type Statsd struct { DeleteTimings bool ConvertNames bool + // This flag enables parsing of tags in the dogstatsd extention to the + // statsd protocol (http://docs.datadoghq.com/guides/dogstatsd/) + ParseDataDogTags bool + // UDPPacketSize is the size of the read packets for the server listening // for statsd UDP packets. This will default to 1500 bytes. UDPPacketSize int `toml:"udp_packet_size"` @@ -148,6 +152,9 @@ const sampleConfig = ` ## convert measurement names, "." to "_" and "-" to "__" convert_names = true + ## parses tags in the datadog statsd format + parse_data_dog_tags = false + ## Statsd data translation templates, more info can be read here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite # templates = [ @@ -318,6 +325,43 @@ func (s *Statsd) parseStatsdLine(line string) error { s.Lock() defer s.Unlock() + lineTags := make(map[string]string) + if s.ParseDataDogTags { + recombinedSegments := make([]string, 0) + // datadog tags look like this: + // users.online:1|c|@0.5|#country:china,environment:production + // users.online:1|c|#sometagwithnovalue + // we will split on the pipe and remove any elements that are datadog + // tags, parse them, and rebuild the line sans the datadog tags + pipesplit := strings.Split(line, "|") + for _, segment := range pipesplit { + if len(segment) > 0 && segment[0] == '#' { + // we have ourselves a tag; they are comma serated + tagstr := segment[1:] + tags := strings.Split(tagstr, ",") + for _, tag := range tags { + ts := strings.Split(tag, ":") + var k, v string + switch len(ts) { + case 1: + // just a tag + k = ts[0] + v = "" + case 2: + k = ts[0] + v = ts[1] + } + if k != "" { + lineTags[k] = v + } + } + } else { + recombinedSegments = append(recombinedSegments, segment) + } + } + line = strings.Join(recombinedSegments, "|") + } + // Validate splitting the line on ":" bits := strings.Split(line, ":") if len(bits) < 2 { @@ -415,6 +459,12 @@ func (s *Statsd) parseStatsdLine(line string) error { m.tags["metric_type"] = "histogram" } + if len(lineTags) > 0 { + for k, v := range lineTags { + m.tags[k] = v + } + } + // Make a unique key for the measurement name/tags var tg []string for k, v := range m.tags { diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index 3a87f00aa..5dffdc9cd 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -410,6 +410,84 @@ func TestParse_Tags(t *testing.T) { } } +// Test that DataDog tags are parsed +func TestParse_DataDogTags(t *testing.T) { + s := NewStatsd() + s.ParseDataDogTags = true + + lines := []string{ + "my_counter:1|c|#host:localhost,environment:prod", + "my_gauge:10.1|g|#live", + "my_set:1|s|#host:localhost", + "my_timer:3|ms|@0.1|#live,host:localhost", + } + + testTags := map[string]map[string]string{ + "my_counter": map[string]string{ + "host": "localhost", + "environment": "prod", + }, + + "my_gauge": map[string]string{ + "live": "", + }, + + "my_set": map[string]string{ + "host": "localhost", + }, + + "my_timer": map[string]string{ + "live": "", + "host": "localhost", + }, + } + + for _, line := range lines { + err := s.parseStatsdLine(line) + if err != nil { + t.Errorf("Parsing line %s should not have resulted in an error\n", line) + } + } + + sourceTags := map[string]map[string]string{ + "my_gauge": tagsForItem(s.gauges), + "my_counter": tagsForItem(s.counters), + "my_set": tagsForItem(s.sets), + "my_timer": tagsForItem(s.timings), + } + + for statName, tags := range testTags { + for k, v := range tags { + otherValue := sourceTags[statName][k] + if sourceTags[statName][k] != v { + t.Errorf("Error with %s, tag %s: %s != %s", statName, k, v, otherValue) + } + } + } +} + +func tagsForItem(m interface{}) map[string]string { + switch m.(type) { + case map[string]cachedcounter: + for _, v := range m.(map[string]cachedcounter) { + return v.tags + } + case map[string]cachedgauge: + for _, v := range m.(map[string]cachedgauge) { + return v.tags + } + case map[string]cachedset: + for _, v := range m.(map[string]cachedset) { + return v.tags + } + case map[string]cachedtimings: + for _, v := range m.(map[string]cachedtimings) { + return v.tags + } + } + return nil +} + // Test that statsd buckets are parsed to measurement names properly func TestParseName(t *testing.T) { s := NewStatsd() From e07c79259b5c6101cba75d2132a888f1b23b6819 Mon Sep 17 00:00:00 2001 From: Adam Argo Date: Thu, 24 Mar 2016 15:14:30 -0700 Subject: [PATCH 203/287] PR feedback changes closes #927 --- CHANGELOG.md | 1 + plugins/inputs/statsd/README.md | 4 +++- plugins/inputs/statsd/statsd.go | 5 +++-- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a5e6aba19..94d327fbd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ ## v0.11.2 [unreleased] ### Features +- [#927](https://github.com/influxdata/telegraf/pull/927): Adds parsing of tags to the statsd input when using DataDog's dogstatsd extension - [#863](https://github.com/influxdata/telegraf/pull/863): AMQP output: allow external auth. Thanks @ekini! - [#707](https://github.com/influxdata/telegraf/pull/707): Improved prometheus plugin. Thanks @titilambert! - [#878](https://github.com/influxdata/telegraf/pull/878): Added json serializer. Thanks @ch3lo! diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md index 5156f90df..78e5a5b9a 100644 --- a/plugins/inputs/statsd/README.md +++ b/plugins/inputs/statsd/README.md @@ -21,7 +21,8 @@ ## convert measurement names, "." to "_" and "-" to "__" convert_names = true - ## parses tags in the datadog statsd format + ## Parses tags in DataDog's dogstatsd format + ## http://docs.datadoghq.com/guides/dogstatsd/ parse_data_dog_tags = false ## Statsd data translation templates, more info can be read here: @@ -158,6 +159,7 @@ per-measurement in the calculation of percentiles. Raising this limit increases the accuracy of percentiles but also increases the memory usage and cpu time. - **templates** []string: Templates for transforming statsd buckets into influx measurements and tags. +- **parse_data_dog_tags** boolean: Enable parsing of tags in DataDog's dogstatsd format (http://docs.datadoghq.com/guides/dogstatsd/) ### Statsd bucket -> InfluxDB line-protocol Templates diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index b113faa6d..0e7a911e1 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -152,7 +152,8 @@ const sampleConfig = ` ## convert measurement names, "." to "_" and "-" to "__" convert_names = true - ## parses tags in the datadog statsd format + ## Parses tags in the datadog statsd format + ## http://docs.datadoghq.com/guides/dogstatsd/ parse_data_dog_tags = false ## Statsd data translation templates, more info can be read here: @@ -336,7 +337,7 @@ func (s *Statsd) parseStatsdLine(line string) error { pipesplit := strings.Split(line, "|") for _, segment := range pipesplit { if len(segment) > 0 && segment[0] == '#' { - // we have ourselves a tag; they are comma serated + // we have ourselves a tag; they are comma separated tagstr := segment[1:] tags := strings.Split(tagstr, ",") for _, tag := range tags { From 2f215356d6cc2dc125406de40181594cf72d2b65 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 28 Mar 2016 11:57:51 -0600 Subject: [PATCH 204/287] Update statsd graphite parser link to telegraf version --- plugins/inputs/statsd/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md index 78e5a5b9a..8722ce1e9 100644 --- a/plugins/inputs/statsd/README.md +++ b/plugins/inputs/statsd/README.md @@ -203,4 +203,4 @@ mem.cached.localhost:256|g ``` There are many more options available, -[More details can be found here](https://github.com/influxdata/influxdb/tree/master/services/graphite#templates) +[More details can be found here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite) From b1cfb1afe4adcf58261ae919db01ab9281b5bcfb Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 24 Mar 2016 16:53:26 -0600 Subject: [PATCH 205/287] Deprecate statsd convert_names option, expose separator closes #876 --- CHANGELOG.md | 1 + plugins/inputs/statsd/statsd.go | 36 ++++++------- plugins/inputs/statsd/statsd_test.go | 75 +++++++++++++++++----------- 3 files changed, 63 insertions(+), 49 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 94d327fbd..b1c58e60e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ - [#789](https://github.com/influxdata/telegraf/pull/789): Support multiple field specification and `field*` in graphite templates. Thanks @chrusty! - [#762](https://github.com/influxdata/telegraf/pull/762): Nagios parser for the exec plugin. Thanks @titilambert! - [#848](https://github.com/influxdata/telegraf/issues/848): Provide option to omit host tag from telegraf agent. +- [#928](https://github.com/influxdata/telegraf/pull/928): Deprecating the statsd "convert_names" options, expose separator config. ### Bugfixes - [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided. diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index 0e7a911e1..d31e6bfc9 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -21,6 +21,8 @@ const ( UDP_PACKET_SIZE int = 1500 defaultFieldName = "value" + + defaultSeparator = "_" ) var dropwarn = "ERROR: Message queue full. Discarding line [%s] " + @@ -47,6 +49,8 @@ type Statsd struct { DeleteTimings bool ConvertNames bool + // MetricSeparator is the separator between parts of the metric name. + MetricSeparator string // This flag enables parsing of tags in the dogstatsd extention to the // statsd protocol (http://docs.datadoghq.com/guides/dogstatsd/) ParseDataDogTags bool @@ -76,23 +80,6 @@ type Statsd struct { listener *net.UDPConn } -func NewStatsd() *Statsd { - s := Statsd{} - - // Make data structures - s.done = make(chan struct{}) - s.in = make(chan []byte, s.AllowedPendingMessages) - s.gauges = make(map[string]cachedgauge) - s.counters = make(map[string]cachedcounter) - s.sets = make(map[string]cachedset) - s.timings = make(map[string]cachedtimings) - - s.ConvertNames = true - s.UDPPacketSize = UDP_PACKET_SIZE - - return &s -} - // One statsd metric, form is :||@ type metric struct { name string @@ -149,8 +136,8 @@ const sampleConfig = ` ## Percentiles to calculate for timing & histogram stats percentiles = [90] - ## convert measurement names, "." to "_" and "-" to "__" - convert_names = true + ## separator to use between elements of a statsd metric + metric_separator = "_" ## Parses tags in the datadog statsd format ## http://docs.datadoghq.com/guides/dogstatsd/ @@ -257,6 +244,15 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error { s.timings = prevInstance.timings } + if s.ConvertNames { + log.Printf("WARNING statsd: convert_names config option is deprecated," + + " please use metric_separator instead") + } + + if s.MetricSeparator == "" { + s.MetricSeparator = defaultSeparator + } + s.wg.Add(2) // Start the UDP listener go s.udpListen() @@ -500,7 +496,7 @@ func (s *Statsd) parseName(bucket string) (string, string, map[string]string) { var field string name := bucketparts[0] - p, err := graphite.NewGraphiteParser(".", s.Templates, nil) + p, err := graphite.NewGraphiteParser(s.MetricSeparator, s.Templates, nil) if err == nil { p.DefaultTags = tags name, tags, field, _ = p.ApplyTemplate(name) diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index 5dffdc9cd..743e80135 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -8,9 +8,26 @@ import ( "github.com/influxdata/telegraf/testutil" ) +func NewTestStatsd() *Statsd { + s := Statsd{} + + // Make data structures + s.done = make(chan struct{}) + s.in = make(chan []byte, s.AllowedPendingMessages) + s.gauges = make(map[string]cachedgauge) + s.counters = make(map[string]cachedcounter) + s.sets = make(map[string]cachedset) + s.timings = make(map[string]cachedtimings) + + s.MetricSeparator = "_" + s.UDPPacketSize = UDP_PACKET_SIZE + + return &s +} + // Invalid lines should return an error func TestParse_InvalidLines(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() invalid_lines := []string{ "i.dont.have.a.pipe:45g", "i.dont.have.a.colon45|c", @@ -34,7 +51,7 @@ func TestParse_InvalidLines(t *testing.T) { // Invalid sample rates should be ignored and not applied func TestParse_InvalidSampleRate(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() invalid_lines := []string{ "invalid.sample.rate:45|c|0.1", "invalid.sample.rate.2:45|c|@foo", @@ -84,9 +101,9 @@ func TestParse_InvalidSampleRate(t *testing.T) { } } -// Names should be parsed like . -> _ and - -> __ +// Names should be parsed like . -> _ func TestParse_DefaultNameParsing(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() valid_lines := []string{ "valid:1|c", "valid.foo-bar:11|c", @@ -108,7 +125,7 @@ func TestParse_DefaultNameParsing(t *testing.T) { 1, }, { - "valid_foo__bar", + "valid_foo-bar", 11, }, } @@ -123,7 +140,7 @@ func TestParse_DefaultNameParsing(t *testing.T) { // Test that template name transformation works func TestParse_Template(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() s.Templates = []string{ "measurement.measurement.host.service", } @@ -165,7 +182,7 @@ func TestParse_Template(t *testing.T) { // Test that template filters properly func TestParse_TemplateFilter(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() s.Templates = []string{ "cpu.idle.* measurement.measurement.host", } @@ -207,7 +224,7 @@ func TestParse_TemplateFilter(t *testing.T) { // Test that most specific template is chosen func TestParse_TemplateSpecificity(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() s.Templates = []string{ "cpu.* measurement.foo.host", "cpu.idle.* measurement.measurement.host", @@ -245,7 +262,7 @@ func TestParse_TemplateSpecificity(t *testing.T) { // Test that most specific template is chosen func TestParse_TemplateFields(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() s.Templates = []string{ "* measurement.measurement.field", } @@ -359,7 +376,7 @@ func TestParse_Fields(t *testing.T) { // Test that tags within the bucket are parsed correctly func TestParse_Tags(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() tests := []struct { bucket string @@ -412,7 +429,7 @@ func TestParse_Tags(t *testing.T) { // Test that DataDog tags are parsed func TestParse_DataDogTags(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() s.ParseDataDogTags = true lines := []string{ @@ -490,7 +507,7 @@ func tagsForItem(m interface{}) map[string]string { // Test that statsd buckets are parsed to measurement names properly func TestParseName(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() tests := []struct { in_name string @@ -506,7 +523,7 @@ func TestParseName(t *testing.T) { }, { "foo.bar-baz", - "foo_bar__baz", + "foo_bar-baz", }, } @@ -517,8 +534,8 @@ func TestParseName(t *testing.T) { } } - // Test with ConvertNames = false - s.ConvertNames = false + // Test with separator == "." + s.MetricSeparator = "." tests = []struct { in_name string @@ -549,7 +566,7 @@ func TestParseName(t *testing.T) { // Test that measurements with the same name, but different tags, are treated // as different outputs func TestParse_MeasurementsWithSameName(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() // Test that counters work valid_lines := []string{ @@ -607,8 +624,8 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) { "valid.multiple.mixed:1|c:1|ms:2|s:1|g", } - s_single := NewStatsd() - s_multiple := NewStatsd() + s_single := NewTestStatsd() + s_multiple := NewTestStatsd() for _, line := range single_lines { err := s_single.parseStatsdLine(line) @@ -701,7 +718,7 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) { // Valid lines should be parsed and their values should be cached func TestParse_ValidLines(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() valid_lines := []string{ "valid:45|c", "valid:45|s", @@ -720,7 +737,7 @@ func TestParse_ValidLines(t *testing.T) { // Tests low-level functionality of gauges func TestParse_Gauges(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() // Test that gauge +- values work valid_lines := []string{ @@ -786,7 +803,7 @@ func TestParse_Gauges(t *testing.T) { // Tests low-level functionality of sets func TestParse_Sets(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() // Test that sets work valid_lines := []string{ @@ -834,7 +851,7 @@ func TestParse_Sets(t *testing.T) { // Tests low-level functionality of counters func TestParse_Counters(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() // Test that counters work valid_lines := []string{ @@ -888,7 +905,7 @@ func TestParse_Counters(t *testing.T) { // Tests low-level functionality of timings func TestParse_Timings(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() s.Percentiles = []int{90} acc := &testutil.Accumulator{} @@ -925,7 +942,7 @@ func TestParse_Timings(t *testing.T) { // Tests low-level functionality of timings when multiple fields is enabled // and a measurement template has been defined which can parse field names func TestParse_Timings_MultipleFieldsWithTemplate(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() s.Templates = []string{"measurement.field"} s.Percentiles = []int{90} acc := &testutil.Accumulator{} @@ -974,7 +991,7 @@ func TestParse_Timings_MultipleFieldsWithTemplate(t *testing.T) { // but a measurement template hasn't been defined so we can't parse field names // In this case the behaviour should be the same as normal behaviour func TestParse_Timings_MultipleFieldsWithoutTemplate(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() s.Templates = []string{} s.Percentiles = []int{90} acc := &testutil.Accumulator{} @@ -1022,7 +1039,7 @@ func TestParse_Timings_MultipleFieldsWithoutTemplate(t *testing.T) { } func TestParse_Timings_Delete(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() s.DeleteTimings = true fakeacc := &testutil.Accumulator{} var err error @@ -1046,7 +1063,7 @@ func TestParse_Timings_Delete(t *testing.T) { // Tests the delete_gauges option func TestParse_Gauges_Delete(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() s.DeleteGauges = true fakeacc := &testutil.Accumulator{} var err error @@ -1072,7 +1089,7 @@ func TestParse_Gauges_Delete(t *testing.T) { // Tests the delete_sets option func TestParse_Sets_Delete(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() s.DeleteSets = true fakeacc := &testutil.Accumulator{} var err error @@ -1098,7 +1115,7 @@ func TestParse_Sets_Delete(t *testing.T) { // Tests the delete_counters option func TestParse_Counters_Delete(t *testing.T) { - s := NewStatsd() + s := NewTestStatsd() s.DeleteCounters = true fakeacc := &testutil.Accumulator{} var err error From d055d7f496e18ef84aa751e2ad1eecdbc23779d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E5=85=89=E6=9D=83?= Date: Thu, 17 Mar 2016 23:45:29 +0800 Subject: [PATCH 206/287] Add the ipmi plugin --- plugins/inputs/all/all.go | 1 + plugins/inputs/ipmi/README.md | 50 ++++++++ plugins/inputs/ipmi/command.go | 39 ++++++ plugins/inputs/ipmi/connection.go | 90 ++++++++++++++ plugins/inputs/ipmi/ipmi.go | 113 ++++++++++++++++++ plugins/inputs/ipmi/ipmi_test.go | 189 ++++++++++++++++++++++++++++++ 6 files changed, 482 insertions(+) create mode 100644 plugins/inputs/ipmi/README.md create mode 100644 plugins/inputs/ipmi/command.go create mode 100644 plugins/inputs/ipmi/connection.go create mode 100644 plugins/inputs/ipmi/ipmi.go create mode 100644 plugins/inputs/ipmi/ipmi_test.go diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 55a932df2..6c0d933c7 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -16,6 +16,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/haproxy" _ "github.com/influxdata/telegraf/plugins/inputs/httpjson" _ "github.com/influxdata/telegraf/plugins/inputs/influxdb" + _ "github.com/influxdata/telegraf/plugins/inputs/ipmi" _ "github.com/influxdata/telegraf/plugins/inputs/jolokia" _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/leofs" diff --git a/plugins/inputs/ipmi/README.md b/plugins/inputs/ipmi/README.md new file mode 100644 index 000000000..ca42f7d10 --- /dev/null +++ b/plugins/inputs/ipmi/README.md @@ -0,0 +1,50 @@ +# Telegraf ipmi plugin + +Get bare metal metrics using the command line utility `ipmitool` + +see ipmitool(https://sourceforge.net/projects/ipmitool/files/ipmitool/) + +The plugin will use the following command to collect remote host sensor stats: + +ipmitool -I lan -H 192.168.1.1 -U USERID -P PASSW0RD sdr + +## Measurements + +- ipmi_sensor: + + * Tags: `server`,`host` + * Fields: + - status + - value + +## Configuration + +```toml +[[inputs.ipmi]] + ## specify servers via a url matching: + ## [username[:password]@][protocol[(address)]] + ## e.g. + ## root:passwd@lan(127.0.0.1) + ## + servers = ["USERID:PASSW0RD@lan(10.20.2.203)"] +``` + +## Output + +> ipmi_sensor,host=10.20.2.203,inst=Ambient\ Temp status=1i,value=20 1458488465012559455 +> ipmi_sensor,host=10.20.2.203,inst=Altitude status=1i,value=80 1458488465012688613 +> ipmi_sensor,host=10.20.2.203,inst=Avg\ Power status=1i,value=220 1458488465012776511 +> ipmi_sensor,host=10.20.2.203,inst=Planar\ 3.3V status=1i,value=3.28 1458488465012861875 +> ipmi_sensor,host=10.20.2.203,inst=Planar\ 5V status=1i,value=4.9 1458488465012944188 +> ipmi_sensor,host=10.20.2.203,inst=Planar\ 12V status=1i,value=12.04 1458488465013008485 +> ipmi_sensor,host=10.20.2.203,inst=Planar\ VBAT status=1i,value=3.04 1458488465013072508 +> ipmi_sensor,host=10.20.2.203,inst=Fan\ 1A\ Tach status=1i,value=2610 1458488465013137932 +> ipmi_sensor,host=10.20.2.203,inst=Fan\ 1B\ Tach status=1i,value=1775 1458488465013279896 +> ipmi_sensor,host=10.20.2.203,inst=Fan\ 2A\ Tach status=1i,value=1972 1458488465013358177 +> ipmi_sensor,host=10.20.2.203,inst=Fan\ 2B\ Tach status=1i,value=1275 1458488465013434023 +> ipmi_sensor,host=10.20.2.203,inst=Fan\ 3A\ Tach status=1i,value=2929 1458488465013514567 +> ipmi_sensor,host=10.20.2.203,inst=Fan\ 3B\ Tach status=1i,value=2125 1458488465013582616 +> ipmi_sensor,host=10.20.2.203,inst=Fan\ 1 status=1i,value=0 1458488465013643746 +> ipmi_sensor,host=10.20.2.203,inst=Fan\ 2 status=1i,value=0 1458488465013714887 +> ipmi_sensor,host=10.20.2.203,inst=Fan\ 3 status=1i,value=0 1458488465013861854 + diff --git a/plugins/inputs/ipmi/command.go b/plugins/inputs/ipmi/command.go new file mode 100644 index 000000000..4b3a2e81b --- /dev/null +++ b/plugins/inputs/ipmi/command.go @@ -0,0 +1,39 @@ +// command +package ipmi + +import ( + "bytes" + "fmt" + "os/exec" + "strings" +) + +type CommandRunner struct{} + +func (t CommandRunner) cmd(conn *Connection, args ...string) *exec.Cmd { + path := conn.Path + opts := append(conn.options(), args...) + + if path == "" { + path = "ipmitool" + } + + return exec.Command(path, opts...) + +} + +func (t CommandRunner) Run(conn *Connection, args ...string) (string, error) { + cmd := t.cmd(conn, args...) + var stdout bytes.Buffer + var stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + if err != nil { + return "", fmt.Errorf("run %s %s: %s (%s)", + cmd.Path, strings.Join(cmd.Args, " "), stderr.String(), err) + } + + return stdout.String(), err +} diff --git a/plugins/inputs/ipmi/connection.go b/plugins/inputs/ipmi/connection.go new file mode 100644 index 000000000..de555a4c4 --- /dev/null +++ b/plugins/inputs/ipmi/connection.go @@ -0,0 +1,90 @@ +// connection +package ipmi + +import ( + "fmt" + "net" + "strconv" + "strings" +) + +// Connection properties for a Client +type Connection struct { + Hostname string + Username string + Password string + Path string + Port int + Interface string +} + +func NewConnection(server string) *Connection { + conn := &Connection{} + inx1 := strings.Index(server, "@") + inx2 := strings.Index(server, "(") + inx3 := strings.Index(server, ")") + + connstr := server + + if inx1 > 0 { + security := server[0:inx1] + connstr = server[inx1+1 : len(server)] + up := strings.Split(security, ":") + conn.Username = up[0] + conn.Password = up[1] + } + + if inx2 > 0 { + inx2 = strings.Index(connstr, "(") + inx3 = strings.Index(connstr, ")") + + conn.Interface = connstr[0:inx2] + conn.Hostname = connstr[inx2+1 : inx3] + } + + return conn +} + +func (t *Connection) options() []string { + intf := t.Interface + if intf == "" { + intf = "lan" + } + + options := []string{ + "-H", t.Hostname, + "-U", t.Username, + "-P", t.Password, + "-I", intf, + } + + if t.Port != 0 { + options = append(options, "-p", strconv.Itoa(t.Port)) + } + + return options +} + +// RemoteIP returns the remote (bmc) IP address of the Connection +func (c *Connection) RemoteIP() string { + if net.ParseIP(c.Hostname) == nil { + addrs, err := net.LookupHost(c.Hostname) + if err != nil && len(addrs) > 0 { + return addrs[0] + } + } + return c.Hostname +} + +// LocalIP returns the local (client) IP address of the Connection +func (c *Connection) LocalIP() string { + conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", c.Hostname, c.Port)) + if err != nil { + // don't bother returning an error, since this value will never + // make it to the bmc if we can't connect to it. + return c.Hostname + } + _ = conn.Close() + host, _, _ := net.SplitHostPort(conn.LocalAddr().String()) + return host +} diff --git a/plugins/inputs/ipmi/ipmi.go b/plugins/inputs/ipmi/ipmi.go new file mode 100644 index 000000000..8a34c8d3f --- /dev/null +++ b/plugins/inputs/ipmi/ipmi.go @@ -0,0 +1,113 @@ +// ipmi +package ipmi + +import ( + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type Ipmi struct { + Servers []string + runner Runner +} + +var sampleConfig = ` + ## specify servers via a url matching: + ## [username[:password]@][protocol[(address)]] + ## e.g. + ## root:passwd@lan(127.0.0.1) + ## + servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] +` + +func NewIpmi() *Ipmi { + return &Ipmi{ + runner: CommandRunner{}, + } +} + +func (m *Ipmi) SampleConfig() string { + return sampleConfig +} + +func (m *Ipmi) Description() string { + return "Read metrics from one or many bare metal servers" +} + +func (m *Ipmi) Gather(acc telegraf.Accumulator) error { + if m.runner == nil { + m.runner = CommandRunner{} + } + for _, serv := range m.Servers { + err := m.gatherServer(serv, acc) + if err != nil { + return err + } + } + + return nil +} + +func (m *Ipmi) gatherServer(serv string, acc telegraf.Accumulator) error { + conn := NewConnection(serv) + + res, err := m.runner.Run(conn, "sdr") + if err != nil { + return err + } + + lines := strings.Split(res, "\n") + + for i := 0; i < len(lines); i++ { + vals := strings.Split(lines[i], "|") + if len(vals) == 3 { + tags := map[string]string{"server": conn.Hostname, "name": trim(vals[0])} + fields := make(map[string]interface{}) + if strings.EqualFold("ok", trim(vals[2])) { + fields["status"] = 1 + } else { + fields["status"] = 0 + } + + val1 := trim(vals[1]) + + if strings.Index(val1, " ") > 0 { + val := strings.Split(val1, " ")[0] + fields["value"] = Atofloat(val) + } else { + fields["value"] = 0.0 + } + + acc.AddFields("ipmi_sensor", fields, tags, time.Now()) + } + } + + return nil +} + +type Runner interface { + Run(conn *Connection, args ...string) (string, error) +} + +func Atofloat(val string) float64 { + f, err := strconv.ParseFloat(val, 64) + if err != nil { + return float64(0) + } else { + return float64(f) + } +} + +func trim(s string) string { + return strings.TrimSpace(s) +} + +func init() { + inputs.Add("ipmi", func() telegraf.Input { + return &Ipmi{} + }) +} diff --git a/plugins/inputs/ipmi/ipmi_test.go b/plugins/inputs/ipmi/ipmi_test.go new file mode 100644 index 000000000..1e0a57bd1 --- /dev/null +++ b/plugins/inputs/ipmi/ipmi_test.go @@ -0,0 +1,189 @@ +// ipmi_test +package ipmi + +import ( + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const serv = "USERID:PASSW0RD@lan(192.168.1.1)" + +const cmdReturn = ` +Ambient Temp | 20 degrees C | ok +Altitude | 80 feet | ok +Avg Power | 210 Watts | ok +Planar 3.3V | 3.29 Volts | ok +Planar 5V | 4.90 Volts | ok +Planar 12V | 12.04 Volts | ok +Planar VBAT | 3.05 Volts | ok +Fan 1A Tach | 2610 RPM | ok +Fan 1B Tach | 1775 RPM | ok +Fan 2A Tach | 2001 RPM | ok +Fan 2B Tach | 1275 RPM | ok +Fan 3A Tach | 2929 RPM | ok +Fan 3B Tach | 2125 RPM | ok +Fan 1 | 0x00 | ok +Fan 2 | 0x00 | ok +Fan 3 | 0x00 | ok +Front Panel | 0x00 | ok +Video USB | 0x00 | ok +DASD Backplane 1 | 0x00 | ok +SAS Riser | 0x00 | ok +PCI Riser 1 | 0x00 | ok +PCI Riser 2 | 0x00 | ok +CPU 1 | 0x00 | ok +CPU 2 | 0x00 | ok +All CPUs | 0x00 | ok +One of The CPUs | 0x00 | ok +IOH Temp Status | 0x00 | ok +CPU 1 OverTemp | 0x00 | ok +CPU 2 OverTemp | 0x00 | ok +CPU Fault Reboot | 0x00 | ok +Aux Log | 0x00 | ok +NMI State | 0x00 | ok +ABR Status | 0x00 | ok +Firmware Error | 0x00 | ok +PCIs | 0x00 | ok +CPUs | 0x00 | ok +DIMMs | 0x00 | ok +Sys Board Fault | 0x00 | ok +Power Supply 1 | 0x00 | ok +Power Supply 2 | 0x00 | ok +PS 1 Fan Fault | 0x00 | ok +PS 2 Fan Fault | 0x00 | ok +VT Fault | 0x00 | ok +Pwr Rail A Fault | 0x00 | ok +Pwr Rail B Fault | 0x00 | ok +Pwr Rail C Fault | 0x00 | ok +Pwr Rail D Fault | 0x00 | ok +Pwr Rail E Fault | 0x00 | ok +PS 1 Therm Fault | 0x00 | ok +PS 2 Therm Fault | 0x00 | ok +PS1 12V OV Fault | 0x00 | ok +PS2 12V OV Fault | 0x00 | ok +PS1 12V UV Fault | 0x00 | ok +PS2 12V UV Fault | 0x00 | ok +PS1 12V OC Fault | 0x00 | ok +PS2 12V OC Fault | 0x00 | ok +PS 1 VCO Fault | 0x00 | ok +PS 2 VCO Fault | 0x00 | ok +Power Unit | 0x00 | ok +Cooling Zone 1 | 0x00 | ok +Cooling Zone 2 | 0x00 | ok +Cooling Zone 3 | 0x00 | ok +Drive 0 | 0x00 | ok +Drive 1 | 0x00 | ok +Drive 2 | 0x00 | ok +Drive 3 | 0x00 | ok +Drive 4 | 0x00 | ok +Drive 5 | 0x00 | ok +Drive 6 | 0x00 | ok +Drive 7 | 0x00 | ok +Drive 8 | 0x00 | ok +Drive 9 | 0x00 | ok +Drive 10 | 0x00 | ok +Drive 11 | 0x00 | ok +Drive 12 | 0x00 | ok +Drive 13 | 0x00 | ok +Drive 14 | 0x00 | ok +Drive 15 | 0x00 | ok +All DIMMS | 0x00 | ok +One of the DIMMs | 0x00 | ok +DIMM 1 | 0x00 | ok +DIMM 2 | 0x00 | ok +DIMM 3 | 0x00 | ok +DIMM 4 | 0x00 | ok +DIMM 5 | 0x00 | ok +DIMM 6 | 0x00 | ok +DIMM 7 | 0x00 | ok +DIMM 8 | 0x00 | ok +DIMM 9 | 0x00 | ok +DIMM 10 | 0x00 | ok +DIMM 11 | 0x00 | ok +DIMM 12 | 0x00 | ok +DIMM 13 | 0x00 | ok +DIMM 14 | 0x00 | ok +DIMM 15 | 0x00 | ok +DIMM 16 | 0x00 | ok +DIMM 17 | 0x00 | ok +DIMM 18 | 0x00 | ok +DIMM 1 Temp | 0x00 | ok +DIMM 2 Temp | 0x00 | ok +DIMM 3 Temp | 0x00 | ok +DIMM 4 Temp | 0x00 | ok +DIMM 5 Temp | 0x00 | ok +DIMM 6 Temp | 0x00 | ok +DIMM 7 Temp | 0x00 | ok +DIMM 8 Temp | 0x00 | ok +DIMM 9 Temp | 0x00 | ok +DIMM 10 Temp | 0x00 | ok +DIMM 11 Temp | 0x00 | ok +DIMM 12 Temp | 0x00 | ok +DIMM 13 Temp | 0x00 | ok +DIMM 14 Temp | 0x00 | ok +DIMM 15 Temp | 0x00 | ok +DIMM 16 Temp | 0x00 | ok +DIMM 17 Temp | 0x00 | ok +DIMM 18 Temp | 0x00 | ok +PCI 1 | 0x00 | ok +PCI 2 | 0x00 | ok +PCI 3 | 0x00 | ok +PCI 4 | 0x00 | ok +All PCI Error | 0x00 | ok +One of PCI Error | 0x00 | ok +IPMI Watchdog | 0x00 | ok +Host Power | 0x00 | ok +DASD Backplane 2 | 0x00 | ok +DASD Backplane 3 | Not Readable | ns +DASD Backplane 4 | Not Readable | ns +Backup Memory | 0x00 | ok +Progress | 0x00 | ok +Planar Fault | 0x00 | ok +SEL Fullness | 0x00 | ok +PCI 5 | 0x00 | ok +OS RealTime Mod | 0x00 | ok +` + +type runnerMock struct { + out string + err error +} + +func newRunnerMock(out string, err error) Runner { + return &runnerMock{ + out: out, + err: err, + } +} + +func (r runnerMock) Run(conn *Connection, args ...string) (out string, err error) { + if r.err != nil { + return out, r.err + } + return r.out, nil +} + +func TestIpmi(t *testing.T) { + i := &Ipmi{ + Servers: []string{"USERID:PASSW0RD@lan(192.168.1.1)"}, + runner: newRunnerMock(cmdReturn, nil), + } + + var acc testutil.Accumulator + + err := i.Gather(&acc) + + require.NoError(t, err) + + assert.Equal(t, acc.NFields(), 266, "non-numeric measurements should be ignored") +} + +func TestIpmiConnection(t *testing.T) { + conn := NewConnection(serv) + assert.Equal(t, "USERID", conn.Username) + assert.Equal(t, "lan", conn.Interface) + +} From 21ece2d76d8c03229960f84ad4c9b7a7e8323a75 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 23 Mar 2016 09:40:38 -0600 Subject: [PATCH 207/287] Convert ipmi stats/tags to underscore and lowercase closes #888 --- internal/internal.go | 18 ++++ internal/internal_test.go | 29 ++++++ plugins/inputs/all/all.go | 2 +- plugins/inputs/ipmi/README.md | 50 ----------- plugins/inputs/ipmi_sensor/README.md | 42 +++++++++ .../inputs/{ipmi => ipmi_sensor}/command.go | 3 +- .../{ipmi => ipmi_sensor}/connection.go | 3 +- plugins/inputs/{ipmi => ipmi_sensor}/ipmi.go | 66 ++++++++------ .../inputs/{ipmi => ipmi_sensor}/ipmi_test.go | 90 ++++++++++++++++++- 9 files changed, 221 insertions(+), 82 deletions(-) delete mode 100644 plugins/inputs/ipmi/README.md create mode 100644 plugins/inputs/ipmi_sensor/README.md rename plugins/inputs/{ipmi => ipmi_sensor}/command.go (96%) rename plugins/inputs/{ipmi => ipmi_sensor}/connection.go (98%) rename plugins/inputs/{ipmi => ipmi_sensor}/ipmi.go (61%) rename plugins/inputs/{ipmi => ipmi_sensor}/ipmi_test.go (80%) diff --git a/internal/internal.go b/internal/internal.go index 8a427909e..ff73aae84 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -11,6 +11,7 @@ import ( "os" "strings" "time" + "unicode" ) const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" @@ -122,6 +123,23 @@ func GetTLSConfig( return t, nil } +// SnakeCase converts the given string to snake case following the Golang format: +// acronyms are converted to lower-case and preceded by an underscore. +func SnakeCase(in string) string { + runes := []rune(in) + length := len(runes) + + var out []rune + for i := 0; i < length; i++ { + if i > 0 && unicode.IsUpper(runes[i]) && ((i+1 < length && unicode.IsLower(runes[i+1])) || unicode.IsLower(runes[i-1])) { + out = append(out, '_') + } + out = append(out, unicode.ToLower(runes[i])) + } + + return string(out) +} + // Glob will test a string pattern, potentially containing globs, against a // subject string. The result is a simple true/false, determining whether or // not the glob pattern matched the subject text. diff --git a/internal/internal_test.go b/internal/internal_test.go index 7f0c687a8..e4a5eed14 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -42,3 +42,32 @@ func TestGlob(t *testing.T) { testGlobNoMatch(t, pattern, "this_is_a_test") } } + +type SnakeTest struct { + input string + output string +} + +var tests = []SnakeTest{ + {"a", "a"}, + {"snake", "snake"}, + {"A", "a"}, + {"ID", "id"}, + {"MOTD", "motd"}, + {"Snake", "snake"}, + {"SnakeTest", "snake_test"}, + {"APIResponse", "api_response"}, + {"SnakeID", "snake_id"}, + {"SnakeIDGoogle", "snake_id_google"}, + {"LinuxMOTD", "linux_motd"}, + {"OMGWTFBBQ", "omgwtfbbq"}, + {"omg_wtf_bbq", "omg_wtf_bbq"}, +} + +func TestSnakeCase(t *testing.T) { + for _, test := range tests { + if SnakeCase(test.input) != test.output { + t.Errorf(`SnakeCase("%s"), wanted "%s", got \%s"`, test.input, test.output, SnakeCase(test.input)) + } + } +} diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 6c0d933c7..4f7d45f60 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -16,7 +16,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/haproxy" _ "github.com/influxdata/telegraf/plugins/inputs/httpjson" _ "github.com/influxdata/telegraf/plugins/inputs/influxdb" - _ "github.com/influxdata/telegraf/plugins/inputs/ipmi" + _ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor" _ "github.com/influxdata/telegraf/plugins/inputs/jolokia" _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/leofs" diff --git a/plugins/inputs/ipmi/README.md b/plugins/inputs/ipmi/README.md deleted file mode 100644 index ca42f7d10..000000000 --- a/plugins/inputs/ipmi/README.md +++ /dev/null @@ -1,50 +0,0 @@ -# Telegraf ipmi plugin - -Get bare metal metrics using the command line utility `ipmitool` - -see ipmitool(https://sourceforge.net/projects/ipmitool/files/ipmitool/) - -The plugin will use the following command to collect remote host sensor stats: - -ipmitool -I lan -H 192.168.1.1 -U USERID -P PASSW0RD sdr - -## Measurements - -- ipmi_sensor: - - * Tags: `server`,`host` - * Fields: - - status - - value - -## Configuration - -```toml -[[inputs.ipmi]] - ## specify servers via a url matching: - ## [username[:password]@][protocol[(address)]] - ## e.g. - ## root:passwd@lan(127.0.0.1) - ## - servers = ["USERID:PASSW0RD@lan(10.20.2.203)"] -``` - -## Output - -> ipmi_sensor,host=10.20.2.203,inst=Ambient\ Temp status=1i,value=20 1458488465012559455 -> ipmi_sensor,host=10.20.2.203,inst=Altitude status=1i,value=80 1458488465012688613 -> ipmi_sensor,host=10.20.2.203,inst=Avg\ Power status=1i,value=220 1458488465012776511 -> ipmi_sensor,host=10.20.2.203,inst=Planar\ 3.3V status=1i,value=3.28 1458488465012861875 -> ipmi_sensor,host=10.20.2.203,inst=Planar\ 5V status=1i,value=4.9 1458488465012944188 -> ipmi_sensor,host=10.20.2.203,inst=Planar\ 12V status=1i,value=12.04 1458488465013008485 -> ipmi_sensor,host=10.20.2.203,inst=Planar\ VBAT status=1i,value=3.04 1458488465013072508 -> ipmi_sensor,host=10.20.2.203,inst=Fan\ 1A\ Tach status=1i,value=2610 1458488465013137932 -> ipmi_sensor,host=10.20.2.203,inst=Fan\ 1B\ Tach status=1i,value=1775 1458488465013279896 -> ipmi_sensor,host=10.20.2.203,inst=Fan\ 2A\ Tach status=1i,value=1972 1458488465013358177 -> ipmi_sensor,host=10.20.2.203,inst=Fan\ 2B\ Tach status=1i,value=1275 1458488465013434023 -> ipmi_sensor,host=10.20.2.203,inst=Fan\ 3A\ Tach status=1i,value=2929 1458488465013514567 -> ipmi_sensor,host=10.20.2.203,inst=Fan\ 3B\ Tach status=1i,value=2125 1458488465013582616 -> ipmi_sensor,host=10.20.2.203,inst=Fan\ 1 status=1i,value=0 1458488465013643746 -> ipmi_sensor,host=10.20.2.203,inst=Fan\ 2 status=1i,value=0 1458488465013714887 -> ipmi_sensor,host=10.20.2.203,inst=Fan\ 3 status=1i,value=0 1458488465013861854 - diff --git a/plugins/inputs/ipmi_sensor/README.md b/plugins/inputs/ipmi_sensor/README.md new file mode 100644 index 000000000..2ece4ea20 --- /dev/null +++ b/plugins/inputs/ipmi_sensor/README.md @@ -0,0 +1,42 @@ +# Telegraf ipmi plugin + +Get bare metal metrics using the command line utility `ipmitool` + +see ipmitool(https://sourceforge.net/projects/ipmitool/files/ipmitool/) + +The plugin will use the following command to collect remote host sensor stats: + +ipmitool -I lan -H 192.168.1.1 -U USERID -P PASSW0RD sdr + +## Measurements + +- ipmi_sensor: + + * Tags: `name`, `server`, `unit` + * Fields: + - status + - value + +## Configuration + +```toml +[[inputs.ipmi]] + ## specify servers via a url matching: + ## [username[:password]@][protocol[(address)]] + ## e.g. + ## root:passwd@lan(127.0.0.1) + ## + servers = ["USERID:PASSW0RD@lan(10.20.2.203)"] +``` + +## Output + +``` +> ipmi_sensor,server=10.20.2.203,unit=degrees_c,name=ambient_temp status=1i,value=20 1458488465012559455 +> ipmi_sensor,server=10.20.2.203,unit=feet,name=altitude status=1i,value=80 1458488465012688613 +> ipmi_sensor,server=10.20.2.203,unit=watts,name=avg_power status=1i,value=220 1458488465012776511 +> ipmi_sensor,server=10.20.2.203,unit=volts,name=planar_3.3v status=1i,value=3.28 1458488465012861875 +> ipmi_sensor,server=10.20.2.203,unit=volts,name=planar_vbat status=1i,value=3.04 1458488465013072508 +> ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1a_tach status=1i,value=2610 1458488465013137932 +> ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1b_tach status=1i,value=1775 1458488465013279896 +``` diff --git a/plugins/inputs/ipmi/command.go b/plugins/inputs/ipmi_sensor/command.go similarity index 96% rename from plugins/inputs/ipmi/command.go rename to plugins/inputs/ipmi_sensor/command.go index 4b3a2e81b..353c27d36 100644 --- a/plugins/inputs/ipmi/command.go +++ b/plugins/inputs/ipmi_sensor/command.go @@ -1,5 +1,4 @@ -// command -package ipmi +package ipmi_sensor import ( "bytes" diff --git a/plugins/inputs/ipmi/connection.go b/plugins/inputs/ipmi_sensor/connection.go similarity index 98% rename from plugins/inputs/ipmi/connection.go rename to plugins/inputs/ipmi_sensor/connection.go index de555a4c4..3f4461438 100644 --- a/plugins/inputs/ipmi/connection.go +++ b/plugins/inputs/ipmi_sensor/connection.go @@ -1,5 +1,4 @@ -// connection -package ipmi +package ipmi_sensor import ( "fmt" diff --git a/plugins/inputs/ipmi/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go similarity index 61% rename from plugins/inputs/ipmi/ipmi.go rename to plugins/inputs/ipmi_sensor/ipmi.go index 8a34c8d3f..aec56a0e4 100644 --- a/plugins/inputs/ipmi/ipmi.go +++ b/plugins/inputs/ipmi_sensor/ipmi.go @@ -1,5 +1,4 @@ -// ipmi -package ipmi +package ipmi_sensor import ( "strconv" @@ -60,30 +59,41 @@ func (m *Ipmi) gatherServer(serv string, acc telegraf.Accumulator) error { return err } + // each line will look something like + // Planar VBAT | 3.05 Volts | ok lines := strings.Split(res, "\n") - for i := 0; i < len(lines); i++ { vals := strings.Split(lines[i], "|") - if len(vals) == 3 { - tags := map[string]string{"server": conn.Hostname, "name": trim(vals[0])} - fields := make(map[string]interface{}) - if strings.EqualFold("ok", trim(vals[2])) { - fields["status"] = 1 - } else { - fields["status"] = 0 - } - - val1 := trim(vals[1]) - - if strings.Index(val1, " ") > 0 { - val := strings.Split(val1, " ")[0] - fields["value"] = Atofloat(val) - } else { - fields["value"] = 0.0 - } - - acc.AddFields("ipmi_sensor", fields, tags, time.Now()) + if len(vals) != 3 { + continue } + + tags := map[string]string{ + "server": conn.Hostname, + "name": transform(vals[0]), + } + + fields := make(map[string]interface{}) + if strings.EqualFold("ok", trim(vals[2])) { + fields["status"] = 1 + } else { + fields["status"] = 0 + } + + val1 := trim(vals[1]) + + if strings.Index(val1, " ") > 0 { + // split middle column into value and unit + valunit := strings.SplitN(val1, " ", 2) + fields["value"] = Atofloat(valunit[0]) + if len(valunit) > 1 { + tags["unit"] = transform(valunit[1]) + } + } else { + fields["value"] = 0.0 + } + + acc.AddFields("ipmi_sensor", fields, tags, time.Now()) } return nil @@ -96,9 +106,9 @@ type Runner interface { func Atofloat(val string) float64 { f, err := strconv.ParseFloat(val, 64) if err != nil { - return float64(0) + return 0.0 } else { - return float64(f) + return f } } @@ -106,8 +116,14 @@ func trim(s string) string { return strings.TrimSpace(s) } +func transform(s string) string { + s = trim(s) + s = strings.ToLower(s) + return strings.Replace(s, " ", "_", -1) +} + func init() { - inputs.Add("ipmi", func() telegraf.Input { + inputs.Add("ipmi_sensor", func() telegraf.Input { return &Ipmi{} }) } diff --git a/plugins/inputs/ipmi/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go similarity index 80% rename from plugins/inputs/ipmi/ipmi_test.go rename to plugins/inputs/ipmi_sensor/ipmi_test.go index 1e0a57bd1..c62447e39 100644 --- a/plugins/inputs/ipmi/ipmi_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_test.go @@ -1,5 +1,4 @@ -// ipmi_test -package ipmi +package ipmi_sensor import ( "testing" @@ -179,6 +178,93 @@ func TestIpmi(t *testing.T) { require.NoError(t, err) assert.Equal(t, acc.NFields(), 266, "non-numeric measurements should be ignored") + + var tests = []struct { + fields map[string]interface{} + tags map[string]string + }{ + { + map[string]interface{}{ + "value": float64(20), + "status": int(1), + }, + map[string]string{ + "name": "ambient_temp", + "server": "192.168.1.1", + "unit": "degrees_c", + }, + }, + { + map[string]interface{}{ + "value": float64(80), + "status": int(1), + }, + map[string]string{ + "name": "altitude", + "server": "192.168.1.1", + "unit": "feet", + }, + }, + { + map[string]interface{}{ + "value": float64(210), + "status": int(1), + }, + map[string]string{ + "name": "avg_power", + "server": "192.168.1.1", + "unit": "watts", + }, + }, + { + map[string]interface{}{ + "value": float64(4.9), + "status": int(1), + }, + map[string]string{ + "name": "planar_5v", + "server": "192.168.1.1", + "unit": "volts", + }, + }, + { + map[string]interface{}{ + "value": float64(3.05), + "status": int(1), + }, + map[string]string{ + "name": "planar_vbat", + "server": "192.168.1.1", + "unit": "volts", + }, + }, + { + map[string]interface{}{ + "value": float64(2610), + "status": int(1), + }, + map[string]string{ + "name": "fan_1a_tach", + "server": "192.168.1.1", + "unit": "rpm", + }, + }, + { + map[string]interface{}{ + "value": float64(1775), + "status": int(1), + }, + map[string]string{ + "name": "fan_1b_tach", + "server": "192.168.1.1", + "unit": "rpm", + }, + }, + } + + for _, test := range tests { + acc.AssertContainsTaggedFields(t, "ipmi_sensor", test.fields, test.tags) + } } func TestIpmiConnection(t *testing.T) { From 2cfc882c6274317c9ce94ecbad772198f42afa39 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 29 Mar 2016 12:18:23 -0600 Subject: [PATCH 208/287] changelog & readme update --- CHANGELOG.md | 3 ++- README.md | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b1c58e60e..06a964aa2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## v0.11.2 [unreleased] +## v0.12.0 [unreleased] ### Features - [#927](https://github.com/influxdata/telegraf/pull/927): Adds parsing of tags to the statsd input when using DataDog's dogstatsd extension @@ -14,6 +14,7 @@ - [#762](https://github.com/influxdata/telegraf/pull/762): Nagios parser for the exec plugin. Thanks @titilambert! - [#848](https://github.com/influxdata/telegraf/issues/848): Provide option to omit host tag from telegraf agent. - [#928](https://github.com/influxdata/telegraf/pull/928): Deprecating the statsd "convert_names" options, expose separator config. +- [](https://github.com/influxdata/telegraf/pull/919): ipmi_sensor input plugin. Thanks @ebookbug! ### Bugfixes - [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided. diff --git a/README.md b/README.md index 440af37cf..000b27620 100644 --- a/README.md +++ b/README.md @@ -170,6 +170,7 @@ Currently implemented sources: * haproxy * httpjson (generic JSON-emitting http service plugin) * influxdb +* ipmi_sensor * jolokia * leofs * lustre2 From bd640ae2c5e6adebc9f98850675e95f8771be54b Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 29 Mar 2016 12:19:07 -0600 Subject: [PATCH 209/287] changelog fixup --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 06a964aa2..001630ae0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,7 @@ - [#762](https://github.com/influxdata/telegraf/pull/762): Nagios parser for the exec plugin. Thanks @titilambert! - [#848](https://github.com/influxdata/telegraf/issues/848): Provide option to omit host tag from telegraf agent. - [#928](https://github.com/influxdata/telegraf/pull/928): Deprecating the statsd "convert_names" options, expose separator config. -- [](https://github.com/influxdata/telegraf/pull/919): ipmi_sensor input plugin. Thanks @ebookbug! +- [#919](https://github.com/influxdata/telegraf/pull/919): ipmi_sensor input plugin. Thanks @ebookbug! ### Bugfixes - [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided. From 4ad551be9a3098b8829666600a78a583530c3ed7 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 28 Mar 2016 13:36:44 -0600 Subject: [PATCH 210/287] add '*' to metric prefixes for consistency --- docs/CONFIGURATION.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 853dc6d05..810dc9470 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -141,12 +141,12 @@ fields which begin with `time_`. # Drop all metrics about containers for kubelet [[inputs.prometheus]] urls = ["http://kube-node-1:4194/metrics"] - namedrop = ["container_"] + namedrop = ["container_*"] # Only store rest client related metrics for kubelet [[inputs.prometheus]] urls = ["http://kube-node-1:4194/metrics"] - namepass = ["rest_client_"] + namepass = ["rest_client_*"] ``` #### Input config: prefix, suffix, and override From 2f41ae24f87863c119bfdb35fbb3f8b46a1bb6fa Mon Sep 17 00:00:00 2001 From: Ross McDonald Date: Wed, 30 Mar 2016 10:51:21 -0500 Subject: [PATCH 211/287] Swap systemd command, as it was causing issues on Debian. --- scripts/post-install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/post-install.sh b/scripts/post-install.sh index d4c5df443..53d745ca9 100644 --- a/scripts/post-install.sh +++ b/scripts/post-install.sh @@ -64,7 +64,7 @@ elif [[ -f /etc/debian_version ]]; then which systemctl &>/dev/null if [[ $? -eq 0 ]]; then install_systemd - deb-systemd-invoke restart telegraf.service + systemctl restart telegraf else # Assuming sysv install_init From e03f684508ef6182d04e38c64b6534fa3e604155 Mon Sep 17 00:00:00 2001 From: Rudenkovk Konstantin Date: Fri, 25 Mar 2016 19:24:46 +0400 Subject: [PATCH 212/287] Fix parse fcgi URI path in php-fpm input module closes #934 --- CHANGELOG.md | 1 + plugins/inputs/phpfpm/phpfpm.go | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 001630ae0..08c4b6ceb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ - [#907](https://github.com/influxdata/telegraf/issues/907): Fix prometheus invalid label/measurement name key. - [#841](https://github.com/influxdata/telegraf/issues/841): Fix memcached unix socket panic. - [#873](https://github.com/influxdata/telegraf/issues/873): Fix SNMP plugin sometimes not returning metrics. Thanks @titiliambert! +- [#934](https://github.com/influxdata/telegraf/pull/934): phpfpm: Fix fcgi uri path. Thanks @rudenkovk! ## v0.11.1 [2016-03-17] diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index 199b0005b..169fe2194 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -122,6 +122,11 @@ func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { fcgiIp := socketAddr[0] fcgiPort, _ := strconv.Atoi(socketAddr[1]) fcgi, err = newFcgiClient(fcgiIp, fcgiPort) + if len(u.Path) > 1 { + statusPath = strings.Trim(u.Path, "/") + } else { + statusPath = "status" + } } else { socketAddr := strings.Split(addr, ":") if len(socketAddr) >= 2 { From 62105bb3531bad2e9c25d68731bbdb215163c8a5 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 30 Mar 2016 11:54:01 -0600 Subject: [PATCH 213/287] Use github paho mqtt client instead of gerrit this might fix #921 --- Godeps | 2 +- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 6 +++--- plugins/inputs/mqtt_consumer/mqtt_consumer_test.go | 2 +- plugins/outputs/mqtt/mqtt.go | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Godeps b/Godeps index 75cb813ba..2fc53d8c5 100644 --- a/Godeps +++ b/Godeps @@ -1,4 +1,3 @@ -git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git 617c801af238c3af2d9e72c5d4a0f02edad03ce5 github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9 github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687 @@ -12,6 +11,7 @@ github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3 github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367 +github.com/eclipse/paho.mqtt.golang 4ab3e867810d1ec5f35157c59e965054dbf43a0d github.com/fsouza/go-dockerclient a49c8269a6899cae30da1f8a4b82e0ce945f9967 github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032 diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index e36889703..50a20740a 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -11,7 +11,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" - "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" + "github.com/eclipse/paho.mqtt.golang" ) type MQTTConsumer struct { @@ -39,7 +39,7 @@ type MQTTConsumer struct { InsecureSkipVerify bool sync.Mutex - client *mqtt.Client + client mqtt.Client // channel of all incoming raw mqtt messages in chan mqtt.Message done chan struct{} @@ -163,7 +163,7 @@ func (m *MQTTConsumer) receiver() { } } -func (m *MQTTConsumer) recvMessage(_ *mqtt.Client, msg mqtt.Message) { +func (m *MQTTConsumer) recvMessage(_ mqtt.Client, msg mqtt.Message) { m.in <- msg } diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index e926ebbb2..7090a46c3 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/assert" - "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" + "github.com/eclipse/paho.mqtt.golang" ) const ( diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index 10c1b1a9e..f13500db9 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -10,7 +10,7 @@ import ( "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" - paho "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" + paho "github.com/eclipse/paho.mqtt.golang" ) var sampleConfig = ` @@ -57,7 +57,7 @@ type MQTT struct { // Use SSL but skip chain & host verification InsecureSkipVerify bool - client *paho.Client + client paho.Client opts *paho.ClientOptions serializer serializers.Serializer From 91957f084800ad000ad1a2f46920277de4fa6322 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 30 Mar 2016 14:43:05 -0600 Subject: [PATCH 214/287] Update Godeps_windows file to HEAD --- Godeps_windows | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/Godeps_windows b/Godeps_windows index c4a2561d1..f499fa915 100644 --- a/Godeps_windows +++ b/Godeps_windows @@ -1,4 +1,3 @@ -git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git 617c801af238c3af2d9e72c5d4a0f02edad03ce5 github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9 github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5 @@ -6,22 +5,28 @@ github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687 github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857 github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4 github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99 +github.com/couchbase/go-couchbase cb664315a324d87d19c879d9cc67fda6be8c2ac1 +github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2 +github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6 github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc -github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d +github.com/davecgh/go-spew fc32781af5e85e548d3f1abaf0fa3dbe8a72495c github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3 github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367 +github.com/eclipse/paho.mqtt.golang 4ab3e867810d1ec5f35157c59e965054dbf43a0d github.com/fsouza/go-dockerclient a49c8269a6899cae30da1f8a4b82e0ce945f9967 +github.com/go-ini/ini 776aa739ce9373377cd16f526cdf06cb4c89b40f github.com/go-ole/go-ole 50055884d646dd9434f16bbb5c9801749b9bafe4 github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032 -github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7 +github.com/golang/snappy 5979233c5d6225d4a8e438cdd0b411888449ddab github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2 github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478 github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da -github.com/influxdata/influxdb e3fef5593c21644f2b43af55d6e17e70910b0e48 +github.com/influxdata/influxdb c190778997f4154294e6160c41b90140641ac915 github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0 +github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74 github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720 github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36 github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1 @@ -32,15 +37,17 @@ github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3 github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980 +github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2 github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831 github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37 github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8 github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f -github.com/shirou/gopsutil 1de1357e7737a536c7f4ff6be7bd27977db4d2cb +github.com/shirou/gopsutil 1f32ce1bb380845be7f5d174ac641a2c592c0c42 github.com/shirou/w32 ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5 github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744 +github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94 github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3 github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8 From 9347a70425cccfc2d619b391fc61891c945746ef Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 31 Mar 2016 20:37:04 -0600 Subject: [PATCH 215/287] Fix httpjson README closes #947 --- plugins/inputs/httpjson/README.md | 65 +++++++++++++------------------ 1 file changed, 28 insertions(+), 37 deletions(-) diff --git a/plugins/inputs/httpjson/README.md b/plugins/inputs/httpjson/README.md index fc45dd567..707b256df 100644 --- a/plugins/inputs/httpjson/README.md +++ b/plugins/inputs/httpjson/README.md @@ -6,7 +6,7 @@ For example, if you have a service called _mycollector_, which has HTTP endpoint plugin like this: ``` -[[httpjson.services]] +[[inputs.httpjson]] name = "mycollector" servers = [ @@ -24,7 +24,7 @@ plugin like this: You can also specify which keys from server response should be considered tags: ``` -[[httpjson.services]] +[[inputs.httpjson]] ... tag_keys = [ @@ -36,10 +36,10 @@ You can also specify which keys from server response should be considered tags: You can also specify additional request parameters for the service: ``` -[[httpjson.services]] +[[inputs.httpjson]] ... - [httpjson.services.parameters] + [inputs.httpjson.parameters] event_type = "cpu_spike" threshold = "0.75" @@ -48,10 +48,10 @@ You can also specify additional request parameters for the service: You can also specify additional request header parameters for the service: ``` -[[httpjson.services]] +[[inputs.httpjson]] ... - [httpjson.services.headers] + [inputs.httpjson.headers] X-Auth-Token = "my-xauth-token" apiVersion = "v1" ``` @@ -61,18 +61,14 @@ You can also specify additional request header parameters for the service: Let's say that we have a service named "mycollector" configured like this: ``` -[httpjson] - [[httpjson.services]] - name = "mycollector" - - servers = [ - "http://my.service.com/_stats" - ] - - # HTTP method to use (case-sensitive) - method = "GET" - - tag_keys = ["service"] +[[inputs.httpjson]] + name = "mycollector" + servers = [ + "http://my.service.com/_stats" + ] + # HTTP method to use (case-sensitive) + method = "GET" + tag_keys = ["service"] ``` which responds with the following JSON: @@ -102,26 +98,21 @@ There is also the option to collect JSON from multiple services, here is an example doing that. ``` -[httpjson] - [[httpjson.services]] - name = "mycollector1" +[[inputs.httpjson]] + name = "mycollector1" + servers = [ + "http://my.service1.com/_stats" + ] + # HTTP method to use (case-sensitive) + method = "GET" - servers = [ - "http://my.service1.com/_stats" - ] - - # HTTP method to use (case-sensitive) - method = "GET" - - [[httpjson.services]] - name = "mycollector2" - - servers = [ - "http://service.net/json/stats" - ] - - # HTTP method to use (case-sensitive) - method = "POST" +[[inputs.httpjson]] + name = "mycollector2" + servers = [ + "http://service.net/json/stats" + ] + # HTTP method to use (case-sensitive) + method = "POST" ``` The services respond with the following JSON: From 6ff0fc6d831a360b21ba87a6baaa1263a2d3cd23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Florent=20Rami=C3=A8re?= Date: Thu, 31 Mar 2016 11:14:20 +0200 Subject: [PATCH 216/287] Add compression/acks/retry conf to Kafka output plugin The following configuration is now possible ## CompressionCodec represents the various compression codecs recognized by Kafka in messages. ## "none" : No compression ## "gzip" : Gzip compression ## "snappy" : Snappy compression # compression_codec = "none" ## RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements it must see before responding ## "none" : the producer never waits for an acknowledgement from the broker. This option provides the lowest latency but the weakest durability guarantees (some data will be lost when a server fails). ## "leader" : the producer gets an acknowledgement after the leader replica has received the data. This option provides better durability as the client waits until the server acknowledges the request as successful (only messages that were written to the now-dead leader but not yet replicated will be lost). ## "leader_and_replicas" : the producer gets an acknowledgement after all in-sync replicas have received the data. This option provides the best durability, we guarantee that no messages will be lost as long as at least one in sync replica remains. # required_acks = "leader_and_replicas" ## The total number of times to retry sending a message # max_retry = "3" --- plugins/outputs/kafka/kafka.go | 85 ++++++++++++++++++++++++++++++++-- 1 file changed, 81 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 8dea2b2a1..2bba2e77e 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -3,6 +3,8 @@ package kafka import ( "crypto/tls" "fmt" + "strconv" + "strings" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" @@ -19,6 +21,12 @@ type Kafka struct { Topic string // Routing Key Tag RoutingTag string `toml:"routing_tag"` + // Compression Codec Tag + CompressionCodec string + // RequiredAcks Tag + RequiredAcks string + // MaxRetry Tag + MaxRetry string // Legacy SSL config options // TLS client certificate @@ -53,6 +61,21 @@ var sampleConfig = ` ## ie, if this tag exists, it's value will be used as the routing key routing_tag = "host" + ## CompressionCodec represents the various compression codecs recognized by Kafka in messages. + ## "none" : No compression + ## "gzip" : Gzip compression + ## "snappy" : Snappy compression + # compression_codec = "none" + + ## RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements it must see before responding + ## "none" : the producer never waits for an acknowledgement from the broker. This option provides the lowest latency but the weakest durability guarantees (some data will be lost when a server fails). + ## "leader" : the producer gets an acknowledgement after the leader replica has received the data. This option provides better durability as the client waits until the server acknowledges the request as successful (only messages that were written to the now-dead leader but not yet replicated will be lost). + ## "leader_and_replicas" : the producer gets an acknowledgement after all in-sync replicas have received the data. This option provides the best durability, we guarantee that no messages will be lost as long as at least one in sync replica remains. + # required_acks = "leader_and_replicas" + + ## The total number of times to retry sending a message + # max_retry = "3" + ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" # ssl_cert = "/etc/telegraf/cert.pem" @@ -71,12 +94,66 @@ func (k *Kafka) SetSerializer(serializer serializers.Serializer) { k.serializer = serializer } +func requiredAcks(value string) (sarama.RequiredAcks, error) { + switch strings.ToLower(value) { + case "none": + return sarama.NoResponse, nil + case "leader": + return sarama.WaitForLocal, nil + case "", "leader_and_replicas": + return sarama.WaitForAll, nil + default: + return 0, fmt.Errorf("Failed to recognize required_acks: %s", value) + } +} + +func compressionCodec(value string) (sarama.CompressionCodec, error) { + switch strings.ToLower(value) { + case "gzip": + return sarama.CompressionGZIP, nil + case "snappy": + return sarama.CompressionSnappy, nil + case "", "none": + return sarama.CompressionNone, nil + default: + return 0, fmt.Errorf("Failed to recognize compression_codec: %s", value) + } +} + +func maxRetry(value string) (int, error) { + if value == "" { + return 3, nil + } + maxRetry, err := strconv.Atoi(value) + if err != nil { + return -1, fmt.Errorf("Failed to parse max_retry: %s", value) + } + if maxRetry < 0 { + return -1, fmt.Errorf("max_retry is %s but it should not be negative", value) + } + return maxRetry, nil +} + func (k *Kafka) Connect() error { config := sarama.NewConfig() - // Wait for all in-sync replicas to ack the message - config.Producer.RequiredAcks = sarama.WaitForAll - // Retry up to 10 times to produce the message - config.Producer.Retry.Max = 10 + + requiredAcks, err := requiredAcks(k.RequiredAcks) + if err != nil { + return err + } + config.Producer.RequiredAcks = requiredAcks + + compressionCodec, err := compressionCodec(k.CompressionCodec) + if err != nil { + return err + } + config.Producer.Compression = compressionCodec + + maxRetry, err := maxRetry(k.MaxRetry) + if err != nil { + return err + } + config.Producer.Retry.Max = maxRetry // Legacy support ssl config if k.Certificate != "" { From 8c3371c4acf84775d5a206af8541cdfe7e648d1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Florent=20Rami=C3=A8re?= Date: Thu, 31 Mar 2016 17:27:14 +0200 Subject: [PATCH 217/287] Use numerical codes instead of symbolic ones --- plugins/outputs/kafka/kafka.go | 92 +++++++--------------------------- 1 file changed, 18 insertions(+), 74 deletions(-) diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 2bba2e77e..3cecfeeab 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -3,8 +3,6 @@ package kafka import ( "crypto/tls" "fmt" - "strconv" - "strings" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" @@ -22,11 +20,11 @@ type Kafka struct { // Routing Key Tag RoutingTag string `toml:"routing_tag"` // Compression Codec Tag - CompressionCodec string + CompressionCodec int // RequiredAcks Tag - RequiredAcks string + RequiredAcks int // MaxRetry Tag - MaxRetry string + MaxRetry int // Legacy SSL config options // TLS client certificate @@ -61,20 +59,20 @@ var sampleConfig = ` ## ie, if this tag exists, it's value will be used as the routing key routing_tag = "host" - ## CompressionCodec represents the various compression codecs recognized by Kafka in messages. - ## "none" : No compression - ## "gzip" : Gzip compression - ## "snappy" : Snappy compression - # compression_codec = "none" + ## CompressionCodec represents the various compression codecs recognized by Kafka in messages. + ## 0 : No compression + ## 1 : Gzip compression + ## 2 : Snappy compression + compression_codec = 0 - ## RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements it must see before responding - ## "none" : the producer never waits for an acknowledgement from the broker. This option provides the lowest latency but the weakest durability guarantees (some data will be lost when a server fails). - ## "leader" : the producer gets an acknowledgement after the leader replica has received the data. This option provides better durability as the client waits until the server acknowledges the request as successful (only messages that were written to the now-dead leader but not yet replicated will be lost). - ## "leader_and_replicas" : the producer gets an acknowledgement after all in-sync replicas have received the data. This option provides the best durability, we guarantee that no messages will be lost as long as at least one in sync replica remains. - # required_acks = "leader_and_replicas" + ## RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements it must see before responding + ## 0 : the producer never waits for an acknowledgement from the broker. This option provides the lowest latency but the weakest durability guarantees (some data will be lost when a server fails). + ## 1 : the producer gets an acknowledgement after the leader replica has received the data. This option provides better durability as the client waits until the server acknowledges the request as successful (only messages that were written to the now-dead leader but not yet replicated will be lost). + ## -1 : the producer gets an acknowledgement after all in-sync replicas have received the data. This option provides the best durability, we guarantee that no messages will be lost as long as at least one in sync replica remains. + required_acks = -1 - ## The total number of times to retry sending a message - # max_retry = "3" + ## The total number of times to retry sending a message + max_retry = 3 ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" @@ -94,66 +92,12 @@ func (k *Kafka) SetSerializer(serializer serializers.Serializer) { k.serializer = serializer } -func requiredAcks(value string) (sarama.RequiredAcks, error) { - switch strings.ToLower(value) { - case "none": - return sarama.NoResponse, nil - case "leader": - return sarama.WaitForLocal, nil - case "", "leader_and_replicas": - return sarama.WaitForAll, nil - default: - return 0, fmt.Errorf("Failed to recognize required_acks: %s", value) - } -} - -func compressionCodec(value string) (sarama.CompressionCodec, error) { - switch strings.ToLower(value) { - case "gzip": - return sarama.CompressionGZIP, nil - case "snappy": - return sarama.CompressionSnappy, nil - case "", "none": - return sarama.CompressionNone, nil - default: - return 0, fmt.Errorf("Failed to recognize compression_codec: %s", value) - } -} - -func maxRetry(value string) (int, error) { - if value == "" { - return 3, nil - } - maxRetry, err := strconv.Atoi(value) - if err != nil { - return -1, fmt.Errorf("Failed to parse max_retry: %s", value) - } - if maxRetry < 0 { - return -1, fmt.Errorf("max_retry is %s but it should not be negative", value) - } - return maxRetry, nil -} - func (k *Kafka) Connect() error { config := sarama.NewConfig() - requiredAcks, err := requiredAcks(k.RequiredAcks) - if err != nil { - return err - } - config.Producer.RequiredAcks = requiredAcks - - compressionCodec, err := compressionCodec(k.CompressionCodec) - if err != nil { - return err - } - config.Producer.Compression = compressionCodec - - maxRetry, err := maxRetry(k.MaxRetry) - if err != nil { - return err - } - config.Producer.Retry.Max = maxRetry + config.Producer.RequiredAcks = sarama.RequiredAcks(k.RequiredAcks) + config.Producer.Compression = sarama.CompressionCodec(k.CompressionCodec) + config.Producer.Retry.Max = k.MaxRetry // Legacy support ssl config if k.Certificate != "" { From 51f4e9c0d3ee6889cba7ce86cd5a0566f8bf8be4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Florent=20Rami=C3=A8re?= Date: Thu, 31 Mar 2016 17:30:39 +0200 Subject: [PATCH 218/287] Update changelog closes #945 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 08c4b6ceb..f43aca161 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ - [#848](https://github.com/influxdata/telegraf/issues/848): Provide option to omit host tag from telegraf agent. - [#928](https://github.com/influxdata/telegraf/pull/928): Deprecating the statsd "convert_names" options, expose separator config. - [#919](https://github.com/influxdata/telegraf/pull/919): ipmi_sensor input plugin. Thanks @ebookbug! +- [#945](https://github.com/influxdata/telegraf/pull/945): KAFKA output: codec, acks, and retry configuration. Thanks @framiere! ### Bugfixes - [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided. From e436b2d72004a940549c21751ba33acf13117004 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 31 Mar 2016 17:50:24 -0600 Subject: [PATCH 219/287] Cleanup & standardize config file changes: - -sample-config will now comment out all but a few default plugins. - config file parse errors will output path to bad conf file. - cleanup 80-char line-length and some other style issues. - default package conf file will now have all plugins, but commented out. closes #199 closes #944 --- etc/telegraf.conf | 1178 ++++++++++++++++- internal/config/config.go | 164 ++- plugins/inputs/disque/disque.go | 5 +- plugins/inputs/dns_query/dns_query.go | 3 +- plugins/inputs/exec/exec.go | 2 +- .../inputs/kafka_consumer/kafka_consumer.go | 2 +- plugins/inputs/mesos/mesos.go | 11 +- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 2 +- plugins/inputs/nats_consumer/nats_consumer.go | 2 +- plugins/inputs/postgresql/postgresql.go | 3 +- .../postgresql_extensible.go | 57 +- plugins/inputs/prometheus/prometheus.go | 8 +- plugins/inputs/snmp/snmp.go | 5 +- plugins/inputs/tcp_listener/tcp_listener.go | 2 +- plugins/inputs/udp_listener/udp_listener.go | 2 +- plugins/outputs/amqp/amqp.go | 2 +- plugins/outputs/file/file.go | 2 +- plugins/outputs/kafka/kafka.go | 23 +- plugins/outputs/mqtt/mqtt.go | 2 +- plugins/outputs/nsq/nsq.go | 2 +- 20 files changed, 1347 insertions(+), 130 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 0e740f5c8..43d647beb 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -1,19 +1,21 @@ -# Telegraf configuration - +# Telegraf Configuration +# # Telegraf is entirely plugin driven. All metrics are gathered from the # declared inputs, and sent to the declared outputs. - +# # Plugins must be declared in here to be active. # To deactivate a plugin, comment out the name and any variables. - +# # Use 'telegraf -config telegraf.conf -test' to see what metrics a config # file would generate. + # Global tags can be specified here in key="value" format. [global_tags] # dc = "us-east-1" # will tag all metrics with dc=us-east-1 # rack = "1a" + # Configuration for telegraf agent [agent] ## Default data collection interval for all inputs @@ -48,10 +50,12 @@ quiet = false ## Override default hostname, if empty use os.Hostname() hostname = "" + ## If set to true, do no set the "host" tag in the telegraf agent. + omit_hostname = false ############################################################################### -# OUTPUTS # +# OUTPUT PLUGINS # ############################################################################### # Configuration for influxdb server to send metrics to @@ -87,59 +91,1189 @@ # insecure_skip_verify = false +# # Configuration for Amon Server to send metrics to. +# [[outputs.amon]] +# ## Amon Server Key +# server_key = "my-server-key" # required. +# +# ## Amon Instance URL +# amon_instance = "https://youramoninstance" # required +# +# ## Connection timeout. +# # timeout = "5s" + + +# # Configuration for the AMQP server to send metrics to +# [[outputs.amqp]] +# ## AMQP url +# url = "amqp://localhost:5672/influxdb" +# ## AMQP exchange +# exchange = "telegraf" +# ## Auth method. PLAIN and EXTERNAL are supported +# # auth_method = "PLAIN" +# ## Telegraf tag to use as a routing key +# ## ie, if this tag exists, it's value will be used as the routing key +# routing_tag = "host" +# +# ## InfluxDB retention policy +# # retention_policy = "default" +# ## InfluxDB database +# # database = "telegraf" +# ## InfluxDB precision +# # precision = "s" +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Configuration for AWS CloudWatch output. +# [[outputs.cloudwatch]] +# ## Amazon REGION +# region = 'us-east-1' +# +# ## Namespace for the CloudWatch MetricDatums +# namespace = 'InfluxData/Telegraf' + + +# # Configuration for DataDog API to send metrics to. +# [[outputs.datadog]] +# ## Datadog API key +# apikey = "my-secret-key" # required. +# +# ## Connection timeout. +# # timeout = "5s" + + +# # Send telegraf metrics to file(s) +# [[outputs.file]] +# ## Files to write to, "stdout" is a specially handled file. +# files = ["stdout", "/tmp/metrics.out"] +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Configuration for Graphite server to send metrics to +# [[outputs.graphite]] +# ## TCP endpoint for your graphite instance. +# servers = ["localhost:2003"] +# ## Prefix metrics name +# prefix = "" +# ## timeout in seconds for the write connection to graphite +# timeout = 2 + + +# # Configuration for the Kafka server to send metrics to +# [[outputs.kafka]] +# ## URLs of kafka brokers +# brokers = ["localhost:9092"] +# ## Kafka topic for producer messages +# topic = "telegraf" +# ## Telegraf tag to use as a routing key +# ## ie, if this tag exists, it's value will be used as the routing key +# routing_tag = "host" +# +# ## CompressionCodec represents the various compression codecs recognized by +# ## Kafka in messages. +# ## 0 : No compression +# ## 1 : Gzip compression +# ## 2 : Snappy compression +# compression_codec = 0 +# +# ## RequiredAcks is used in Produce Requests to tell the broker how many +# ## replica acknowledgements it must see before responding +# ## 0 : the producer never waits for an acknowledgement from the broker. +# ## This option provides the lowest latency but the weakest durability +# ## guarantees (some data will be lost when a server fails). +# ## 1 : the producer gets an acknowledgement after the leader replica has +# ## received the data. This option provides better durability as the +# ## client waits until the server acknowledges the request as successful +# ## (only messages that were written to the now-dead leader but not yet +# ## replicated will be lost). +# ## -1: the producer gets an acknowledgement after all in-sync replicas have +# ## received the data. This option provides the best durability, we +# ## guarantee that no messages will be lost as long as at least one in +# ## sync replica remains. +# required_acks = -1 +# +# ## The total number of times to retry sending a message +# max_retry = 3 +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Configuration for the AWS Kinesis output. +# [[outputs.kinesis]] +# ## Amazon REGION of kinesis endpoint. +# region = "ap-southeast-2" +# ## Kinesis StreamName must exist prior to starting telegraf. +# streamname = "StreamName" +# ## PartitionKey as used for sharding data. +# partitionkey = "PartitionKey" +# ## format of the Data payload in the kinesis PutRecord, supported +# ## String and Custom. +# format = "string" +# ## debug will show upstream aws messages. +# debug = false + + +# # Configuration for Librato API to send metrics to. +# [[outputs.librato]] +# ## Librator API Docs +# ## http://dev.librato.com/v1/metrics-authentication +# +# ## Librato API user +# api_user = "telegraf@influxdb.com" # required. +# +# ## Librato API token +# api_token = "my-secret-token" # required. +# +# ### Debug +# # debug = false +# +# ### Tag Field to populate source attribute (optional) +# ### This is typically the _hostname_ from which the metric was obtained. +# source_tag = "host" +# +# ## Connection timeout. +# # timeout = "5s" + + +# # Configuration for MQTT server to send metrics to +# [[outputs.mqtt]] +# servers = ["localhost:1883"] # required. +# +# ## MQTT outputs send metrics to this topic format +# ## "///" +# ## ex: prefix/web01.example.com/mem +# topic_prefix = "telegraf" +# +# ## username and password to connect MQTT server. +# # username = "telegraf" +# # password = "metricsmetricsmetricsmetrics" +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send telegraf measurements to NSQD +# [[outputs.nsq]] +# ## Location of nsqd instance listening on TCP +# server = "localhost:4150" +# ## NSQ topic for producer messages +# topic = "telegraf" +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Configuration for OpenTSDB server to send metrics to +# [[outputs.opentsdb]] +# ## prefix for metrics keys +# prefix = "my.specific.prefix." +# +# ## Telnet Mode ## +# ## DNS name of the OpenTSDB server in telnet mode +# host = "opentsdb.example.com" +# +# ## Port of the OpenTSDB server in telnet mode +# port = 4242 +# +# ## Debug true - Prints OpenTSDB communication +# debug = false + + +# # Configuration for the Prometheus client to spawn +# [[outputs.prometheus_client]] +# ## Address to listen on +# # listen = ":9126" + + +# # Configuration for the Riemann server to send metrics to +# [[outputs.riemann]] +# ## URL of server +# url = "localhost:5555" +# ## transport protocol to use either tcp or udp +# transport = "tcp" +# ## separator to use between input name and field name in Riemann service name +# separator = " " + + + ############################################################################### -# INPUTS # +# INPUT PLUGINS # ############################################################################### # Read metrics about cpu usage [[inputs.cpu]] - # Whether to report per-cpu stats or not + ## Whether to report per-cpu stats or not percpu = true - # Whether to report total system cpu stats or not + ## Whether to report total system cpu stats or not totalcpu = true - # Comment this line if you want the raw CPU time metrics + ## Comment this line if you want the raw CPU time metrics fielddrop = ["time_*"] + # Read metrics about disk usage by mount point [[inputs.disk]] - # By default, telegraf gather stats for all mountpoints. - # Setting mountpoints will restrict the stats to the specified mountpoints. - # mount_points=["/"] + ## By default, telegraf gather stats for all mountpoints. + ## Setting mountpoints will restrict the stats to the specified mountpoints. + # mount_points = ["/"] - # Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually - # present on /run, /var/run, /dev/shm or /dev). + ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually + ## present on /run, /var/run, /dev/shm or /dev). ignore_fs = ["tmpfs", "devtmpfs"] + # Read metrics about disk IO by device [[inputs.diskio]] - # By default, telegraf will gather stats for all devices including - # disk partitions. - # Setting devices will restrict the stats to the specified devices. + ## By default, telegraf will gather stats for all devices including + ## disk partitions. + ## Setting devices will restrict the stats to the specified devices. # devices = ["sda", "sdb"] - # Uncomment the following line if you do not need disk serial numbers. + ## Uncomment the following line if you do not need disk serial numbers. # skip_serial_number = true -# Get kernel statistics from /proc/stat -[[inputs.kernel]] - # no configuration # Read metrics about memory usage [[inputs.mem]] # no configuration + # Get the number of processes and group them by status [[inputs.processes]] # no configuration + # Read metrics about swap memory usage [[inputs.swap]] # no configuration + # Read metrics about system load & uptime [[inputs.system]] # no configuration +# # Read stats from an aerospike server +# [[inputs.aerospike]] +# ## Aerospike servers to connect to (with port) +# ## This plugin will query all namespaces the aerospike +# ## server has configured and get stats for them. +# servers = ["localhost:3000"] + + +# # Read Apache status information (mod_status) +# [[inputs.apache]] +# ## An array of Apache status URI to gather stats. +# urls = ["http://localhost/server-status?auto"] + + +# # Read metrics of bcache from stats_total and dirty_data +# [[inputs.bcache]] +# ## Bcache sets path +# ## If not specified, then default is: +# bcachePath = "/sys/fs/bcache" +# +# ## By default, telegraf gather stats for all bcache devices +# ## Setting devices will restrict the stats to the specified +# ## bcache devices. +# bcacheDevs = ["bcache0"] + + +# # Read metrics from one or many couchbase clusters +# [[inputs.couchbase]] +# ## specify servers via a url matching: +# ## [protocol://][:password]@address[:port] +# ## e.g. +# ## http://couchbase-0.example.com/ +# ## http://admin:secret@couchbase-0.example.com:8091/ +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no protocol is specifed, HTTP is used. +# ## If no port is specified, 8091 is used. +# servers = ["http://localhost:8091"] + + +# # Read CouchDB Stats from one or more servers +# [[inputs.couchdb]] +# ## Works with CouchDB stats endpoints out of the box +# ## Multiple HOSTs from which to read CouchDB stats: +# hosts = ["http://localhost:8086/_stats"] + + +# # Read metrics from one or many disque servers +# [[inputs.disque]] +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port and password. +# ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. +# ## If no servers are specified, then localhost is used as the host. +# servers = ["localhost"] + + +# # Query given DNS server and gives statistics +# [[inputs.dns_query]] +# ## servers to query +# servers = ["8.8.8.8"] # required +# +# ## Domains or subdomains to query. "."(root) is default +# domains = ["."] # optional +# +# ## Query record type. Default is "A" +# ## Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. +# record_type = "A" # optional +# +# ## Dns server port. 53 is default +# port = 53 # optional +# +# ## Query timeout in seconds. Default is 2 seconds +# timeout = 2 # optional + + +# # Read metrics about docker containers +# [[inputs.docker]] +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# endpoint = "unix:///var/run/docker.sock" +# ## Only collect metrics for these containers, collect all if empty +# container_names = [] + + +# # Read statistics from one or many dovecot servers +# [[inputs.dovecot]] +# ## specify dovecot servers via an address:port list +# ## e.g. +# ## localhost:24242 +# ## +# ## If no servers are specified, then localhost is used as the host. +# servers = ["localhost:24242"] +# ## Only collect metrics for these domains, collect all if empty +# domains = [] + + +# # Read stats from one or more Elasticsearch servers or clusters +# [[inputs.elasticsearch]] +# ## specify a list of one or more Elasticsearch servers +# servers = ["http://localhost:9200"] +# +# ## set local to false when you want to read the indices stats from all nodes +# ## within the cluster +# local = true +# +# ## set cluster_health to true when you want to also obtain cluster level stats +# cluster_health = false + + +# # Read metrics from one or more commands that can output to stdout +# [[inputs.exec]] +# ## Commands array +# commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] +# +# ## measurement name suffix (for separating different commands) +# name_suffix = "_mycollector" +# +# ## Data format to consume. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics of haproxy, via socket or csv stats page +# [[inputs.haproxy]] +# ## An array of address to gather stats about. Specify an ip on hostname +# ## with optional port. ie localhost, 10.10.3.33:1936, etc. +# +# ## If no servers are specified, then default to 127.0.0.1:1936 +# servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"] +# ## Or you can also use local socket(not work yet) +# ## servers = ["socket://run/haproxy/admin.sock"] + + +# # Read flattened metrics from one or more JSON HTTP endpoints +# [[inputs.httpjson]] +# ## NOTE This plugin only reads numerical measurements, strings and booleans +# ## will be ignored. +# +# ## a name for the service being polled +# name = "webserver_stats" +# +# ## URL of each server in the service's cluster +# servers = [ +# "http://localhost:9999/stats/", +# "http://localhost:9998/stats/", +# ] +# +# ## HTTP method to use: GET or POST (case-sensitive) +# method = "GET" +# +# ## List of tag names to extract from top-level of JSON server response +# # tag_keys = [ +# # "my_tag_1", +# # "my_tag_2" +# # ] +# +# ## HTTP parameters (all values must be strings) +# [inputs.httpjson.parameters] +# event_type = "cpu_spike" +# threshold = "0.75" +# +# ## HTTP Header parameters (all values must be strings) +# # [inputs.httpjson.headers] +# # X-Auth-Token = "my-xauth-token" +# # apiVersion = "v1" +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints +# [[inputs.influxdb]] +# ## Works with InfluxDB debug endpoints out of the box, +# ## but other services can use this format too. +# ## See the influxdb plugin's README for more details. +# +# ## Multiple URLs from which to read InfluxDB-formatted JSON +# urls = [ +# "http://localhost:8086/debug/vars" +# ] + + +# # Read metrics from one or many bare metal servers +# [[inputs.ipmi_sensor]] +# ## specify servers via a url matching: +# ## [username[:password]@][protocol[(address)]] +# ## e.g. +# ## root:passwd@lan(127.0.0.1) +# ## +# servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] + + +# # Read JMX metrics through Jolokia +# [[inputs.jolokia]] +# ## This is the context root used to compose the jolokia url +# context = "/jolokia/read" +# +# ## List of servers exposing jolokia read service +# [[inputs.jolokia.servers]] +# name = "stable" +# host = "192.168.103.2" +# port = "8180" +# # username = "myuser" +# # password = "mypassword" +# +# ## List of metrics collected on above servers +# ## Each metric consists in a name, a jmx path and either +# ## a pass or drop slice attribute. +# ## This collect all heap memory usage metrics. +# [[inputs.jolokia.metrics]] +# name = "heap_memory_usage" +# jmx = "/java.lang:type=Memory/HeapMemoryUsage" + + +# # Read metrics from a LeoFS Server via SNMP +# [[inputs.leofs]] +# ## An array of URI to gather stats about LeoFS. +# ## Specify an ip or hostname with port. ie 127.0.0.1:4020 +# servers = ["127.0.0.1:4021"] + + +# # Read metrics from local Lustre service on OST, MDS +# [[inputs.lustre2]] +# ## An array of /proc globs to search for Lustre stats +# ## If not specified, the default will work on Lustre 2.5.x +# ## +# # ost_procfiles = [ +# # "/proc/fs/lustre/obdfilter/*/stats", +# # "/proc/fs/lustre/osd-ldiskfs/*/stats" +# # ] +# # mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"] + + +# # Gathers metrics from the /3.0/reports MailChimp API +# [[inputs.mailchimp]] +# ## MailChimp API key +# ## get from https://admin.mailchimp.com/account/api/ +# api_key = "" # required +# ## Reports for campaigns sent more than days_old ago will not be collected. +# ## 0 means collect all. +# days_old = 0 +# ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old +# # campaign_id = "" + + +# # Read metrics from one or many memcached servers +# [[inputs.memcached]] +# ## An array of address to gather stats about. Specify an ip on hostname +# ## with optional port. ie localhost, 10.0.0.1:11211, etc. +# servers = ["localhost:11211"] +# # unix_sockets = ["/var/run/memcached.sock"] + + +# # Telegraf plugin for gathering metrics from N Mesos masters +# [[inputs.mesos]] +# # Timeout, in ms. +# timeout = 100 +# # A list of Mesos masters, default value is localhost:5050. +# masters = ["localhost:5050"] +# # Metrics groups to be collected, by default, all enabled. +# master_collections = [ +# "resources", +# "master", +# "system", +# "slaves", +# "frameworks", +# "messages", +# "evqueue", +# "registrar", +# ] + + +# # Read metrics from one or many MongoDB servers +# [[inputs.mongodb]] +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port add password. ie, +# ## mongodb://user:auth_key@10.10.3.30:27017, +# ## mongodb://10.10.3.33:18832, +# ## 10.0.0.1:10000, etc. +# servers = ["127.0.0.1:27017"] + + +# # Read metrics from one or many mysql servers +# [[inputs.mysql]] +# ## specify servers via a url matching: +# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]] +# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name +# ## e.g. +# ## root:passwd@tcp(127.0.0.1:3306)/?tls=false +# ## root@tcp(127.0.0.1:3306)/?tls=false +# ## +# ## If no servers are specified, then localhost is used as the host. +# servers = ["tcp(127.0.0.1:3306)/"] + + +# # Read metrics about network interface usage +# [[inputs.net]] +# ## By default, telegraf gathers stats from any up interface (excluding loopback) +# ## Setting interfaces will tell it to gather these explicit interfaces, +# ## regardless of status. +# ## +# # interfaces = ["eth0"] + + +# # TCP or UDP 'ping' given url and collect response time in seconds +# [[inputs.net_response]] +# ## Protocol, must be "tcp" or "udp" +# protocol = "tcp" +# ## Server address (default localhost) +# address = "github.com:80" +# ## Set timeout (default 1.0 seconds) +# timeout = 1.0 +# ## Set read timeout (default 1.0 seconds) +# read_timeout = 1.0 +# ## Optional string sent to the server +# # send = "ssh" +# ## Optional expected string in answer +# # expect = "ssh" + + +# # Read TCP metrics such as established, time wait and sockets counts. +# [[inputs.netstat]] +# # no configuration + + +# # Read Nginx's basic status information (ngx_http_stub_status_module) +# [[inputs.nginx]] +# ## An array of Nginx stub_status URI to gather stats. +# urls = ["http://localhost/status"] + + +# # Read NSQ topic and channel statistics. +# [[inputs.nsq]] +# ## An array of NSQD HTTP API endpoints +# endpoints = ["http://localhost:4151"] + + +# # Get standard NTP query metrics, requires ntpq executable. +# [[inputs.ntpq]] +# ## If false, set the -n ntpq flag. Can reduce metric gather time. +# dns_lookup = true + + +# # Read metrics of passenger using passenger-status +# [[inputs.passenger]] +# ## Path of passenger-status. +# ## +# ## Plugin gather metric via parsing XML output of passenger-status +# ## More information about the tool: +# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html +# ## +# ## If no path is specified, then the plugin simply execute passenger-status +# ## hopefully it can be found in your PATH +# command = "passenger-status -v --show=xml" + + +# # Read metrics of phpfpm, via HTTP status page or socket +# [[inputs.phpfpm]] +# ## An array of addresses to gather stats about. Specify an ip or hostname +# ## with optional port and path +# ## +# ## Plugin can be configured in three modes (either can be used): +# ## - http: the URL must start with http:// or https://, ie: +# ## "http://localhost/status" +# ## "http://192.168.130.1/status?full" +# ## +# ## - unixsocket: path to fpm socket, ie: +# ## "/var/run/php5-fpm.sock" +# ## or using a custom fpm status path: +# ## "/var/run/php5-fpm.sock:fpm-custom-status-path" +# ## +# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: +# ## "fcgi://10.0.0.12:9000/status" +# ## "cgi://10.0.10.12:9001/status" +# ## +# ## Example of multiple gathering from local socket and remove host +# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] +# urls = ["http://localhost/status"] + + +# # Ping given url(s) and return statistics +# [[inputs.ping]] +# ## NOTE: this plugin forks the ping command. You may need to set capabilities +# ## via setcap cap_net_raw+p /bin/ping +# +# ## urls to ping +# urls = ["www.google.com"] # required +# ## number of pings to send (ping -c ) +# count = 1 # required +# ## interval, in s, at which to ping. 0 == default (ping -i ) +# ping_interval = 0.0 +# ## ping timeout, in s. 0 == no timeout (ping -t ) +# timeout = 0.0 +# ## interface to send ping from (ping -I ) +# interface = "" + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production +# ## +# ## All connection parameters are optional. +# ## +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# ## +# address = "host=localhost user=postgres sslmode=disable" +# +# ## A list of databases to pull metrics about. If not specified, metrics for all +# ## databases are gathered. +# # databases = ["app_production", "testing"] + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql_extensible]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production +# # +# ## All connection parameters are optional. # +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# # +# address = "host=localhost user=postgres sslmode=disable" +# ## A list of databases to pull metrics about. If not specified, metrics for all +# ## databases are gathered. +# ## databases = ["app_production", "testing"] +# # +# ## Define the toml config where the sql queries are stored +# ## New queries can be added, if the withdbname is set to true and there is no +# ## databases defined in the 'databases field', the sql query is ended by a +# ## 'is not null' in order to make the query succeed. +# ## Example : +# ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become +# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" +# ## because the databases variable was set to ['postgres', 'pgbench' ] and the +# ## withdbname was true. Be careful that if the withdbname is set to false you +# ## don't have to define the where clause (aka with the dbname) the tagvalue +# ## field is used to define custom tags (separated by comas) +# # +# ## Structure : +# ## [[inputs.postgresql_extensible.query]] +# ## sqlquery string +# ## version string +# ## withdbname boolean +# ## tagvalue string (coma separated) +# [[inputs.postgresql_extensible.query]] +# sqlquery="SELECT * FROM pg_stat_database" +# version=901 +# withdbname=false +# tagvalue="" +# [[inputs.postgresql_extensible.query]] +# sqlquery="SELECT * FROM pg_stat_bgwriter" +# version=901 +# withdbname=false +# tagvalue="" + + +# # Read metrics from one or many PowerDNS servers +# [[inputs.powerdns]] +# ## An array of sockets to gather stats about. +# ## Specify a path to unix socket. +# unix_sockets = ["/var/run/pdns.controlsocket"] + + +# # Monitor process cpu and memory usage +# [[inputs.procstat]] +# ## Must specify one of: pid_file, exe, or pattern +# ## PID file to monitor process +# pid_file = "/var/run/nginx.pid" +# ## executable name (ie, pgrep ) +# # exe = "nginx" +# ## pattern as argument for pgrep (ie, pgrep -f ) +# # pattern = "nginx" +# ## user as argument for pgrep (ie, pgrep -u ) +# # user = "nginx" +# +# ## Field name prefix +# prefix = "" + + +# # Read metrics from one or many prometheus clients +# [[inputs.prometheus]] +# ## An array of urls to scrape metrics from. +# urls = ["http://localhost:9100/metrics"] +# +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# ## Use bearer token for authorization +# # bearer_token = /path/to/bearer/token + + +# # Reads last_run_summary.yaml file and converts to measurments +# [[inputs.puppetagent]] +# ## Location of puppet last run summary file +# location = "/var/lib/puppet/state/last_run_summary.yaml" + + +# # Read metrics from one or many RabbitMQ servers via the management API +# [[inputs.rabbitmq]] +# url = "http://localhost:15672" # required +# # name = "rmq-server-1" # optional tag +# # username = "guest" +# # password = "guest" +# +# ## A list of nodes to pull metrics about. If not specified, metrics for +# ## all nodes are gathered. +# # nodes = ["rabbit@node1", "rabbit@node2"] + + +# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers) +# [[inputs.raindrops]] +# ## An array of raindrops middleware URI to gather stats. +# urls = ["http://localhost:8080/_raindrops"] + + +# # Read metrics from one or many redis servers +# [[inputs.redis]] +# ## specify servers via a url matching: +# ## [protocol://][:password]@address[:port] +# ## e.g. +# ## tcp://localhost:6379 +# ## tcp://:password@192.168.99.100 +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 6379 is used +# servers = ["tcp://localhost:6379"] + + +# # Read metrics from one or many RethinkDB servers +# [[inputs.rethinkdb]] +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port add password. ie, +# ## rethinkdb://user:auth_key@10.10.3.30:28105, +# ## rethinkdb://10.10.3.33:18832, +# ## 10.0.0.1:10000, etc. +# servers = ["127.0.0.1:28015"] + + +# # Read metrics one or many Riak servers +# [[inputs.riak]] +# # Specify a list of one or more riak http servers +# servers = ["http://localhost:8098"] + + +# # Reads oids value from one or many snmp agents +# [[inputs.snmp]] +# ## Use 'oids.txt' file to translate oids to names +# ## To generate 'oids.txt' you need to run: +# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt +# ## Or if you have an other MIB folder with custom MIBs +# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt +# snmptranslate_file = "/tmp/oids.txt" +# [[inputs.snmp.host]] +# address = "192.168.2.2:161" +# # SNMP community +# community = "public" # default public +# # SNMP version (1, 2 or 3) +# # Version 3 not supported yet +# version = 2 # default 2 +# # SNMP response timeout +# timeout = 2.0 # default 2.0 +# # SNMP request retries +# retries = 2 # default 2 +# # Which get/bulk do you want to collect for this host +# collect = ["mybulk", "sysservices", "sysdescr"] +# # Simple list of OIDs to get, in addition to "collect" +# get_oids = [] +# +# [[inputs.snmp.host]] +# address = "192.168.2.3:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# collect = ["mybulk"] +# get_oids = [ +# "ifNumber", +# ".1.3.6.1.2.1.1.3.0", +# ] +# +# [[inputs.snmp.get]] +# name = "ifnumber" +# oid = "ifNumber" +# +# [[inputs.snmp.get]] +# name = "interface_speed" +# oid = "ifSpeed" +# instance = "0" +# +# [[inputs.snmp.get]] +# name = "sysuptime" +# oid = ".1.3.6.1.2.1.1.3.0" +# unit = "second" +# +# [[inputs.snmp.bulk]] +# name = "mybulk" +# max_repetition = 127 +# oid = ".1.3.6.1.2.1.1" +# +# [[inputs.snmp.bulk]] +# name = "ifoutoctets" +# max_repetition = 127 +# oid = "ifOutOctets" +# +# [[inputs.snmp.host]] +# address = "192.168.2.13:161" +# #address = "127.0.0.1:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# #collect = ["mybulk", "sysservices", "sysdescr", "systype"] +# collect = ["sysuptime" ] +# [[inputs.snmp.host.table]] +# name = "iftable3" +# include_instances = ["enp5s0", "eth1"] +# +# # SNMP TABLEs +# # table without mapping neither subtables +# [[inputs.snmp.table]] +# name = "iftable1" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# +# # table without mapping but with subtables +# [[inputs.snmp.table]] +# name = "iftable2" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# sub_tables = [".1.3.6.1.2.1.2.2.1.13"] +# +# # table with mapping but without subtables +# [[inputs.snmp.table]] +# name = "iftable3" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty. get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty, get all subtables +# +# # table with both mapping and subtables +# [[inputs.snmp.table]] +# name = "iftable4" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty get all subtables +# # sub_tables could be not "real subtables" +# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] + + +# # Read metrics from Microsoft SQL Server +# [[inputs.sqlserver]] +# ## Specify instances to monitor with a list of connection strings. +# ## All connection parameters are optional. +# ## By default, the host is localhost, listening on default port, TCP 1433. +# ## for Windows, the user is the currently running AD user (SSO). +# ## See https://github.com/denisenkom/go-mssqldb for detailed connection +# ## parameters. +# # servers = [ +# # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", +# # ] + + +# # Inserts sine and cosine waves for demonstration purposes +# [[inputs.trig]] +# ## Set the amplitude +# amplitude = 10.0 + + +# # Read Twemproxy stats data +# [[inputs.twemproxy]] +# ## Twemproxy stats address and port (no scheme) +# addr = "localhost:22222" +# ## Monitor pool name +# pools = ["redis_pool", "mc_pool"] + + +# # Read metrics of ZFS from arcstats, zfetchstats and vdev_cache_stats +# [[inputs.zfs]] +# ## ZFS kstat path +# ## If not specified, then default is: +# kstatPath = "/proc/spl/kstat/zfs" +# +# ## By default, telegraf gather all zfs stats +# ## If not specified, then default is: +# kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] +# +# ## By default, don't gather zpool stats +# poolMetrics = false + + +# # Reads 'mntr' stats from one or many zookeeper servers +# [[inputs.zookeeper]] +# ## An array of address to gather stats about. Specify an ip or hostname +# ## with port. ie localhost:2181, 10.0.0.1:2181, etc. +# +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 2181 is used +# servers = [":2181"] + + + ############################################################################### -# SERVICE INPUTS # +# SERVICE INPUT PLUGINS # ############################################################################### + +# # Generic UDP listener +# [[inputs.udp_listener]] +# ## Address and port to host UDP listener on +# service_address = ":8092" +# +# ## Number of UDP messages allowed to queue up. Once filled, the +# ## UDP listener will start dropping packets. +# allowed_pending_messages = 10000 +# +# ## UDP packet size for the server to listen for. This will depend +# ## on the size of the packets that the client is sending, which is +# ## usually 1500 bytes, but can be as large as 65,535 bytes. +# udp_packet_size = 1500 +# +# ## Data format to consume. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # A Github Webhook Event collector +# [[inputs.github_webhooks]] +# ## Address and port to host Webhook listener on +# service_address = ":1618" + + +# # Read metrics from Kafka topic(s) +# [[inputs.kafka_consumer]] +# ## topic(s) to consume +# topics = ["telegraf"] +# ## an array of Zookeeper connection strings +# zookeeper_peers = ["localhost:2181"] +# ## Zookeeper Chroot +# zookeeper_chroot = "/" +# ## the name of the consumer group +# consumer_group = "telegraf_metrics_consumers" +# ## Offset (must be either "oldest" or "newest") +# offset = "oldest" +# +# ## Data format to consume. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from MQTT topic(s) +# [[inputs.mqtt_consumer]] +# servers = ["localhost:1883"] +# ## MQTT QoS, must be 0, 1, or 2 +# qos = 0 +# +# ## Topics to subscribe to +# topics = [ +# "telegraf/host01/cpu", +# "telegraf/+/mem", +# "sensors/#", +# ] +# +# # if true, messages that can't be delivered while the subscriber is offline +# # will be delivered when it comes back (such as on service restart). +# # NOTE: if true, client_id MUST be set +# persistent_session = false +# # If empty, a random client ID will be generated. +# client_id = "" +# +# ## username and password to connect MQTT server. +# # username = "telegraf" +# # password = "metricsmetricsmetricsmetrics" +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to consume. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from NATS subject(s) +# [[inputs.nats_consumer]] +# ## urls of NATS servers +# servers = ["nats://localhost:4222"] +# ## Use Transport Layer Security +# secure = false +# ## subject(s) to consume +# subjects = ["telegraf"] +# ## name a queue group +# queue_group = "telegraf_consumers" +# +# ## Data format to consume. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Statsd Server +# [[inputs.statsd]] +# ## Address and port to host UDP listener on +# service_address = ":8125" +# ## Delete gauges every interval (default=false) +# delete_gauges = false +# ## Delete counters every interval (default=false) +# delete_counters = false +# ## Delete sets every interval (default=false) +# delete_sets = false +# ## Delete timings & histograms every interval (default=true) +# delete_timings = true +# ## Percentiles to calculate for timing & histogram stats +# percentiles = [90] +# +# ## separator to use between elements of a statsd metric +# metric_separator = "_" +# +# ## Parses tags in the datadog statsd format +# ## http://docs.datadoghq.com/guides/dogstatsd/ +# parse_data_dog_tags = false +# +# ## Statsd data translation templates, more info can be read here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite +# # templates = [ +# # "cpu.* measurement*" +# # ] +# +# ## Number of UDP messages allowed to queue up, once filled, +# ## the statsd server will start dropping packets +# allowed_pending_messages = 10000 +# +# ## Number of timing/histogram values to track per-measurement in the +# ## calculation of percentiles. Raising this limit increases the accuracy +# ## of percentiles but also increases the memory usage and cpu time. +# percentile_limit = 1000 +# +# ## UDP packet size for the server to listen for. This will depend on the size +# ## of the packets that the client is sending, which is usually 1500 bytes. +# udp_packet_size = 1500 + + +# # Generic TCP listener +# [[inputs.tcp_listener]] +# ## Address and port to host TCP listener on +# service_address = ":8094" +# +# ## Number of TCP messages allowed to queue up. Once filled, the +# ## TCP listener will start dropping packets. +# allowed_pending_messages = 10000 +# +# ## Maximum number of concurrent TCP connections to allow +# max_tcp_connections = 250 +# +# ## Data format to consume. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + diff --git a/internal/config/config.go b/internal/config/config.go index b15c5e651..715fa777c 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -22,6 +22,15 @@ import ( "github.com/influxdata/toml/ast" ) +var ( + // Default input plugins + inputDefaults = []string{"cpu", "mem", "swap", "system", "kernel", + "processes", "disk", "diskio"} + + // Default output plugins + outputDefaults = []string{"influxdb"} +) + // Config specifies the URL/user/password for the database that telegraf // will be logging to, as well as all the plugins that the user has // specified @@ -135,21 +144,23 @@ func (c *Config) ListTags() string { } var header = `# Telegraf Configuration - +# # Telegraf is entirely plugin driven. All metrics are gathered from the # declared inputs, and sent to the declared outputs. - +# # Plugins must be declared in here to be active. # To deactivate a plugin, comment out the name and any variables. - +# # Use 'telegraf -config telegraf.conf -test' to see what metrics a config # file would generate. + # Global tags can be specified here in key="value" format. [global_tags] # dc = "us-east-1" # will tag all metrics with dc=us-east-1 # rack = "1a" + # Configuration for telegraf agent [agent] ## Default data collection interval for all inputs @@ -188,55 +199,72 @@ var header = `# Telegraf Configuration omit_hostname = false -# -# OUTPUTS: -# - +############################################################################### +# OUTPUT PLUGINS # +############################################################################### ` -var pluginHeader = ` -# -# INPUTS: -# +var inputHeader = ` + +############################################################################### +# INPUT PLUGINS # +############################################################################### ` var serviceInputHeader = ` -# -# SERVICE INPUTS: -# + +############################################################################### +# SERVICE INPUT PLUGINS # +############################################################################### ` // PrintSampleConfig prints the sample config -func PrintSampleConfig(pluginFilters []string, outputFilters []string) { +func PrintSampleConfig(inputFilters []string, outputFilters []string) { fmt.Printf(header) - // Filter outputs - var onames []string - for oname := range outputs.Outputs { - if len(outputFilters) == 0 || sliceContains(oname, outputFilters) { - onames = append(onames, oname) + if len(outputFilters) != 0 { + printFilteredOutputs(outputFilters, false) + } else { + printFilteredOutputs(outputDefaults, false) + // Print non-default outputs, commented + var pnames []string + for pname := range outputs.Outputs { + if !sliceContains(pname, outputDefaults) { + pnames = append(pnames, pname) + } } - } - sort.Strings(onames) - - // Print Outputs - for _, oname := range onames { - creator := outputs.Outputs[oname] - output := creator() - printConfig(oname, output, "outputs") + sort.Strings(pnames) + printFilteredOutputs(pnames, true) } + fmt.Printf(inputHeader) + if len(inputFilters) != 0 { + printFilteredInputs(inputFilters, false) + } else { + printFilteredInputs(inputDefaults, false) + // Print non-default inputs, commented + var pnames []string + for pname := range inputs.Inputs { + if !sliceContains(pname, inputDefaults) { + pnames = append(pnames, pname) + } + } + sort.Strings(pnames) + printFilteredInputs(pnames, true) + } +} + +func printFilteredInputs(inputFilters []string, commented bool) { // Filter inputs var pnames []string for pname := range inputs.Inputs { - if len(pluginFilters) == 0 || sliceContains(pname, pluginFilters) { + if sliceContains(pname, inputFilters) { pnames = append(pnames, pname) } } sort.Strings(pnames) // Print Inputs - fmt.Printf(pluginHeader) servInputs := make(map[string]telegraf.ServiceInput) for _, pname := range pnames { creator := inputs.Inputs[pname] @@ -248,13 +276,34 @@ func PrintSampleConfig(pluginFilters []string, outputFilters []string) { continue } - printConfig(pname, input, "inputs") + printConfig(pname, input, "inputs", commented) } // Print Service Inputs + if len(servInputs) == 0 { + return + } fmt.Printf(serviceInputHeader) for name, input := range servInputs { - printConfig(name, input, "inputs") + printConfig(name, input, "inputs", commented) + } +} + +func printFilteredOutputs(outputFilters []string, commented bool) { + // Filter outputs + var onames []string + for oname := range outputs.Outputs { + if sliceContains(oname, outputFilters) { + onames = append(onames, oname) + } + } + sort.Strings(onames) + + // Print Outputs + for _, oname := range onames { + creator := outputs.Outputs[oname] + output := creator() + printConfig(oname, output, "outputs", commented) } } @@ -263,13 +312,26 @@ type printer interface { SampleConfig() string } -func printConfig(name string, p printer, op string) { - fmt.Printf("\n# %s\n[[%s.%s]]", p.Description(), op, name) +func printConfig(name string, p printer, op string, commented bool) { + comment := "" + if commented { + comment = "# " + } + fmt.Printf("\n%s# %s\n%s[[%s.%s]]", comment, p.Description(), comment, + op, name) + config := p.SampleConfig() if config == "" { - fmt.Printf("\n # no configuration\n") + fmt.Printf("\n%s # no configuration\n\n", comment) } else { - fmt.Printf(config) + lines := strings.Split(config, "\n") + for i, line := range lines { + if i == 0 || i == len(lines)-1 { + fmt.Print("\n") + continue + } + fmt.Print(comment + line + "\n") + } } } @@ -285,7 +347,7 @@ func sliceContains(name string, list []string) bool { // PrintInputConfig prints the config usage of a single input. func PrintInputConfig(name string) error { if creator, ok := inputs.Inputs[name]; ok { - printConfig(name, creator(), "inputs") + printConfig(name, creator(), "inputs", false) } else { return errors.New(fmt.Sprintf("Input %s not found", name)) } @@ -295,7 +357,7 @@ func PrintInputConfig(name string) error { // PrintOutputConfig prints the config usage of a single output. func PrintOutputConfig(name string) error { if creator, ok := outputs.Outputs[name]; ok { - printConfig(name, creator(), "outputs") + printConfig(name, creator(), "outputs", false) } else { return errors.New(fmt.Sprintf("Output %s not found", name)) } @@ -327,42 +389,42 @@ func (c *Config) LoadDirectory(path string) error { func (c *Config) LoadConfig(path string) error { tbl, err := config.ParseFile(path) if err != nil { - return err + return fmt.Errorf("Error parsing %s, %s", path, err) } for name, val := range tbl.Fields { subTable, ok := val.(*ast.Table) if !ok { - return errors.New("invalid configuration") + return fmt.Errorf("%s: invalid configuration", path) } switch name { case "agent": if err = config.UnmarshalTable(subTable, c.Agent); err != nil { log.Printf("Could not parse [agent] config\n") - return err + return fmt.Errorf("Error parsing %s, %s", path, err) } case "global_tags", "tags": if err = config.UnmarshalTable(subTable, c.Tags); err != nil { log.Printf("Could not parse [global_tags] config\n") - return err + return fmt.Errorf("Error parsing %s, %s", path, err) } case "outputs": for pluginName, pluginVal := range subTable.Fields { switch pluginSubTable := pluginVal.(type) { case *ast.Table: if err = c.addOutput(pluginName, pluginSubTable); err != nil { - return err + return fmt.Errorf("Error parsing %s, %s", path, err) } case []*ast.Table: for _, t := range pluginSubTable { if err = c.addOutput(pluginName, t); err != nil { - return err + return fmt.Errorf("Error parsing %s, %s", path, err) } } default: - return fmt.Errorf("Unsupported config format: %s", - pluginName) + return fmt.Errorf("Unsupported config format: %s, file %s", + pluginName, path) } } case "inputs", "plugins": @@ -370,24 +432,24 @@ func (c *Config) LoadConfig(path string) error { switch pluginSubTable := pluginVal.(type) { case *ast.Table: if err = c.addInput(pluginName, pluginSubTable); err != nil { - return err + return fmt.Errorf("Error parsing %s, %s", path, err) } case []*ast.Table: for _, t := range pluginSubTable { if err = c.addInput(pluginName, t); err != nil { - return err + return fmt.Errorf("Error parsing %s, %s", path, err) } } default: - return fmt.Errorf("Unsupported config format: %s", - pluginName) + return fmt.Errorf("Unsupported config format: %s, file %s", + pluginName, path) } } // Assume it's an input input for legacy config file support if no other // identifiers are present default: if err = c.addInput(name, subTable); err != nil { - return err + return fmt.Errorf("Error parsing %s, %s", path, err) } } } diff --git a/plugins/inputs/disque/disque.go b/plugins/inputs/disque/disque.go index 822e5924f..d726590b4 100644 --- a/plugins/inputs/disque/disque.go +++ b/plugins/inputs/disque/disque.go @@ -24,9 +24,8 @@ type Disque struct { var sampleConfig = ` ## An array of URI to gather stats about. Specify an ip or hostname - ## with optional port and password. ie disque://localhost, disque://10.10.3.33:18832, - ## 10.0.0.1:10000, etc. - + ## with optional port and password. + ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. ## If no servers are specified, then localhost is used as the host. servers = ["localhost"] ` diff --git a/plugins/inputs/dns_query/dns_query.go b/plugins/inputs/dns_query/dns_query.go index 397482a98..2231f2921 100644 --- a/plugins/inputs/dns_query/dns_query.go +++ b/plugins/inputs/dns_query/dns_query.go @@ -35,7 +35,8 @@ var sampleConfig = ` ## Domains or subdomains to query. "."(root) is default domains = ["."] # optional - ## Query record type. Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. Default is "NS" + ## Query record type. Default is "A" + ## Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. record_type = "A" # optional ## Dns server port. 53 is default diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index 9fd9491ca..d2e09ccd0 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -22,7 +22,7 @@ const sampleConfig = ` ## measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ## Data format to consume. This can be "json", "influx", "graphite" or "nagios + ## Data format to consume. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 07c87199f..a2cda43d6 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -56,7 +56,7 @@ var sampleConfig = ` ## Offset (must be either "oldest" or "newest") offset = "oldest" - ## Data format to consume. This can be "json", "influx" or "graphite" + ## Data format to consume. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index ccb76daae..b096a20d9 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -34,7 +34,16 @@ var sampleConfig = ` # A list of Mesos masters, default value is localhost:5050. masters = ["localhost:5050"] # Metrics groups to be collected, by default, all enabled. - master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"] + master_collections = [ + "resources", + "master", + "system", + "slaves", + "frameworks", + "messages", + "evqueue", + "registrar", + ] ` // SampleConfig returns a sample configuration block diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 50a20740a..c64d2139b 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -78,7 +78,7 @@ var sampleConfig = ` ## Use SSL but skip chain & host verification # insecure_skip_verify = false - ## Data format to consume. This can be "json", "influx" or "graphite" + ## Data format to consume. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index 235601100..232d5740f 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -55,7 +55,7 @@ var sampleConfig = ` ## name a queue group queue_group = "telegraf_consumers" - ## Data format to consume. This can be "json", "influx" or "graphite" + ## Data format to consume. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index d8d0d1978..da8ee8001 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -26,7 +26,8 @@ var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_rese var sampleConfig = ` ## specify address via a url matching: - ## postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] + ## postgres://[pqgotest[:password]]@localhost[/dbname]\ + ## ?sslmode=[disable|verify-ca|verify-full] ## or a simple string: ## host=localhost user=pqotest password=... sslmode=... dbname=app_production ## diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index 67097db4b..4ebf752ff 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -38,38 +38,41 @@ type query []struct { var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true} var sampleConfig = ` - # specify address via a url matching: - # postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] - # or a simple string: - # host=localhost user=pqotest password=... sslmode=... dbname=app_production + ## specify address via a url matching: + ## postgres://[pqgotest[:password]]@localhost[/dbname]\ + ## ?sslmode=[disable|verify-ca|verify-full] + ## or a simple string: + ## host=localhost user=pqotest password=... sslmode=... dbname=app_production # - # All connection parameters are optional. # - # Without the dbname parameter, the driver will default to a database - # with the same name as the user. This dbname is just for instantiating a - # connection with the server and doesn't restrict the databases we are trying - # to grab metrics for. + ## All connection parameters are optional. # + ## Without the dbname parameter, the driver will default to a database + ## with the same name as the user. This dbname is just for instantiating a + ## connection with the server and doesn't restrict the databases we are trying + ## to grab metrics for. # address = "host=localhost user=postgres sslmode=disable" - # A list of databases to pull metrics about. If not specified, metrics for all - # databases are gathered. - # databases = ["app_production", "testing"] + ## A list of databases to pull metrics about. If not specified, metrics for all + ## databases are gathered. + ## databases = ["app_production", "testing"] # - # Define the toml config where the sql queries are stored - # New queries can be added, if the withdbname is set to true and there is no databases defined - # in the 'databases field', the sql query is ended by a 'is not null' in order to make the query - # succeed. - # Example : - # The sqlquery : "SELECT * FROM pg_stat_database where datname" become "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" - # because the databases variable was set to ['postgres', 'pgbench' ] and the withdbname was true. - # Be careful that if the withdbname is set to false you d'ont have to define the where clause (aka with the dbname) - # the tagvalue field is used to define custom tags (separated by comas) + ## Define the toml config where the sql queries are stored + ## New queries can be added, if the withdbname is set to true and there is no + ## databases defined in the 'databases field', the sql query is ended by a + ## 'is not null' in order to make the query succeed. + ## Example : + ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become + ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" + ## because the databases variable was set to ['postgres', 'pgbench' ] and the + ## withdbname was true. Be careful that if the withdbname is set to false you + ## don't have to define the where clause (aka with the dbname) the tagvalue + ## field is used to define custom tags (separated by comas) # - # Structure : - # [[inputs.postgresql_extensible.query]] - # sqlquery string - # version string - # withdbname boolean - # tagvalue string (coma separated) + ## Structure : + ## [[inputs.postgresql_extensible.query]] + ## sqlquery string + ## version string + ## withdbname boolean + ## tagvalue string (coma separated) [[inputs.postgresql_extensible.query]] sqlquery="SELECT * FROM pg_stat_database" version=901 diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 0281cc24a..460a79faf 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -26,10 +26,10 @@ var sampleConfig = ` ## An array of urls to scrape metrics from. urls = ["http://localhost:9100/metrics"] - ### Use SSL but skip chain & host verification - # insecure_skip_verify = false - ### Use bearer token for authorization - # bearer_token = /path/to/bearer/token + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false + ## Use bearer token for authorization + # bearer_token = /path/to/bearer/token ` func (p *Prometheus) SampleConfig() string { diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index a56e53ff7..4c2de93c9 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -178,7 +178,6 @@ var sampleConfig = ` max_repetition = 127 oid = "ifOutOctets" - [[inputs.snmp.host]] address = "192.168.2.13:161" #address = "127.0.0.1:161" @@ -219,10 +218,8 @@ var sampleConfig = ` # if empty get all instances mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" # if empty get all subtables - # sub_tables could be not "real subtables" + # sub_tables could be not "real subtables" sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] - - ` // SampleConfig returns sample configuration message diff --git a/plugins/inputs/tcp_listener/tcp_listener.go b/plugins/inputs/tcp_listener/tcp_listener.go index dd239fedf..a1b991058 100644 --- a/plugins/inputs/tcp_listener/tcp_listener.go +++ b/plugins/inputs/tcp_listener/tcp_listener.go @@ -53,7 +53,7 @@ const sampleConfig = ` ## Maximum number of concurrent TCP connections to allow max_tcp_connections = 250 - ## Data format to consume. This can be "json", "influx" or "graphite" + ## Data format to consume. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/udp_listener/udp_listener.go b/plugins/inputs/udp_listener/udp_listener.go index 9b0a65d6f..794f1791d 100644 --- a/plugins/inputs/udp_listener/udp_listener.go +++ b/plugins/inputs/udp_listener/udp_listener.go @@ -48,7 +48,7 @@ const sampleConfig = ` ## usually 1500 bytes, but can be as large as 65,535 bytes. udp_packet_size = 1500 - ## Data format to consume. This can be "json", "influx" or "graphite" + ## Data format to consume. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index c9531b2a5..bf9353d6e 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -89,7 +89,7 @@ var sampleConfig = ` ## Use SSL but skip chain & host verification # insecure_skip_verify = false - ## Data format to output. This can be "influx" or "graphite" + ## Data format to output. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go index 743c0f03f..1d47642b2 100644 --- a/plugins/outputs/file/file.go +++ b/plugins/outputs/file/file.go @@ -23,7 +23,7 @@ var sampleConfig = ` ## Files to write to, "stdout" is a specially handled file. files = ["stdout", "/tmp/metrics.out"] - ## Data format to output. This can be "influx" or "graphite" + ## Data format to output. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 3cecfeeab..1fafa1353 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -59,16 +59,27 @@ var sampleConfig = ` ## ie, if this tag exists, it's value will be used as the routing key routing_tag = "host" - ## CompressionCodec represents the various compression codecs recognized by Kafka in messages. + ## CompressionCodec represents the various compression codecs recognized by + ## Kafka in messages. ## 0 : No compression ## 1 : Gzip compression ## 2 : Snappy compression compression_codec = 0 - ## RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements it must see before responding - ## 0 : the producer never waits for an acknowledgement from the broker. This option provides the lowest latency but the weakest durability guarantees (some data will be lost when a server fails). - ## 1 : the producer gets an acknowledgement after the leader replica has received the data. This option provides better durability as the client waits until the server acknowledges the request as successful (only messages that were written to the now-dead leader but not yet replicated will be lost). - ## -1 : the producer gets an acknowledgement after all in-sync replicas have received the data. This option provides the best durability, we guarantee that no messages will be lost as long as at least one in sync replica remains. + ## RequiredAcks is used in Produce Requests to tell the broker how many + ## replica acknowledgements it must see before responding + ## 0 : the producer never waits for an acknowledgement from the broker. + ## This option provides the lowest latency but the weakest durability + ## guarantees (some data will be lost when a server fails). + ## 1 : the producer gets an acknowledgement after the leader replica has + ## received the data. This option provides better durability as the + ## client waits until the server acknowledges the request as successful + ## (only messages that were written to the now-dead leader but not yet + ## replicated will be lost). + ## -1: the producer gets an acknowledgement after all in-sync replicas have + ## received the data. This option provides the best durability, we + ## guarantee that no messages will be lost as long as at least one in + ## sync replica remains. required_acks = -1 ## The total number of times to retry sending a message @@ -81,7 +92,7 @@ var sampleConfig = ` ## Use SSL but skip chain & host verification # insecure_skip_verify = false - ## Data format to output. This can be "influx" or "graphite" + ## Data format to output. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index f13500db9..c57ee8cd0 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -32,7 +32,7 @@ var sampleConfig = ` ## Use SSL but skip chain & host verification # insecure_skip_verify = false - ## Data format to output. This can be "influx" or "graphite" + ## Data format to output. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md diff --git a/plugins/outputs/nsq/nsq.go b/plugins/outputs/nsq/nsq.go index 75b998484..fd4053222 100644 --- a/plugins/outputs/nsq/nsq.go +++ b/plugins/outputs/nsq/nsq.go @@ -24,7 +24,7 @@ var sampleConfig = ` ## NSQ topic for producer messages topic = "telegraf" - ## Data format to output. This can be "influx" or "graphite" + ## Data format to output. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md From f5246eb1678a44ec7b959ac30c525848684ddc68 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 1 Apr 2016 11:45:09 -0600 Subject: [PATCH 220/287] Update changelog with config file PR --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f43aca161..1095364f5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ ## v0.12.0 [unreleased] ### Features +- [#948](https://github.com/influxdata/telegraf/pull/948): Cleanup config file and make default package version include all plugins (but commented). - [#927](https://github.com/influxdata/telegraf/pull/927): Adds parsing of tags to the statsd input when using DataDog's dogstatsd extension - [#863](https://github.com/influxdata/telegraf/pull/863): AMQP output: allow external auth. Thanks @ekini! - [#707](https://github.com/influxdata/telegraf/pull/707): Improved prometheus plugin. Thanks @titilambert! From 9211d22b2b7dfd5fa12280b4e6c719395bb6fe5e Mon Sep 17 00:00:00 2001 From: Rubycut Date: Fri, 1 Apr 2016 17:59:09 +0200 Subject: [PATCH 221/287] Add writing in documentation. closes #950 --- plugins/inputs/nginx/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/inputs/nginx/README.md b/plugins/inputs/nginx/README.md index 8c64f6311..918ee08ad 100644 --- a/plugins/inputs/nginx/README.md +++ b/plugins/inputs/nginx/README.md @@ -18,6 +18,7 @@ - reading - requests - waiting + - writing ### Tags: From 8e041420cd9c34ae8a91e73ae62dab4d76937d0e Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 1 Apr 2016 13:53:34 -0600 Subject: [PATCH 222/287] config: parse environment variables in the config file closes #663 --- CHANGELOG.md | 1 + docs/CONFIGURATION.md | 6 +++ etc/telegraf.conf | 48 +++++++++++-------- internal/config/config.go | 47 ++++++++++++++++-- internal/config/config_test.go | 44 +++++++++++++++++ .../testdata/single_plugin_env_vars.toml | 11 +++++ 6 files changed, 132 insertions(+), 25 deletions(-) create mode 100644 internal/config/testdata/single_plugin_env_vars.toml diff --git a/CHANGELOG.md b/CHANGELOG.md index 1095364f5..305fa6d03 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ ## v0.12.0 [unreleased] ### Features +- [#951](https://github.com/influxdata/telegraf/pull/951): Parse environment variables in the config file. - [#948](https://github.com/influxdata/telegraf/pull/948): Cleanup config file and make default package version include all plugins (but commented). - [#927](https://github.com/influxdata/telegraf/pull/927): Adds parsing of tags to the statsd input when using DataDog's dogstatsd extension - [#863](https://github.com/influxdata/telegraf/pull/863): AMQP output: allow external auth. Thanks @ekini! diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 810dc9470..0afaa120f 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -9,6 +9,12 @@ To generate a file with specific inputs and outputs, you can use the -input-filter and -output-filter flags: `telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka` +## Environment Variables + +Environment variables can be used anywhere in the config file, simply prepend +them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), +for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) + ## `[global_tags]` Configuration Global tags can be specific in the `[global_tags]` section of the config file in diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 43d647beb..633483e22 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -8,12 +8,18 @@ # # Use 'telegraf -config telegraf.conf -test' to see what metrics a config # file would generate. +# +# Environment variables can be used anywhere in this config file, simply prepend +# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), +# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) # Global tags can be specified here in key="value" format. [global_tags] # dc = "us-east-1" # will tag all metrics with dc=us-east-1 # rack = "1a" + ## Environment variables can be used as tags, and throughout the config file + # user = "$USER" # Configuration for telegraf agent @@ -1114,27 +1120,6 @@ # SERVICE INPUT PLUGINS # ############################################################################### -# # Generic UDP listener -# [[inputs.udp_listener]] -# ## Address and port to host UDP listener on -# service_address = ":8092" -# -# ## Number of UDP messages allowed to queue up. Once filled, the -# ## UDP listener will start dropping packets. -# allowed_pending_messages = 10000 -# -# ## UDP packet size for the server to listen for. This will depend -# ## on the size of the packets that the client is sending, which is -# ## usually 1500 bytes, but can be as large as 65,535 bytes. -# udp_packet_size = 1500 -# -# ## Data format to consume. -# ## Each data format has it's own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" - - # # A Github Webhook Event collector # [[inputs.github_webhooks]] # ## Address and port to host Webhook listener on @@ -1277,3 +1262,24 @@ # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" + +# # Generic UDP listener +# [[inputs.udp_listener]] +# ## Address and port to host UDP listener on +# service_address = ":8092" +# +# ## Number of UDP messages allowed to queue up. Once filled, the +# ## UDP listener will start dropping packets. +# allowed_pending_messages = 10000 +# +# ## UDP packet size for the server to listen for. This will depend +# ## on the size of the packets that the client is sending, which is +# ## usually 1500 bytes, but can be as large as 65,535 bytes. +# udp_packet_size = 1500 +# +# ## Data format to consume. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + diff --git a/internal/config/config.go b/internal/config/config.go index 715fa777c..1e07234e8 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1,11 +1,14 @@ package config import ( + "bytes" "errors" "fmt" "io/ioutil" "log" + "os" "path/filepath" + "regexp" "sort" "strings" "time" @@ -19,6 +22,7 @@ import ( "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/config" + "github.com/influxdata/toml" "github.com/influxdata/toml/ast" ) @@ -29,6 +33,9 @@ var ( // Default output plugins outputDefaults = []string{"influxdb"} + + // envVarRe is a regex to find environment variables in the config file + envVarRe = regexp.MustCompile(`\$\w+`) ) // Config specifies the URL/user/password for the database that telegraf @@ -153,12 +160,18 @@ var header = `# Telegraf Configuration # # Use 'telegraf -config telegraf.conf -test' to see what metrics a config # file would generate. +# +# Environment variables can be used anywhere in this config file, simply prepend +# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), +# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) # Global tags can be specified here in key="value" format. [global_tags] # dc = "us-east-1" # will tag all metrics with dc=us-east-1 # rack = "1a" + ## Environment variables can be used as tags, and throughout the config file + # user = "$USER" # Configuration for telegraf agent @@ -264,8 +277,12 @@ func printFilteredInputs(inputFilters []string, commented bool) { } sort.Strings(pnames) - // Print Inputs + // cache service inputs to print them at the end servInputs := make(map[string]telegraf.ServiceInput) + // for alphabetical looping: + servInputNames := []string{} + + // Print Inputs for _, pname := range pnames { creator := inputs.Inputs[pname] input := creator() @@ -273,6 +290,7 @@ func printFilteredInputs(inputFilters []string, commented bool) { switch p := input.(type) { case telegraf.ServiceInput: servInputs[pname] = p + servInputNames = append(servInputNames, pname) continue } @@ -283,9 +301,10 @@ func printFilteredInputs(inputFilters []string, commented bool) { if len(servInputs) == 0 { return } + sort.Strings(servInputNames) fmt.Printf(serviceInputHeader) - for name, input := range servInputs { - printConfig(name, input, "inputs", commented) + for _, name := range servInputNames { + printConfig(name, servInputs[name], "inputs", commented) } } @@ -387,7 +406,7 @@ func (c *Config) LoadDirectory(path string) error { // LoadConfig loads the given config file and applies it to c func (c *Config) LoadConfig(path string) error { - tbl, err := config.ParseFile(path) + tbl, err := parseFile(path) if err != nil { return fmt.Errorf("Error parsing %s, %s", path, err) } @@ -456,6 +475,26 @@ func (c *Config) LoadConfig(path string) error { return nil } +// parseFile loads a TOML configuration from a provided path and +// returns the AST produced from the TOML parser. When loading the file, it +// will find environment variables and replace them. +func parseFile(fpath string) (*ast.Table, error) { + contents, err := ioutil.ReadFile(fpath) + if err != nil { + return nil, err + } + + env_vars := envVarRe.FindAll(contents, -1) + for _, env_var := range env_vars { + env_val := os.Getenv(strings.TrimPrefix(string(env_var), "$")) + if env_val != "" { + contents = bytes.Replace(contents, env_var, []byte(env_val), 1) + } + } + + return toml.Parse(contents) +} + func (c *Config) addOutput(name string, table *ast.Table) error { if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) { return nil diff --git a/internal/config/config_test.go b/internal/config/config_test.go index f0add8b98..d78a8d6b8 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -1,6 +1,7 @@ package config import ( + "os" "testing" "time" @@ -10,9 +11,52 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/memcached" "github.com/influxdata/telegraf/plugins/inputs/procstat" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/stretchr/testify/assert" ) +func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { + c := NewConfig() + err := os.Setenv("MY_TEST_SERVER", "192.168.1.1") + assert.NoError(t, err) + err = os.Setenv("TEST_INTERVAL", "10s") + assert.NoError(t, err) + c.LoadConfig("./testdata/single_plugin_env_vars.toml") + + memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) + memcached.Servers = []string{"192.168.1.1"} + + mConfig := &internal_models.InputConfig{ + Name: "memcached", + Filter: internal_models.Filter{ + NameDrop: []string{"metricname2"}, + NamePass: []string{"metricname1"}, + FieldDrop: []string{"other", "stuff"}, + FieldPass: []string{"some", "strings"}, + TagDrop: []internal_models.TagFilter{ + internal_models.TagFilter{ + Name: "badtag", + Filter: []string{"othertag"}, + }, + }, + TagPass: []internal_models.TagFilter{ + internal_models.TagFilter{ + Name: "goodtag", + Filter: []string{"mytag"}, + }, + }, + IsActive: true, + }, + Interval: 10 * time.Second, + } + mConfig.Tags = make(map[string]string) + + assert.Equal(t, memcached, c.Inputs[0].Input, + "Testdata did not produce a correct memcached struct.") + assert.Equal(t, mConfig, c.Inputs[0].Config, + "Testdata did not produce correct memcached metadata.") +} + func TestConfig_LoadSingleInput(t *testing.T) { c := NewConfig() c.LoadConfig("./testdata/single_plugin.toml") diff --git a/internal/config/testdata/single_plugin_env_vars.toml b/internal/config/testdata/single_plugin_env_vars.toml new file mode 100644 index 000000000..6600a77b3 --- /dev/null +++ b/internal/config/testdata/single_plugin_env_vars.toml @@ -0,0 +1,11 @@ +[[inputs.memcached]] + servers = ["$MY_TEST_SERVER"] + namepass = ["metricname1"] + namedrop = ["metricname2"] + fieldpass = ["some", "strings"] + fielddrop = ["other", "stuff"] + interval = "$TEST_INTERVAL" + [inputs.memcached.tagpass] + goodtag = ["mytag"] + [inputs.memcached.tagdrop] + badtag = ["othertag"] From 0f1b4e06f519e206cbc9607017a2cffb5873b108 Mon Sep 17 00:00:00 2001 From: Nikhil Bafna Date: Sat, 2 Apr 2016 10:13:21 +0530 Subject: [PATCH 223/287] Update README.md Fix redis input plugin name in configuration example --- plugins/inputs/redis/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/redis/README.md b/plugins/inputs/redis/README.md index d7d98ccc9..1cbaea0ca 100644 --- a/plugins/inputs/redis/README.md +++ b/plugins/inputs/redis/README.md @@ -62,7 +62,7 @@ Using this configuration: ``` -[[inputs.nginx]] +[[inputs.redis]] ## specify servers via a url matching: ## [protocol://][:password]@address[:port] ## e.g. From 357849c34846f0c347e6048f4372ed1f39014d06 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Sat, 2 Apr 2016 13:35:40 -0600 Subject: [PATCH 224/287] Godeps: update wvanbergen/kafka dependency see https://github.com/wvanbergen/kafka/pull/87 fixes #805 --- CHANGELOG.md | 1 + Godeps | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 305fa6d03..9546aba14 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,7 @@ - [#841](https://github.com/influxdata/telegraf/issues/841): Fix memcached unix socket panic. - [#873](https://github.com/influxdata/telegraf/issues/873): Fix SNMP plugin sometimes not returning metrics. Thanks @titiliambert! - [#934](https://github.com/influxdata/telegraf/pull/934): phpfpm: Fix fcgi uri path. Thanks @rudenkovk! +- [#805](https://github.com/influxdata/telegraf/issues/805): Kafka consumer stops gathering after i/o timeout. ## v0.11.1 [2016-03-17] diff --git a/Godeps b/Godeps index 2fc53d8c5..255b95ab5 100644 --- a/Godeps +++ b/Godeps @@ -41,7 +41,7 @@ github.com/shirou/gopsutil 1f32ce1bb380845be7f5d174ac641a2c592c0c42 github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744 github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c -github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3 +github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866 github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8 github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363 golang.org/x/crypto 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3 From 8509101b830ff5a521dd5b99ae1a568edf3a04a3 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 4 Apr 2016 15:16:58 -0600 Subject: [PATCH 225/287] drop cpu_time_* from procstat by default closes #963 --- plugins/inputs/procstat/procstat.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index e5ae207fe..a0e63fd6f 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -43,6 +43,8 @@ var sampleConfig = ` ## Field name prefix prefix = "" + ## comment this out if you want raw cpu_time stats + fielddrop = ["cpu_time_*"] ` func (_ *Procstat) SampleConfig() string { From 5fe8903fd2c229260e140af99d91255531b4f5ef Mon Sep 17 00:00:00 2001 From: Pierre Fersing Date: Mon, 4 Apr 2016 11:59:28 +0200 Subject: [PATCH 226/287] Use timeout smaller than 10 seconds closes #959 --- plugins/inputs/mongodb/mongodb.go | 2 +- plugins/inputs/mongodb/mongodb_test.go | 2 +- plugins/inputs/prometheus/prometheus.go | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index 3be04477b..381814531 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -103,7 +103,7 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error { dialAddrs[0], err.Error()) } dialInfo.Direct = true - dialInfo.Timeout = time.Duration(10) * time.Second + dialInfo.Timeout = 5 * time.Second if m.Ssl.Enabled { tlsConfig := &tls.Config{} diff --git a/plugins/inputs/mongodb/mongodb_test.go b/plugins/inputs/mongodb/mongodb_test.go index 174128d19..73e68ed37 100644 --- a/plugins/inputs/mongodb/mongodb_test.go +++ b/plugins/inputs/mongodb/mongodb_test.go @@ -43,7 +43,7 @@ func testSetup(m *testing.M) { log.Fatalf("Unable to parse URL (%s), %s\n", dialAddrs[0], err.Error()) } dialInfo.Direct = true - dialInfo.Timeout = time.Duration(10) * time.Second + dialInfo.Timeout = 5 * time.Second sess, err := mgo.DialWithInfo(dialInfo) if err != nil { log.Fatalf("Unable to connect to MongoDB, %s\n", err.Error()) diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 460a79faf..1c60a363e 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -80,10 +80,10 @@ func (p *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error { var rt http.RoundTripper = &http.Transport{ Dial: (&net.Dialer{ - Timeout: 10 * time.Second, + Timeout: 5 * time.Second, KeepAlive: 30 * time.Second, }).Dial, - TLSHandshakeTimeout: 10 * time.Second, + TLSHandshakeTimeout: 5 * time.Second, TLSClientConfig: &tls.Config{ InsecureSkipVerify: p.InsecureSkipVerify, }, From d9bb1ceaeca516a1eb60e86297af5622b86e9491 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 4 Apr 2016 16:12:50 -0600 Subject: [PATCH 227/287] Changelog update --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9546aba14..46895cb05 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,7 @@ - [#873](https://github.com/influxdata/telegraf/issues/873): Fix SNMP plugin sometimes not returning metrics. Thanks @titiliambert! - [#934](https://github.com/influxdata/telegraf/pull/934): phpfpm: Fix fcgi uri path. Thanks @rudenkovk! - [#805](https://github.com/influxdata/telegraf/issues/805): Kafka consumer stops gathering after i/o timeout. +- [#959](https://github.com/influxdata/telegraf/pull/959): reduce mongodb & prometheus collection timeouts. Thanks @PierreF! ## v0.11.1 [2016-03-17] From d2d91e713a0727ce0ba7cdbe8027c723aecb3d9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Florent=20Rami=C3=A8re?= Date: Mon, 4 Apr 2016 23:57:52 +0200 Subject: [PATCH 228/287] Add plugin links closes #964 --- README.md | 140 +++++++++++++++++++++++++++--------------------------- 1 file changed, 70 insertions(+), 70 deletions(-) diff --git a/README.md b/README.md index 000b27620..65d2d3226 100644 --- a/README.md +++ b/README.md @@ -156,55 +156,55 @@ more information on each, please look at the directory of the same name in Currently implemented sources: -* aerospike -* apache -* bcache -* couchbase -* couchdb -* disque -* dns query time -* docker -* dovecot -* elasticsearch -* exec (generic executable plugin, support JSON, influx, graphite and nagios) -* haproxy -* httpjson (generic JSON-emitting http service plugin) -* influxdb -* ipmi_sensor -* jolokia -* leofs -* lustre2 -* mailchimp -* memcached -* mesos -* mongodb -* mysql -* net_response -* nginx -* nsq -* ntpq -* phpfpm -* phusion passenger -* ping -* postgresql -* postgresql_extensible -* powerdns -* procstat -* prometheus -* puppetagent -* rabbitmq -* raindrops -* redis -* rethinkdb -* riak -* sensors (only available if built from source) -* snmp -* sql server (microsoft) -* twemproxy -* zfs -* zookeeper -* win_perf_counters (windows performance counters) -* system +* [aerospike](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aerospike) +* [apache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/apache) +* [bcache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/bcache) +* [couchbase](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchbase) +* [couchdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchdb) +* [disque](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/disque) +* [dns query time](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dns_query) +* [docker](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/docker) +* [dovecot](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dovecot) +* [elasticsearch](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/elasticsearch) +* [exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec ) (generic executable plugin, support JSON, influx, graphite and nagios) +* [haproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/haproxy) +* [httpjson ](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/httpjson ) (generic JSON-emitting http service plugin) +* [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb) +* [ipmi_sensor](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ipmi_sensor) +* [jolokia](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia) +* [leofs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/leofs) +* [lustre2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/lustre2) +* [mailchimp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mailchimp) +* [memcached](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/memcached) +* [mesos](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mesos) +* [mongodb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mongodb) +* [mysql](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mysql) +* [net_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/net_response) +* [nginx](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nginx) +* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq) +* [ntpq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ntpq) +* [phpfpm](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/phpfpm) +* [phusion passenger](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/passenger) +* [ping](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ping) +* [postgresql](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/postgresql) +* [postgresql_extensible](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/postgresql_extensible) +* [powerdns](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/powerdns) +* [procstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/procstat) +* [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/prometheus) +* [puppetagent](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/puppetagent) +* [rabbitmq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rabbitmq) +* [raindrops](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/raindrops) +* [redis](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/redis) +* [rethinkdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rethinkdb) +* [riak](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/riak) +* [sensors ](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sensors) (only available if built from source) +* [snmp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp) +* [sql server](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sqlserver) (microsoft) +* [twemproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/twemproxy) +* [zfs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zfs) +* [zookeeper](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zookeeper) +* [win_perf_counters ](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters) (windows performance counters) +* [system](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/system) * cpu * mem * net @@ -217,33 +217,33 @@ Currently implemented sources: Telegraf can also collect metrics via the following service plugins: -* statsd -* udp_listener -* tcp_listener -* mqtt_consumer -* kafka_consumer -* nats_consumer -* github_webhooks +* [statsd](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/statsd) +* [udp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/udp_listener) +* [tcp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tcp_listener) +* [mqtt_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer) +* [kafka_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer) +* [nats_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nats_consumer) +* [github_webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/github_webhooks) We'll be adding support for many more over the coming months. Read on if you want to add support for another service or third-party API. ## Supported Output Plugins -* influxdb -* amon -* amqp -* aws kinesis -* aws cloudwatch -* datadog -* graphite -* kafka -* librato -* mqtt -* nsq -* opentsdb -* prometheus -* riemann +* [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/influxdb) +* [amon](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/amon) +* [amqp](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/amqp) +* [aws kinesis](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kinesis) +* [aws cloudwatch](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/cloudwatch) +* [datadog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/datadog) +* [graphite](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graphite) +* [kafka](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kafka) +* [librato](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/librato) +* [mqtt](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/mqtt) +* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nsq) +* [opentsdb](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb) +* [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/prometheus_client) +* [riemann](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/riemann) ## Contributing From a4a140bfadb667d92c6dfad3334b97d0a1427adb Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 4 Apr 2016 16:30:24 -0600 Subject: [PATCH 229/287] etc/telegraf.conf update for procstat change --- etc/telegraf.conf | 2 ++ 1 file changed, 2 insertions(+) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 633483e22..694bd6564 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -894,6 +894,8 @@ # # ## Field name prefix # prefix = "" +# ## comment this out if you want raw cpu_time stats +# fielddrop = ["cpu_time_*"] # # Read metrics from one or many prometheus clients From 70ef61ac6df9e7da234dbbe9ca488f1c2d770b2b Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 4 Apr 2016 16:34:41 -0600 Subject: [PATCH 230/287] Release 0.12 --- CHANGELOG.md | 2 +- README.md | 112 +++++++++++++++++++++++++-------------------------- 2 files changed, 57 insertions(+), 57 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 46895cb05..754701e44 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## v0.12.0 [unreleased] +## v0.12.0 [2016-04-05] ### Features - [#951](https://github.com/influxdata/telegraf/pull/951): Parse environment variables in the config file. diff --git a/README.md b/README.md index 65d2d3226..9813ca6d4 100644 --- a/README.md +++ b/README.md @@ -20,12 +20,12 @@ new plugins. ### Linux deb and rpm Packages: Latest: -* http://get.influxdb.org/telegraf/telegraf_0.11.1-1_amd64.deb -* http://get.influxdb.org/telegraf/telegraf-0.11.1-1.x86_64.rpm +* http://get.influxdb.org/telegraf/telegraf_0.12.0-1_amd64.deb +* http://get.influxdb.org/telegraf/telegraf-0.12.0-1.x86_64.rpm Latest (arm): -* http://get.influxdb.org/telegraf/telegraf_0.11.1-1_armhf.deb -* http://get.influxdb.org/telegraf/telegraf-0.11.1-1.armhf.rpm +* http://get.influxdb.org/telegraf/telegraf_0.12.0-1_armhf.deb +* http://get.influxdb.org/telegraf/telegraf-0.12.0-1.armhf.rpm ##### Package Instructions: @@ -46,28 +46,28 @@ to use this repo to install & update telegraf. ### Linux tarballs: Latest: -* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_linux_amd64.tar.gz -* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_linux_i386.tar.gz -* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_linux_armhf.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.12.0-1_linux_amd64.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.12.0-1_linux_i386.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.12.0-1_linux_armhf.tar.gz ##### tarball Instructions: To install the full directory structure with config file, run: ``` -sudo tar -C / -zxvf ./telegraf-0.11.1-1_linux_amd64.tar.gz +sudo tar -C / -zxvf ./telegraf-0.12.0-1_linux_amd64.tar.gz ``` To extract only the binary, run: ``` -tar -zxvf telegraf-0.11.1-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf +tar -zxvf telegraf-0.12.0-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf ``` ### FreeBSD tarball: Latest: -* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_freebsd_amd64.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.12.0-1_freebsd_amd64.tar.gz ##### tarball Instructions: @@ -87,8 +87,8 @@ brew install telegraf ### Windows Binaries (EXPERIMENTAL) Latest: -* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_windows_amd64.zip -* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_windows_i386.zip +* http://get.influxdb.org/telegraf/telegraf-0.12.0-1_windows_amd64.zip +* http://get.influxdb.org/telegraf/telegraf-0.12.0-1_windows_i386.zip ### From Source: @@ -156,55 +156,55 @@ more information on each, please look at the directory of the same name in Currently implemented sources: -* [aerospike](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aerospike) -* [apache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/apache) -* [bcache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/bcache) -* [couchbase](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchbase) -* [couchdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchdb) -* [disque](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/disque) -* [dns query time](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dns_query) -* [docker](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/docker) -* [dovecot](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dovecot) -* [elasticsearch](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/elasticsearch) +* [aerospike](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aerospike) +* [apache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/apache) +* [bcache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/bcache) +* [couchbase](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchbase) +* [couchdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchdb) +* [disque](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/disque) +* [dns query time](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dns_query) +* [docker](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/docker) +* [dovecot](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dovecot) +* [elasticsearch](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/elasticsearch) * [exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec ) (generic executable plugin, support JSON, influx, graphite and nagios) -* [haproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/haproxy) +* [haproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/haproxy) * [httpjson ](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/httpjson ) (generic JSON-emitting http service plugin) -* [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb) -* [ipmi_sensor](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ipmi_sensor) -* [jolokia](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia) -* [leofs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/leofs) -* [lustre2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/lustre2) -* [mailchimp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mailchimp) -* [memcached](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/memcached) -* [mesos](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mesos) -* [mongodb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mongodb) -* [mysql](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mysql) -* [net_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/net_response) -* [nginx](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nginx) -* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq) -* [ntpq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ntpq) -* [phpfpm](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/phpfpm) -* [phusion passenger](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/passenger) -* [ping](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ping) -* [postgresql](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/postgresql) -* [postgresql_extensible](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/postgresql_extensible) -* [powerdns](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/powerdns) -* [procstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/procstat) -* [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/prometheus) -* [puppetagent](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/puppetagent) -* [rabbitmq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rabbitmq) -* [raindrops](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/raindrops) -* [redis](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/redis) -* [rethinkdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rethinkdb) -* [riak](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/riak) +* [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb) +* [ipmi_sensor](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ipmi_sensor) +* [jolokia](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia) +* [leofs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/leofs) +* [lustre2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/lustre2) +* [mailchimp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mailchimp) +* [memcached](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/memcached) +* [mesos](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mesos) +* [mongodb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mongodb) +* [mysql](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mysql) +* [net_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/net_response) +* [nginx](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nginx) +* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq) +* [ntpq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ntpq) +* [phpfpm](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/phpfpm) +* [phusion passenger](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/passenger) +* [ping](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ping) +* [postgresql](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/postgresql) +* [postgresql_extensible](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/postgresql_extensible) +* [powerdns](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/powerdns) +* [procstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/procstat) +* [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/prometheus) +* [puppetagent](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/puppetagent) +* [rabbitmq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rabbitmq) +* [raindrops](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/raindrops) +* [redis](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/redis) +* [rethinkdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rethinkdb) +* [riak](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/riak) * [sensors ](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sensors) (only available if built from source) -* [snmp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp) +* [snmp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp) * [sql server](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sqlserver) (microsoft) -* [twemproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/twemproxy) -* [zfs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zfs) -* [zookeeper](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zookeeper) +* [twemproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/twemproxy) +* [zfs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zfs) +* [zookeeper](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zookeeper) * [win_perf_counters ](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters) (windows performance counters) -* [system](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/system) +* [system](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/system) * cpu * mem * net From d871e9aee7d8828c6b11ab61fb76a30a7eda6f71 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 4 Apr 2016 17:43:53 -0600 Subject: [PATCH 231/287] Dummy kernel plugin added for consistent config generation --- etc/telegraf.conf | 5 +++++ plugins/inputs/system/kernel_notlinux.go | 27 ++++++++++++++++++++++++ 2 files changed, 32 insertions(+) create mode 100644 plugins/inputs/system/kernel_notlinux.go diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 694bd6564..3d65aaf62 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -378,6 +378,11 @@ # skip_serial_number = true +# Get kernel statistics from /proc/stat +[[inputs.kernel]] + # no configuration + + # Read metrics about memory usage [[inputs.mem]] # no configuration diff --git a/plugins/inputs/system/kernel_notlinux.go b/plugins/inputs/system/kernel_notlinux.go new file mode 100644 index 000000000..9053b5c04 --- /dev/null +++ b/plugins/inputs/system/kernel_notlinux.go @@ -0,0 +1,27 @@ +// +build !linux + +package system + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type Kernel struct { +} + +func (k *Kernel) Description() string { + return "Get kernel statistics from /proc/stat" +} + +func (k *Kernel) SampleConfig() string { return "" } + +func (k *Kernel) Gather(acc telegraf.Accumulator) error { + return nil +} + +func init() { + inputs.Add("kernel", func() telegraf.Input { + return &Kernel{} + }) +} From 863cbe512d63b9da0001190eb0c9d39ea0c6b24d Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 5 Apr 2016 10:21:57 -0600 Subject: [PATCH 232/287] processes plugin: fix case where there are spaces in cmd name fixes #968 --- CHANGELOG.md | 7 ++++++ plugins/inputs/system/processes.go | 13 ++++++++--- plugins/inputs/system/processes_test.go | 30 +++++++++++++++++++++++++ 3 files changed, 47 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 754701e44..82a224f21 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +## v0.12.1 [unreleased] + +### Features + +### Bugfixes +- [#968](https://github.com/influxdata/telegraf/issues/968): Processes plugin gets unknown state when spaces are in (command name) + ## v0.12.0 [2016-04-05] ### Features diff --git a/plugins/inputs/system/processes.go b/plugins/inputs/system/processes.go index aae0e6ba4..8c50a4ebd 100644 --- a/plugins/inputs/system/processes.go +++ b/plugins/inputs/system/processes.go @@ -144,11 +144,18 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error { continue } + // Parse out data after () + i := bytes.LastIndex(data, []byte(")")) + if i == -1 { + continue + } + data = data[i+2:] + stats := bytes.Fields(data) if len(stats) < 3 { return fmt.Errorf("Something is terribly wrong with %s", statFile) } - switch stats[2][0] { + switch stats[0][0] { case 'R': fields["running"] = fields["running"].(int64) + int64(1) case 'S': @@ -163,11 +170,11 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error { fields["paging"] = fields["paging"].(int64) + int64(1) default: log.Printf("processes: Unknown state [ %s ] in file %s", - string(stats[2][0]), statFile) + string(stats[0][0]), statFile) } fields["total"] = fields["total"].(int64) + int64(1) - threads, err := strconv.Atoi(string(stats[19])) + threads, err := strconv.Atoi(string(stats[17])) if err != nil { log.Printf("processes: Error parsing thread count: %s", err) continue diff --git a/plugins/inputs/system/processes_test.go b/plugins/inputs/system/processes_test.go index de9b6aa5b..eef52cd67 100644 --- a/plugins/inputs/system/processes_test.go +++ b/plugins/inputs/system/processes_test.go @@ -82,6 +82,28 @@ func TestFromProcFiles(t *testing.T) { acc.AssertContainsTaggedFields(t, "processes", fields, map[string]string{}) } +func TestFromProcFilesWithSpaceInCmd(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skip("This test only runs on linux") + } + tester := tester{} + processes := &Processes{ + readProcFile: tester.testProcFile2, + forceProc: true, + } + + var acc testutil.Accumulator + err := processes.Gather(&acc) + require.NoError(t, err) + + fields := getEmptyFields() + fields["sleeping"] = tester.calls + fields["total_threads"] = tester.calls * 2 + fields["total"] = tester.calls + + acc.AssertContainsTaggedFields(t, "processes", fields, map[string]string{}) +} + func testExecPS() ([]byte, error) { return []byte(testPSOut), nil } @@ -96,6 +118,11 @@ func (t *tester) testProcFile(_ string) ([]byte, error) { return []byte(fmt.Sprintf(testProcStat, "S", "2")), nil } +func (t *tester) testProcFile2(_ string) ([]byte, error) { + t.calls++ + return []byte(fmt.Sprintf(testProcStat2, "S", "2")), nil +} + func testExecPSError() ([]byte, error) { return []byte(testPSOut), fmt.Errorf("ERROR!") } @@ -149,3 +176,6 @@ S+ const testProcStat = `10 (rcuob/0) %s 2 0 0 0 -1 2129984 0 0 0 0 0 0 0 0 20 0 %s 0 11 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ` + +const testProcStat2 = `10 (rcuob 0) %s 2 0 0 0 -1 2129984 0 0 0 0 0 0 0 0 20 0 %s 0 11 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +` From bcf1fc658dbbac8f86b5d457cbfe93a225b3c88a Mon Sep 17 00:00:00 2001 From: Armin Wolfermann Date: Tue, 5 Apr 2016 15:24:24 +0200 Subject: [PATCH 233/287] ipmi_sensors: Allow : in password closes #969 --- CHANGELOG.md | 1 + plugins/inputs/ipmi_sensor/connection.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 82a224f21..b7b327fde 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ ### Bugfixes - [#968](https://github.com/influxdata/telegraf/issues/968): Processes plugin gets unknown state when spaces are in (command name) +- [#969](https://github.com/influxdata/telegraf/issues/968): ipmi_sensors: allow : in password. Thanks @awaw! ## v0.12.0 [2016-04-05] diff --git a/plugins/inputs/ipmi_sensor/connection.go b/plugins/inputs/ipmi_sensor/connection.go index 3f4461438..1e9bfbdcb 100644 --- a/plugins/inputs/ipmi_sensor/connection.go +++ b/plugins/inputs/ipmi_sensor/connection.go @@ -28,7 +28,7 @@ func NewConnection(server string) *Connection { if inx1 > 0 { security := server[0:inx1] connstr = server[inx1+1 : len(server)] - up := strings.Split(security, ":") + up := strings.SplitN(security, ":", 2) conn.Username = up[0] conn.Password = up[1] } From 73bd98df577ebce5685930787191eac35dc2f9cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martti=20Rannanj=C3=A4rvi?= Date: Tue, 5 Apr 2016 16:56:00 +0300 Subject: [PATCH 234/287] dovecot: remove extra newline from stats query Extra newline in the stats query is interpreted as an empty query which is an error for dovecot. closes #972 --- CHANGELOG.md | 3 ++- plugins/inputs/dovecot/dovecot.go | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b7b327fde..a742541ab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,8 @@ ### Bugfixes - [#968](https://github.com/influxdata/telegraf/issues/968): Processes plugin gets unknown state when spaces are in (command name) -- [#969](https://github.com/influxdata/telegraf/issues/968): ipmi_sensors: allow : in password. Thanks @awaw! +- [#969](https://github.com/influxdata/telegraf/pull/969): ipmi_sensors: allow : in password. Thanks @awaw! +- [#972](https://github.com/influxdata/telegraf/pull/972): dovecot: remove extra newline in dovecot command. Thanks @mrannanj! ## v0.12.0 [2016-04-05] diff --git a/plugins/inputs/dovecot/dovecot.go b/plugins/inputs/dovecot/dovecot.go index 3a6607da9..bf1b20269 100644 --- a/plugins/inputs/dovecot/dovecot.go +++ b/plugins/inputs/dovecot/dovecot.go @@ -85,7 +85,7 @@ func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, doms map[s // Extend connection c.SetDeadline(time.Now().Add(defaultTimeout)) - c.Write([]byte("EXPORT\tdomain\n\n")) + c.Write([]byte("EXPORT\tdomain\n")) var buf bytes.Buffer io.Copy(&buf, c) // buf := bufio.NewReader(c) From 03f2a35b31aeb2a3bc39b21c49dccc43ca7a0090 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 5 Apr 2016 13:54:02 -0600 Subject: [PATCH 235/287] Update jolokia plugin readme --- plugins/inputs/jolokia/README.md | 62 ++++++++++++-------------------- 1 file changed, 23 insertions(+), 39 deletions(-) diff --git a/plugins/inputs/jolokia/README.md b/plugins/inputs/jolokia/README.md index bda0c5f93..3a528b33f 100644 --- a/plugins/inputs/jolokia/README.md +++ b/plugins/inputs/jolokia/README.md @@ -1,16 +1,28 @@ # Telegraf plugin: Jolokia -#### Plugin arguments: -- **context** string: Context root used of jolokia url -- **servers** []Server: List of servers - + **name** string: Server's logical name - + **host** string: Server's ip address or hostname - + **port** string: Server's listening port -- **metrics** []Metric - + **name** string: Name of the measure - + **jmx** string: Jmx path that identifies mbeans attributes - + **pass** []string: Attributes to retain when collecting values - + **drop** []string: Attributes to drop when collecting values +#### Configuration + +```toml +[[inputs.jolokia]] + ## This is the context root used to compose the jolokia url + context = "/jolokia/read" + + ## List of servers exposing jolokia read service + [[inputs.jolokia.servers]] + name = "stable" + host = "192.168.103.2" + port = "8180" + # username = "myuser" + # password = "mypassword" + + ## List of metrics collected on above servers + ## Each metric consists in a name, a jmx path and either + ## a pass or drop slice attribute. + ## This collect all heap memory usage metrics. + [[inputs.jolokia.metrics]] + name = "heap_memory_usage" + jmx = "/java.lang:type=Memory/HeapMemoryUsage" +``` #### Description @@ -21,31 +33,3 @@ See: https://jolokia.org/ # Measurements: Jolokia plugin produces one measure for each metric configured, adding Server's `name`, `host` and `port` as tags. - -Given a configuration like: - -```ini -[jolokia] - -[[jolokia.servers]] - name = "as-service-1" - host = "127.0.0.1" - port = "8080" - -[[jolokia.servers]] - name = "as-service-2" - host = "127.0.0.1" - port = "8180" - -[[jolokia.metrics]] - name = "heap_memory_usage" - jmx = "/java.lang:type=Memory/HeapMemoryUsage" - pass = ["used", "max"] -``` - -The collected metrics will be: - -``` -jolokia_heap_memory_usage name=as-service-1,host=127.0.0.1,port=8080 used=xxx,max=yyy -jolokia_heap_memory_usage name=as-service-2,host=127.0.0.1,port=8180 used=vvv,max=zzz -``` From 4dd364e1c3d764e07f4f25c9e2cd92c5c32897e1 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 5 Apr 2016 14:42:20 -0600 Subject: [PATCH 236/287] Update all readme instances of data formats --- CONTRIBUTING.md | 4 ++-- docs/DATA_FORMATS_INPUT.md | 16 ++++++++------ docs/DATA_FORMATS_OUTPUT.md | 12 ++++++---- plugins/inputs/exec/README.md | 29 ++++++++++--------------- plugins/inputs/kafka_consumer/README.md | 3 ++- plugins/inputs/mqtt_consumer/README.md | 2 +- plugins/inputs/nats_consumer/README.md | 3 ++- plugins/inputs/tcp_listener/README.md | 3 ++- plugins/inputs/udp_listener/README.md | 3 ++- 9 files changed, 40 insertions(+), 35 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 68c9da478..3997a448e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -114,7 +114,7 @@ creating the `Parser` object. You should also add the following to your SampleConfig() return: ```toml - ## Data format to consume. This can be "json", "influx" or "graphite" + ## Data format to consume. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md @@ -244,7 +244,7 @@ instantiating and creating the `Serializer` object. You should also add the following to your SampleConfig() return: ```toml - ## Data format to output. This can be "influx" or "graphite" + ## Data format to output. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index 589db53a3..3f970ec38 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -5,7 +5,8 @@ Telegraf is able to parse the following input data formats into metrics: 1. InfluxDB Line Protocol 1. JSON 1. Graphite -1. Value, ie 45 or "booyah" +1. Value, ie: 45 or "booyah" +1. Nagios (exec input only) Telegraf metrics, like InfluxDB [points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/), @@ -38,7 +39,7 @@ example, in the exec plugin: ## measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ## Data format to consume. This can be "json", "influx" or "graphite" + ## Data format to consume. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md @@ -65,7 +66,7 @@ metrics are parsed directly into Telegraf metrics. ## measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ## Data format to consume. This can be "json", "influx" or "graphite" + ## Data format to consume. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md @@ -110,7 +111,7 @@ For example, if you had this configuration: ## measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ## Data format to consume. This can be "json", "influx" or "graphite" + ## Data format to consume. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md @@ -165,7 +166,7 @@ plugin. ## override the default metric name of "exec" name_override = "entropy_available" - ## Data format to consume. This can be "json", "value", influx" or "graphite" + ## Data format to consume. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md @@ -301,7 +302,8 @@ There are many more options available, ## measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ## Data format to consume. This can be "json", "influx" or "graphite" (line-protocol) + ## Data format to consume. + (line-protocol) ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md @@ -344,7 +346,7 @@ Note: Nagios Input Data Formats is only supported in `exec` input plugin. ## measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ## Data format to consume. This can be "json", "influx", "graphite" or "nagios" + ## Data format to consume. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md diff --git a/docs/DATA_FORMATS_OUTPUT.md b/docs/DATA_FORMATS_OUTPUT.md index a75816a71..28f8cd6c3 100644 --- a/docs/DATA_FORMATS_OUTPUT.md +++ b/docs/DATA_FORMATS_OUTPUT.md @@ -29,7 +29,8 @@ config option, for example, in the `file` output plugin: ## Files to write to, "stdout" is a specially handled file. files = ["stdout"] - ## Data format to output. This can be "influx" or "graphite" + ## Data format to output. + ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md @@ -53,7 +54,8 @@ metrics are serialized directly into InfluxDB line-protocol. ## Files to write to, "stdout" is a specially handled file. files = ["stdout", "/tmp/metrics.out"] - ## Data format to output. This can be "influx", "json" or "graphite" + ## Data format to output. + ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md @@ -87,7 +89,8 @@ tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690 ## Files to write to, "stdout" is a specially handled file. files = ["stdout", "/tmp/metrics.out"] - ## Data format to output. This can be "influx", "json" or "graphite" + ## Data format to output. + ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md @@ -123,7 +126,8 @@ The Json data format serialized Telegraf metrics in json format. The format is: ## Files to write to, "stdout" is a specially handled file. files = ["stdout", "/tmp/metrics.out"] - ## Data format to output. This can be "influx", "json" or "graphite" + ## Data format to output. + ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md diff --git a/plugins/inputs/exec/README.md b/plugins/inputs/exec/README.md index 730da1fd5..9912c4a48 100644 --- a/plugins/inputs/exec/README.md +++ b/plugins/inputs/exec/README.md @@ -2,18 +2,11 @@ Please also see: [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md) -The exec input plugin can execute arbitrary commands which output: - -* JSON [javascript object notation](http://www.json.org/) -* InfluxDB [line-protocol](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/) -* Graphite [graphite-protocol](http://graphite.readthedocs.org/en/latest/feeding-carbon.html) - - ### Example 1 - JSON #### Configuration -In this example a script called ```/tmp/test.sh``` and a script called ```/tmp/test2.sh``` +In this example a script called ```/tmp/test.sh``` and a script called ```/tmp/test2.sh``` are configured for ```[[inputs.exec]]``` in JSON format. ``` @@ -22,7 +15,8 @@ are configured for ```[[inputs.exec]]``` in JSON format. # Shell/commands array commands = ["/tmp/test.sh", "/tmp/test2.sh"] - # Data format to consume. This can be "json", "influx" or "graphite" (line-protocol) + # Data format to consume. + (line-protocol) # NOTE json only reads numerical measurements, strings and booleans are ignored. data_format = "json" @@ -81,7 +75,7 @@ and strings will be ignored. ### Example 2 - Influx Line-Protocol In this example an application called ```/usr/bin/line_protocol_collector``` -and a script called ```/tmp/test2.sh``` are configured for ```[[inputs.exec]]``` +and a script called ```/tmp/test2.sh``` are configured for ```[[inputs.exec]]``` in influx line-protocol format. #### Configuration @@ -94,7 +88,7 @@ in influx line-protocol format. # command = "/usr/bin/line_protocol_collector" commands = ["/usr/bin/line_protocol_collector","/tmp/test2.sh"] - # Data format to consume. This can be "json" or "influx" (line-protocol) + # Data format to consume. # NOTE json only reads numerical measurements, strings and booleans are ignored. data_format = "influx" ``` @@ -113,7 +107,7 @@ cpu,cpu=cpu6,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 You will get data in InfluxDB exactly as it is defined above, tags are cpu=cpuN, host=foo, and datacenter=us-east with fields usage_idle -and usage_busy. They will receive a timestamp at collection time. +and usage_busy. They will receive a timestamp at collection time. Each line must end in \n, just as the Influx line protocol does. @@ -121,8 +115,8 @@ Each line must end in \n, just as the Influx line protocol does. We can also change the data_format to "graphite" to use the metrics collecting scripts such as (compatible with graphite): -* Nagios [Mertics Plugins] (https://exchange.nagios.org/directory/Plugins) -* Sensu [Mertics Plugins] (https://github.com/sensu-plugins) +* Nagios [Metrics Plugins](https://exchange.nagios.org/directory/Plugins) +* Sensu [Metrics Plugins](https://github.com/sensu-plugins) In this example a script called /tmp/test.sh and a script called /tmp/test2.sh are configured for [[inputs.exec]] in graphite format. @@ -133,7 +127,8 @@ In this example a script called /tmp/test.sh and a script called /tmp/test2.sh a # Shell/commands array commands = ["/tmp/test.sh","/tmp/test2.sh"] - # Data format to consume. This can be "json", "influx" or "graphite" (line-protocol) + # Data format to consume. + (line-protocol) # NOTE json only reads numerical measurements, strings and booleans are ignored. data_format = "graphite" @@ -186,5 +181,5 @@ sensu.metric.net.server0.eth0.rx_dropped 0 1444234982 The templates configuration will be used to parse the graphite metrics to support influxdb/opentsdb tagging store engines. -More detail information about templates, please refer to [The graphite Input] (https://github.com/influxdata/influxdb/blob/master/services/graphite/README.md) - +More detail information about templates, please refer to [The graphite Input](https://github.com/influxdata/influxdb/blob/master/services/graphite/README.md) + diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index 885c67a28..f5f6a359e 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -22,7 +22,8 @@ from the same topic in parallel. ## Offset (must be either "oldest" or "newest") offset = "oldest" - ## Data format to consume. This can be "json", "influx" or "graphite" + ## Data format to consume. + ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/mqtt_consumer/README.md b/plugins/inputs/mqtt_consumer/README.md index 787494975..d5518b632 100644 --- a/plugins/inputs/mqtt_consumer/README.md +++ b/plugins/inputs/mqtt_consumer/README.md @@ -35,7 +35,7 @@ The plugin expects messages in the ## Use SSL but skip chain & host verification # insecure_skip_verify = false - ## Data format to consume. This can be "json", "influx" or "graphite" + ## Data format to consume. ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/nats_consumer/README.md b/plugins/inputs/nats_consumer/README.md index 90563ff55..31d947588 100644 --- a/plugins/inputs/nats_consumer/README.md +++ b/plugins/inputs/nats_consumer/README.md @@ -23,7 +23,8 @@ from a NATS cluster in parallel. ## Maximum number of metrics to buffer between collection intervals metric_buffer = 100000 - ## Data format to consume. This can be "json", "influx" or "graphite" + ## Data format to consume. + ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/tcp_listener/README.md b/plugins/inputs/tcp_listener/README.md index 63a7dea3c..d2dfeb575 100644 --- a/plugins/inputs/tcp_listener/README.md +++ b/plugins/inputs/tcp_listener/README.md @@ -22,7 +22,8 @@ This is a sample configuration for the plugin. ## Maximum number of concurrent TCP connections to allow max_tcp_connections = 250 - ## Data format to consume. This can be "json", "influx" or "graphite" + ## Data format to consume. + ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/udp_listener/README.md b/plugins/inputs/udp_listener/README.md index 724ae43ae..1dd03a2a7 100644 --- a/plugins/inputs/udp_listener/README.md +++ b/plugins/inputs/udp_listener/README.md @@ -23,7 +23,8 @@ This is a sample configuration for the plugin. ## usually 1500 bytes. udp_packet_size = 1500 - ## Data format to consume. This can be "json", "influx" or "graphite" + ## Data format to consume. + ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md From 40f2dd8c6c33541e52705b41cec163d3b508fef7 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 5 Apr 2016 15:22:58 -0600 Subject: [PATCH 237/287] Readme fixup for exec plugin --- plugins/inputs/exec/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/inputs/exec/README.md b/plugins/inputs/exec/README.md index 9912c4a48..549d54d68 100644 --- a/plugins/inputs/exec/README.md +++ b/plugins/inputs/exec/README.md @@ -128,7 +128,6 @@ In this example a script called /tmp/test.sh and a script called /tmp/test2.sh a commands = ["/tmp/test.sh","/tmp/test2.sh"] # Data format to consume. - (line-protocol) # NOTE json only reads numerical measurements, strings and booleans are ignored. data_format = "graphite" From 7e97787d9d7b1209ce21f8372aecf0aaab045607 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 5 Apr 2016 16:17:45 -0600 Subject: [PATCH 238/287] More readme fixups --- docs/DATA_FORMATS_INPUT.md | 1 - plugins/inputs/exec/README.md | 1 - 2 files changed, 2 deletions(-) diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index 3f970ec38..b282d1f8f 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -303,7 +303,6 @@ There are many more options available, name_suffix = "_mycollector" ## Data format to consume. - (line-protocol) ## Each data format has it's own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/exec/README.md b/plugins/inputs/exec/README.md index 549d54d68..a75ae7856 100644 --- a/plugins/inputs/exec/README.md +++ b/plugins/inputs/exec/README.md @@ -16,7 +16,6 @@ are configured for ```[[inputs.exec]]``` in JSON format. commands = ["/tmp/test.sh", "/tmp/test2.sh"] # Data format to consume. - (line-protocol) # NOTE json only reads numerical measurements, strings and booleans are ignored. data_format = "json" From 64066c4ea87e52f6d15ff3e7a9783545834fcdaf Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 5 Apr 2016 16:25:50 -0600 Subject: [PATCH 239/287] Update input data format readme --- docs/DATA_FORMATS_INPUT.md | 37 ++++++++++++++++++------------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index b282d1f8f..6a916711b 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -2,11 +2,11 @@ Telegraf is able to parse the following input data formats into metrics: -1. InfluxDB Line Protocol -1. JSON -1. Graphite -1. Value, ie: 45 or "booyah" -1. Nagios (exec input only) +1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx) +1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#json) +1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite) +1. [Value](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#value), ie: 45 or "booyah" +1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only) Telegraf metrics, like InfluxDB [points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/), @@ -51,7 +51,7 @@ example, in the exec plugin: Each data_format has an additional set of configuration options available, which I'll go over below. -## Influx: +# Influx: There are no additional configuration options for InfluxDB line-protocol. The metrics are parsed directly into Telegraf metrics. @@ -73,7 +73,7 @@ metrics are parsed directly into Telegraf metrics. data_format = "influx" ``` -## JSON: +# JSON: The JSON data format flattens JSON into metric _fields_. For example, this JSON: @@ -142,21 +142,20 @@ Your Telegraf metrics would get tagged with "my_tag_1" exec_mycollector,my_tag_1=foo a=5,b_c=6 ``` -## Value: +# Value: The "value" data format translates single values into Telegraf metrics. This -is done by assigning a measurement name (which can be overridden using the -`name_override` config option), and setting a single field ("value") as the -parsed metric. +is done by assigning a measurement name and setting a single field ("value") +as the parsed metric. #### Value Configuration: -You can tell Telegraf what type of metric to collect by using the `data_type` -configuration option. +You **must** tell Telegraf what type of metric to collect by using the +`data_type` configuration option. -It is also recommended that you set `name_override` to a measurement name that -makes sense for your metric, otherwise it will just be set to the name of the -plugin. +**Note:** It is also recommended that you set `name_override` to a measurement +name that makes sense for your metric, otherwise it will just be set to the +name of the plugin. ```toml [[inputs.exec]] @@ -171,10 +170,10 @@ plugin. ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "value" - data_type = "integer" + data_type = "integer" # required ``` -## Graphite: +# Graphite: The Graphite data format translates graphite _dot_ buckets directly into telegraf measurement names, with a single value field, and without any tags. For @@ -328,7 +327,7 @@ There are many more options available, ] ``` -## Nagios: +# Nagios: There are no additional configuration options for Nagios line-protocol. The metrics are parsed directly into Telegraf metrics. From 30464396d9750cadfdf301088d0d8c3905f6f576 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 5 Apr 2016 10:37:21 -0600 Subject: [PATCH 240/287] Make the UDP input buffer only once --- CHANGELOG.md | 1 + plugins/inputs/statsd/statsd.go | 17 ++++++++--------- plugins/inputs/udp_listener/udp_listener.go | 15 +++++++-------- 3 files changed, 16 insertions(+), 17 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a742541ab..9c7d0f507 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ ## v0.12.1 [unreleased] ### Features +- [#976](https://github.com/influxdata/telegraf/pull/976): Reduce allocations in the UDP and statsd inputs. ### Bugfixes - [#968](https://github.com/influxdata/telegraf/issues/968): Processes plugin gets unknown state when spaces are in (command name) diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index d31e6bfc9..84687511e 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -18,7 +18,9 @@ import ( ) const ( - UDP_PACKET_SIZE int = 1500 + // UDP packet limit, see + // https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure + UDP_PACKET_SIZE int = 65507 defaultFieldName = "value" @@ -157,10 +159,6 @@ const sampleConfig = ` ## calculation of percentiles. Raising this limit increases the accuracy ## of percentiles but also increases the memory usage and cpu time. percentile_limit = 1000 - - ## UDP packet size for the server to listen for. This will depend on the size - ## of the packets that the client is sending, which is usually 1500 bytes. - udp_packet_size = 1500 ` func (_ *Statsd) SampleConfig() string { @@ -274,12 +272,12 @@ func (s *Statsd) udpListen() error { } log.Println("Statsd listener listening on: ", s.listener.LocalAddr().String()) + buf := make([]byte, s.UDPPacketSize) for { select { case <-s.done: return nil default: - buf := make([]byte, s.UDPPacketSize) n, _, err := s.listener.ReadFromUDP(buf) if err != nil && !strings.Contains(err.Error(), "closed network") { log.Printf("ERROR READ: %s\n", err.Error()) @@ -300,11 +298,12 @@ func (s *Statsd) udpListen() error { // single statsd metric into a struct. func (s *Statsd) parser() error { defer s.wg.Done() + var packet []byte for { select { case <-s.done: return nil - case packet := <-s.in: + case packet = <-s.in: lines := strings.Split(string(packet), "\n") for _, line := range lines { line = strings.TrimSpace(line) @@ -631,8 +630,8 @@ func (s *Statsd) Stop() { func init() { inputs.Add("statsd", func() telegraf.Input { return &Statsd{ - ConvertNames: true, - UDPPacketSize: UDP_PACKET_SIZE, + MetricSeparator: "_", + UDPPacketSize: UDP_PACKET_SIZE, } }) } diff --git a/plugins/inputs/udp_listener/udp_listener.go b/plugins/inputs/udp_listener/udp_listener.go index 794f1791d..442cf98b3 100644 --- a/plugins/inputs/udp_listener/udp_listener.go +++ b/plugins/inputs/udp_listener/udp_listener.go @@ -30,7 +30,9 @@ type UdpListener struct { listener *net.UDPConn } -const UDP_PACKET_SIZE int = 1500 +// UDP packet limit, see +// https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure +const UDP_PACKET_SIZE int = 65507 var dropwarn = "ERROR: Message queue full. Discarding line [%s] " + "You may want to increase allowed_pending_messages in the config\n" @@ -43,11 +45,6 @@ const sampleConfig = ` ## UDP listener will start dropping packets. allowed_pending_messages = 10000 - ## UDP packet size for the server to listen for. This will depend - ## on the size of the packets that the client is sending, which is - ## usually 1500 bytes, but can be as large as 65,535 bytes. - udp_packet_size = 1500 - ## Data format to consume. ## Each data format has it's own unique set of configuration options, read ## more about them here: @@ -107,12 +104,12 @@ func (u *UdpListener) udpListen() error { } log.Println("UDP server listening on: ", u.listener.LocalAddr().String()) + buf := make([]byte, u.UDPPacketSize) for { select { case <-u.done: return nil default: - buf := make([]byte, u.UDPPacketSize) n, _, err := u.listener.ReadFromUDP(buf) if err != nil && !strings.Contains(err.Error(), "closed network") { log.Printf("ERROR: %s\n", err.Error()) @@ -130,11 +127,13 @@ func (u *UdpListener) udpListen() error { func (u *UdpListener) udpParser() error { defer u.wg.Done() + + var packet []byte for { select { case <-u.done: return nil - case packet := <-u.in: + case packet = <-u.in: metrics, err := u.parser.Parse(packet) if err == nil { u.storeMetrics(metrics) From 0f16c0f4cf58b8a7ebb096e26d541c5d896269e8 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 5 Apr 2016 14:35:14 -0600 Subject: [PATCH 241/287] Reduce TCP listener allocations --- CHANGELOG.md | 1 + plugins/inputs/tcp_listener/tcp_listener.go | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9c7d0f507..5a4552698 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ### Features - [#976](https://github.com/influxdata/telegraf/pull/976): Reduce allocations in the UDP and statsd inputs. +- [#979](https://github.com/influxdata/telegraf/pull/979): Reduce allocations in the TCP listener. ### Bugfixes - [#968](https://github.com/influxdata/telegraf/issues/968): Processes plugin gets unknown state when spaces are in (command name) diff --git a/plugins/inputs/tcp_listener/tcp_listener.go b/plugins/inputs/tcp_listener/tcp_listener.go index a1b991058..4559a3bf5 100644 --- a/plugins/inputs/tcp_listener/tcp_listener.go +++ b/plugins/inputs/tcp_listener/tcp_listener.go @@ -39,7 +39,7 @@ type TcpListener struct { acc telegraf.Accumulator } -var dropwarn = "ERROR: Message queue full. Discarding line [%s] " + +var dropwarn = "ERROR: Message queue full. Discarding metric. " + "You may want to increase allowed_pending_messages in the config\n" const sampleConfig = ` @@ -202,11 +202,10 @@ func (t *TcpListener) handler(conn *net.TCPConn, id string) { if !scanner.Scan() { return } - buf := scanner.Bytes() select { - case t.in <- buf: + case t.in <- scanner.Bytes(): default: - log.Printf(dropwarn, string(buf)) + log.Printf(dropwarn) } } } @@ -215,11 +214,12 @@ func (t *TcpListener) handler(conn *net.TCPConn, id string) { // tcpParser parses the incoming tcp byte packets func (t *TcpListener) tcpParser() error { defer t.wg.Done() + var packet []byte for { select { case <-t.done: return nil - case packet := <-t.in: + case packet = <-t.in: if len(packet) == 0 { continue } From 9320a6e115b0bc2d7a832ae56ef0c8329df9db79 Mon Sep 17 00:00:00 2001 From: Ricard Clau Date: Sat, 2 Apr 2016 11:28:44 +0100 Subject: [PATCH 242/287] windows service docs closes #954 --- docs/WINDOWS_SERVICE.md | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 docs/WINDOWS_SERVICE.md diff --git a/docs/WINDOWS_SERVICE.md b/docs/WINDOWS_SERVICE.md new file mode 100644 index 000000000..679a41527 --- /dev/null +++ b/docs/WINDOWS_SERVICE.md @@ -0,0 +1,36 @@ +# Running Telegraf as a Windows Service + +If you have tried to install Go binaries as Windows Services with the **sc.exe** +tool you may have seen that the service errors and stops running after a while. + +**NSSM** (the Non-Sucking Service Manager) is a tool that helps you in a +[number of scenarios](http://nssm.cc/scenarios) including running Go binaries +that were not specifically designed to run only in Windows platforms. + +## NSSM Installation via Chocolatey + +You can install [Chocolatey](https://chocolatey.org/) and [NSSM](http://nssm.cc/) +with these commands + +```powershell +iex ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1')) +choco install -y nssm +``` + +## Installing Telegraf as a Windows Service with NSSM + +You can download the latest Telegraf Windows binaries (still Experimental at +the moment) from [the Telegraf Github repo](https://github.com/influxdata/telegraf). + +Then you can create a C:\telegraf folder, unzip the binary there and modify the +**telegraf.conf** sample to allocate the metrics you want to send to **InfluxDB**. + +Once you have NSSM installed in your system, the process is quite straightforward. +You only need to type this command in your Windows shell + +```powershell +nssm install Telegraf c:\telegraf\telegraf.exe -config c:\telegraf\telegraf.config +``` + +And now your service will be installed in Windows and you will be able to start and +stop it gracefully \ No newline at end of file From 32213cad018efb46f8296d9927b15bfcd1195ea1 Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Sun, 3 Apr 2016 00:34:34 +0200 Subject: [PATCH 243/287] input(docker): docker/engine-api * Made required changes to get it to compile * First manual tests looking good, still unit tests need fixing * Made go linter happier --- plugins/inputs/docker/docker.go | 184 +++++++++++++++----------------- 1 file changed, 89 insertions(+), 95 deletions(-) diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index cdc8ec1e5..b1bbc63b3 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -3,6 +3,7 @@ package system import ( "encoding/json" "fmt" + "io" "log" "regexp" "strconv" @@ -10,12 +11,15 @@ import ( "sync" "time" + "golang.org/x/net/context" + + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - - "github.com/fsouza/go-dockerclient" ) +// Docker object type Docker struct { Endpoint string ContainerNames []string @@ -23,14 +27,14 @@ type Docker struct { client DockerClient } +// DockerClient interface, useful for testing type DockerClient interface { - // Docker Client wrapper - // Useful for test - Info() (*docker.Env, error) - ListContainers(opts docker.ListContainersOptions) ([]docker.APIContainers, error) - Stats(opts docker.StatsOptions) error + Info(ctx context.Context) (types.Info, error) + ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error) } +// KB, MB, GB, TB, PB...human friendly const ( KB = 1000 MB = 1000 * KB @@ -52,28 +56,32 @@ var sampleConfig = ` container_names = [] ` +// Description returns input description func (d *Docker) Description() string { return "Read metrics about docker containers" } +// SampleConfig prints sampleConfig func (d *Docker) SampleConfig() string { return sampleConfig } +// Gather starts stats collection func (d *Docker) Gather(acc telegraf.Accumulator) error { if d.client == nil { - var c *docker.Client + var c *client.Client var err error + defaultHeaders := map[string]string{"User-Agent": "engine-api-cli-1.0"} if d.Endpoint == "ENV" { - c, err = docker.NewClientFromEnv() + c, err = client.NewEnvClient() if err != nil { return err } } else if d.Endpoint == "" { - c, err = docker.NewClient("unix:///var/run/docker.sock") + c, err = client.NewClient("unix:///var/run/docker.sock", "", nil, defaultHeaders) if err != nil { return err } } else { - c, err = docker.NewClient(d.Endpoint) + c, err = client.NewClient(d.Endpoint, "", nil, defaultHeaders) if err != nil { return err } @@ -88,8 +96,8 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { } // List containers - opts := docker.ListContainersOptions{} - containers, err := d.client.ListContainers(opts) + opts := types.ContainerListOptions{} + containers, err := d.client.ContainerList(context.Background(), opts) if err != nil { return err } @@ -99,7 +107,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { wg.Add(len(containers)) for _, container := range containers { - go func(c docker.APIContainers) { + go func(c types.Container) { defer wg.Done() err := d.gatherContainer(c, acc) if err != nil { @@ -114,23 +122,22 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { // Init vars - var driverStatus [][]string dataFields := make(map[string]interface{}) metadataFields := make(map[string]interface{}) now := time.Now() // Get info from docker daemon - info, err := d.client.Info() + info, err := d.client.Info(context.Background()) if err != nil { return err } fields := map[string]interface{}{ - "n_cpus": info.GetInt64("NCPU"), - "n_used_file_descriptors": info.GetInt64("NFd"), - "n_containers": info.GetInt64("Containers"), - "n_images": info.GetInt64("Images"), - "n_goroutines": info.GetInt64("NGoroutines"), - "n_listener_events": info.GetInt64("NEventsListener"), + "n_cpus": info.NCPU, + "n_used_file_descriptors": info.NFd, + "n_containers": info.Containers, + "n_images": info.Images, + "n_goroutines": info.NGoroutines, + "n_listener_events": info.NEventsListener, } // Add metrics acc.AddFields("docker", @@ -138,13 +145,13 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { nil, now) acc.AddFields("docker", - map[string]interface{}{"memory_total": info.GetInt64("MemTotal")}, + map[string]interface{}{"memory_total": info.MemTotal}, map[string]string{"unit": "bytes"}, now) // Get storage metrics - driverStatusRaw := []byte(info.Get("DriverStatus")) - json.Unmarshal(driverStatusRaw, &driverStatus) - for _, rawData := range driverStatus { + //driverStatusRaw := []byte(info.DriverStatus) + //json.Unmarshal(driverStatusRaw, &driverStatus) + for _, rawData := range info.DriverStatus { // Try to convert string to int (bytes) value, err := parseSize(rawData[1]) if err != nil { @@ -159,12 +166,12 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { now) } else if strings.HasPrefix(name, "data_space_") { // data space - field_name := strings.TrimPrefix(name, "data_space_") - dataFields[field_name] = value + fieldName := strings.TrimPrefix(name, "data_space_") + dataFields[fieldName] = value } else if strings.HasPrefix(name, "metadata_space_") { // metadata space - field_name := strings.TrimPrefix(name, "metadata_space_") - metadataFields[field_name] = value + fieldName := strings.TrimPrefix(name, "metadata_space_") + metadataFields[fieldName] = value } } if len(dataFields) > 0 { @@ -183,9 +190,10 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { } func (d *Docker) gatherContainer( - container docker.APIContainers, + container types.Container, acc telegraf.Accumulator, ) error { + var v *types.StatsJSON // Parse container name cname := "unknown" if len(container.Names) > 0 { @@ -204,28 +212,14 @@ func (d *Docker) gatherContainer( } } - statChan := make(chan *docker.Stats) - done := make(chan bool) - statOpts := docker.StatsOptions{ - Stream: false, - ID: container.ID, - Stats: statChan, - Done: done, - Timeout: time.Duration(time.Second * 5), + r, err := d.client.ContainerStats(context.Background(), container.ID, false) + if err != nil { + log.Printf("Error getting docker stats: %s\n", err.Error()) } - - go func() { - err := d.client.Stats(statOpts) - if err != nil { - log.Printf("Error getting docker stats: %s\n", err.Error()) - } - }() - - stat := <-statChan - close(done) - - if stat == nil { - return nil + defer r.Close() + dec := json.NewDecoder(r) + if err = dec.Decode(&v); err != nil { + log.Printf("Error decoding: %s\n", err.Error()) } // Add labels to tags @@ -233,13 +227,13 @@ func (d *Docker) gatherContainer( tags[k] = v } - gatherContainerStats(stat, acc, tags) + gatherContainerStats(v, acc, tags) return nil } func gatherContainerStats( - stat *docker.Stats, + stat *types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, ) { @@ -250,35 +244,35 @@ func gatherContainerStats( "usage": stat.MemoryStats.Usage, "fail_count": stat.MemoryStats.Failcnt, "limit": stat.MemoryStats.Limit, - "total_pgmafault": stat.MemoryStats.Stats.TotalPgmafault, - "cache": stat.MemoryStats.Stats.Cache, - "mapped_file": stat.MemoryStats.Stats.MappedFile, - "total_inactive_file": stat.MemoryStats.Stats.TotalInactiveFile, - "pgpgout": stat.MemoryStats.Stats.Pgpgout, - "rss": stat.MemoryStats.Stats.Rss, - "total_mapped_file": stat.MemoryStats.Stats.TotalMappedFile, - "writeback": stat.MemoryStats.Stats.Writeback, - "unevictable": stat.MemoryStats.Stats.Unevictable, - "pgpgin": stat.MemoryStats.Stats.Pgpgin, - "total_unevictable": stat.MemoryStats.Stats.TotalUnevictable, - "pgmajfault": stat.MemoryStats.Stats.Pgmajfault, - "total_rss": stat.MemoryStats.Stats.TotalRss, - "total_rss_huge": stat.MemoryStats.Stats.TotalRssHuge, - "total_writeback": stat.MemoryStats.Stats.TotalWriteback, - "total_inactive_anon": stat.MemoryStats.Stats.TotalInactiveAnon, - "rss_huge": stat.MemoryStats.Stats.RssHuge, - "hierarchical_memory_limit": stat.MemoryStats.Stats.HierarchicalMemoryLimit, - "total_pgfault": stat.MemoryStats.Stats.TotalPgfault, - "total_active_file": stat.MemoryStats.Stats.TotalActiveFile, - "active_anon": stat.MemoryStats.Stats.ActiveAnon, - "total_active_anon": stat.MemoryStats.Stats.TotalActiveAnon, - "total_pgpgout": stat.MemoryStats.Stats.TotalPgpgout, - "total_cache": stat.MemoryStats.Stats.TotalCache, - "inactive_anon": stat.MemoryStats.Stats.InactiveAnon, - "active_file": stat.MemoryStats.Stats.ActiveFile, - "pgfault": stat.MemoryStats.Stats.Pgfault, - "inactive_file": stat.MemoryStats.Stats.InactiveFile, - "total_pgpgin": stat.MemoryStats.Stats.TotalPgpgin, + "total_pgmafault": stat.MemoryStats.Stats["total_pgmajfault"], + "cache": stat.MemoryStats.Stats["cache"], + "mapped_file": stat.MemoryStats.Stats["mapped_file"], + "total_inactive_file": stat.MemoryStats.Stats["total_inactive_file"], + "pgpgout": stat.MemoryStats.Stats["pagpgout"], + "rss": stat.MemoryStats.Stats["rss"], + "total_mapped_file": stat.MemoryStats.Stats["total_mapped_file"], + "writeback": stat.MemoryStats.Stats["writeback"], + "unevictable": stat.MemoryStats.Stats["unevictable"], + "pgpgin": stat.MemoryStats.Stats["pgpgin"], + "total_unevictable": stat.MemoryStats.Stats["total_unevictable"], + "pgmajfault": stat.MemoryStats.Stats["pgmajfault"], + "total_rss": stat.MemoryStats.Stats["total_rss"], + "total_rss_huge": stat.MemoryStats.Stats["total_rss_huge"], + "total_writeback": stat.MemoryStats.Stats["total_write_back"], + "total_inactive_anon": stat.MemoryStats.Stats["total_inactive_anon"], + "rss_huge": stat.MemoryStats.Stats["rss_huge"], + "hierarchical_memory_limit": stat.MemoryStats.Stats["hierarchical_memory_limit"], + "total_pgfault": stat.MemoryStats.Stats["total_pgfault"], + "total_active_file": stat.MemoryStats.Stats["total_active_file"], + "active_anon": stat.MemoryStats.Stats["active_anon"], + "total_active_anon": stat.MemoryStats.Stats["total_active_anon"], + "total_pgpgout": stat.MemoryStats.Stats["total_pgpgout"], + "total_cache": stat.MemoryStats.Stats["total_cache"], + "inactive_anon": stat.MemoryStats.Stats["inactive_anon"], + "active_file": stat.MemoryStats.Stats["active_file"], + "pgfault": stat.MemoryStats.Stats["pgfault"], + "inactive_file": stat.MemoryStats.Stats["inactive_file"], + "total_pgpgin": stat.MemoryStats.Stats["total_pgpgin"], "usage_percent": calculateMemPercent(stat), } acc.AddFields("docker_mem", memfields, tags, now) @@ -287,7 +281,7 @@ func gatherContainerStats( "usage_total": stat.CPUStats.CPUUsage.TotalUsage, "usage_in_usermode": stat.CPUStats.CPUUsage.UsageInUsermode, "usage_in_kernelmode": stat.CPUStats.CPUUsage.UsageInKernelmode, - "usage_system": stat.CPUStats.SystemCPUUsage, + "usage_system": stat.CPUStats.SystemUsage, "throttling_periods": stat.CPUStats.ThrottlingData.Periods, "throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods, "throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime, @@ -323,7 +317,7 @@ func gatherContainerStats( gatherBlockIOMetrics(stat, acc, tags, now) } -func calculateMemPercent(stat *docker.Stats) float64 { +func calculateMemPercent(stat *types.StatsJSON) float64 { var memPercent = 0.0 if stat.MemoryStats.Limit > 0 { memPercent = float64(stat.MemoryStats.Usage) / float64(stat.MemoryStats.Limit) * 100.0 @@ -331,11 +325,11 @@ func calculateMemPercent(stat *docker.Stats) float64 { return memPercent } -func calculateCPUPercent(stat *docker.Stats) float64 { +func calculateCPUPercent(stat *types.StatsJSON) float64 { var cpuPercent = 0.0 // calculate the change for the cpu and system usage of the container in between readings cpuDelta := float64(stat.CPUStats.CPUUsage.TotalUsage) - float64(stat.PreCPUStats.CPUUsage.TotalUsage) - systemDelta := float64(stat.CPUStats.SystemCPUUsage) - float64(stat.PreCPUStats.SystemCPUUsage) + systemDelta := float64(stat.CPUStats.SystemUsage) - float64(stat.PreCPUStats.SystemUsage) if systemDelta > 0.0 && cpuDelta > 0.0 { cpuPercent = (cpuDelta / systemDelta) * float64(len(stat.CPUStats.CPUUsage.PercpuUsage)) * 100.0 @@ -344,7 +338,7 @@ func calculateCPUPercent(stat *docker.Stats) float64 { } func gatherBlockIOMetrics( - stat *docker.Stats, + stat *types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, now time.Time, @@ -353,7 +347,7 @@ func gatherBlockIOMetrics( // Make a map of devices to their block io stats deviceStatMap := make(map[string]map[string]interface{}) - for _, metric := range blkioStats.IOServiceBytesRecursive { + for _, metric := range blkioStats.IoServiceBytesRecursive { device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) _, ok := deviceStatMap[device] if !ok { @@ -364,7 +358,7 @@ func gatherBlockIOMetrics( deviceStatMap[device][field] = metric.Value } - for _, metric := range blkioStats.IOServicedRecursive { + for _, metric := range blkioStats.IoServicedRecursive { device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) _, ok := deviceStatMap[device] if !ok { @@ -375,31 +369,31 @@ func gatherBlockIOMetrics( deviceStatMap[device][field] = metric.Value } - for _, metric := range blkioStats.IOQueueRecursive { + for _, metric := range blkioStats.IoQueuedRecursive { device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) field := fmt.Sprintf("io_queue_recursive_%s", strings.ToLower(metric.Op)) deviceStatMap[device][field] = metric.Value } - for _, metric := range blkioStats.IOServiceTimeRecursive { + for _, metric := range blkioStats.IoServiceTimeRecursive { device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) field := fmt.Sprintf("io_service_time_recursive_%s", strings.ToLower(metric.Op)) deviceStatMap[device][field] = metric.Value } - for _, metric := range blkioStats.IOWaitTimeRecursive { + for _, metric := range blkioStats.IoWaitTimeRecursive { device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) field := fmt.Sprintf("io_wait_time_%s", strings.ToLower(metric.Op)) deviceStatMap[device][field] = metric.Value } - for _, metric := range blkioStats.IOMergedRecursive { + for _, metric := range blkioStats.IoMergedRecursive { device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) field := fmt.Sprintf("io_merged_recursive_%s", strings.ToLower(metric.Op)) deviceStatMap[device][field] = metric.Value } - for _, metric := range blkioStats.IOTimeRecursive { + for _, metric := range blkioStats.IoTimeRecursive { device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) field := fmt.Sprintf("io_time_recursive_%s", strings.ToLower(metric.Op)) deviceStatMap[device][field] = metric.Value From 708cbf937f737c14e2138b4712a94f26fa09f562 Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Sun, 3 Apr 2016 19:37:41 +0200 Subject: [PATCH 244/287] input(docker): Fixed tests to work with engine-api * Modified tests to work with engine-api --- plugins/inputs/docker/docker_test.go | 196 +++++++++++++++++---------- 1 file changed, 121 insertions(+), 75 deletions(-) diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index 23fd0bb34..c9fe6cea1 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -1,13 +1,18 @@ package system import ( - "encoding/json" + "io" + "io/ioutil" + "strings" "testing" "time" + "golang.org/x/net/context" + + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/registry" "github.com/influxdata/telegraf/testutil" - "github.com/fsouza/go-dockerclient" "github.com/stretchr/testify/require" ) @@ -114,58 +119,58 @@ func TestDockerGatherContainerStats(t *testing.T) { acc.AssertContainsTaggedFields(t, "docker_cpu", cpu1fields, cputags) } -func testStats() *docker.Stats { - stats := &docker.Stats{ - Read: time.Now(), - Networks: make(map[string]docker.NetworkStats), - } +func testStats() *types.StatsJSON { + stats := &types.StatsJSON{} + stats.Read = time.Now() + stats.Networks = make(map[string]types.NetworkStats) stats.CPUStats.CPUUsage.PercpuUsage = []uint64{1, 1002} stats.CPUStats.CPUUsage.UsageInUsermode = 100 stats.CPUStats.CPUUsage.TotalUsage = 500 stats.CPUStats.CPUUsage.UsageInKernelmode = 200 - stats.CPUStats.SystemCPUUsage = 100 + stats.CPUStats.SystemUsage = 100 stats.CPUStats.ThrottlingData.Periods = 1 stats.PreCPUStats.CPUUsage.TotalUsage = 400 - stats.PreCPUStats.SystemCPUUsage = 50 + stats.PreCPUStats.SystemUsage = 50 - stats.MemoryStats.Stats.TotalPgmafault = 0 - stats.MemoryStats.Stats.Cache = 0 - stats.MemoryStats.Stats.MappedFile = 0 - stats.MemoryStats.Stats.TotalInactiveFile = 0 - stats.MemoryStats.Stats.Pgpgout = 0 - stats.MemoryStats.Stats.Rss = 0 - stats.MemoryStats.Stats.TotalMappedFile = 0 - stats.MemoryStats.Stats.Writeback = 0 - stats.MemoryStats.Stats.Unevictable = 0 - stats.MemoryStats.Stats.Pgpgin = 0 - stats.MemoryStats.Stats.TotalUnevictable = 0 - stats.MemoryStats.Stats.Pgmajfault = 0 - stats.MemoryStats.Stats.TotalRss = 44 - stats.MemoryStats.Stats.TotalRssHuge = 444 - stats.MemoryStats.Stats.TotalWriteback = 55 - stats.MemoryStats.Stats.TotalInactiveAnon = 0 - stats.MemoryStats.Stats.RssHuge = 0 - stats.MemoryStats.Stats.HierarchicalMemoryLimit = 0 - stats.MemoryStats.Stats.TotalPgfault = 0 - stats.MemoryStats.Stats.TotalActiveFile = 0 - stats.MemoryStats.Stats.ActiveAnon = 0 - stats.MemoryStats.Stats.TotalActiveAnon = 0 - stats.MemoryStats.Stats.TotalPgpgout = 0 - stats.MemoryStats.Stats.TotalCache = 0 - stats.MemoryStats.Stats.InactiveAnon = 0 - stats.MemoryStats.Stats.ActiveFile = 1 - stats.MemoryStats.Stats.Pgfault = 2 - stats.MemoryStats.Stats.InactiveFile = 3 - stats.MemoryStats.Stats.TotalPgpgin = 4 + stats.MemoryStats.Stats = make(map[string]uint64) + stats.MemoryStats.Stats["total_pgmajfault"] = 0 + stats.MemoryStats.Stats["cache"] = 0 + stats.MemoryStats.Stats["mapped_file"] = 0 + stats.MemoryStats.Stats["total_inactive_file"] = 0 + stats.MemoryStats.Stats["pagpgout"] = 0 + stats.MemoryStats.Stats["rss"] = 0 + stats.MemoryStats.Stats["total_mapped_file"] = 0 + stats.MemoryStats.Stats["writeback"] = 0 + stats.MemoryStats.Stats["unevictable"] = 0 + stats.MemoryStats.Stats["pgpgin"] = 0 + stats.MemoryStats.Stats["total_unevictable"] = 0 + stats.MemoryStats.Stats["pgmajfault"] = 0 + stats.MemoryStats.Stats["total_rss"] = 44 + stats.MemoryStats.Stats["total_rss_huge"] = 444 + stats.MemoryStats.Stats["total_write_back"] = 55 + stats.MemoryStats.Stats["total_inactive_anon"] = 0 + stats.MemoryStats.Stats["rss_huge"] = 0 + stats.MemoryStats.Stats["hierarchical_memory_limit"] = 0 + stats.MemoryStats.Stats["total_pgfault"] = 0 + stats.MemoryStats.Stats["total_active_file"] = 0 + stats.MemoryStats.Stats["active_anon"] = 0 + stats.MemoryStats.Stats["total_active_anon"] = 0 + stats.MemoryStats.Stats["total_pgpgout"] = 0 + stats.MemoryStats.Stats["total_cache"] = 0 + stats.MemoryStats.Stats["inactive_anon"] = 0 + stats.MemoryStats.Stats["active_file"] = 1 + stats.MemoryStats.Stats["pgfault"] = 2 + stats.MemoryStats.Stats["inactive_file"] = 3 + stats.MemoryStats.Stats["total_pgpgin"] = 4 stats.MemoryStats.MaxUsage = 1001 stats.MemoryStats.Usage = 1111 stats.MemoryStats.Failcnt = 1 stats.MemoryStats.Limit = 2000 - stats.Networks["eth0"] = docker.NetworkStats{ + stats.Networks["eth0"] = types.NetworkStats{ RxDropped: 1, RxBytes: 2, RxErrors: 3, @@ -176,23 +181,23 @@ func testStats() *docker.Stats { TxBytes: 4, } - sbr := docker.BlkioStatsEntry{ + sbr := types.BlkioStatEntry{ Major: 6, Minor: 0, Op: "read", Value: 100, } - sr := docker.BlkioStatsEntry{ + sr := types.BlkioStatEntry{ Major: 6, Minor: 0, Op: "write", Value: 101, } - stats.BlkioStats.IOServiceBytesRecursive = append( - stats.BlkioStats.IOServiceBytesRecursive, sbr) - stats.BlkioStats.IOServicedRecursive = append( - stats.BlkioStats.IOServicedRecursive, sr) + stats.BlkioStats.IoServiceBytesRecursive = append( + stats.BlkioStats.IoServiceBytesRecursive, sbr) + stats.BlkioStats.IoServicedRecursive = append( + stats.BlkioStats.IoServicedRecursive, sr) return stats } @@ -200,35 +205,78 @@ func testStats() *docker.Stats { type FakeDockerClient struct { } -func (d FakeDockerClient) Info() (*docker.Env, error) { - env := docker.Env{"Containers=108", "OomKillDisable=false", "SystemTime=2016-02-24T00:55:09.15073105-05:00", "NEventsListener=0", "ID=5WQQ:TFWR:FDNG:OKQ3:37Y4:FJWG:QIKK:623T:R3ME:QTKB:A7F7:OLHD", "Debug=false", "LoggingDriver=json-file", "KernelVersion=4.3.0-1-amd64", "IndexServerAddress=https://index.docker.io/v1/", "MemTotal=3840757760", "Images=199", "CpuCfsQuota=true", "Name=absol", "SwapLimit=false", "IPv4Forwarding=true", "ExecutionDriver=native-0.2", "InitSha1=23a51f3c916d2b5a3bbb31caf301fd2d14edd518", "ExperimentalBuild=false", "CpuCfsPeriod=true", "RegistryConfig={\"IndexConfigs\":{\"docker.io\":{\"Mirrors\":null,\"Name\":\"docker.io\",\"Official\":true,\"Secure\":true}},\"InsecureRegistryCIDRs\":[\"127.0.0.0/8\"],\"Mirrors\":null}", "OperatingSystem=Linux Mint LMDE (containerized)", "BridgeNfIptables=true", "HttpsProxy=", "Labels=null", "MemoryLimit=false", "DriverStatus=[[\"Pool Name\",\"docker-8:1-1182287-pool\"],[\"Pool Blocksize\",\"65.54 kB\"],[\"Backing Filesystem\",\"extfs\"],[\"Data file\",\"/dev/loop0\"],[\"Metadata file\",\"/dev/loop1\"],[\"Data Space Used\",\"17.3 GB\"],[\"Data Space Total\",\"107.4 GB\"],[\"Data Space Available\",\"36.53 GB\"],[\"Metadata Space Used\",\"20.97 MB\"],[\"Metadata Space Total\",\"2.147 GB\"],[\"Metadata Space Available\",\"2.127 GB\"],[\"Udev Sync Supported\",\"true\"],[\"Deferred Removal Enabled\",\"false\"],[\"Data loop file\",\"/var/lib/docker/devicemapper/devicemapper/data\"],[\"Metadata loop file\",\"/var/lib/docker/devicemapper/devicemapper/metadata\"],[\"Library Version\",\"1.02.115 (2016-01-25)\"]]", "NFd=19", "HttpProxy=", "Driver=devicemapper", "NGoroutines=39", "InitPath=/usr/lib/docker.io/dockerinit", "NCPU=4", "DockerRootDir=/var/lib/docker", "NoProxy=", "BridgeNfIp6tables=true"} - return &env, nil +func (d FakeDockerClient) Info(ctx context.Context) (types.Info, error) { + env := types.Info{ + Containers: 108, + OomKillDisable: false, + SystemTime: "2016-02-24T00:55:09.15073105-05:00", + NEventsListener: 0, + ID: "5WQQ:TFWR:FDNG:OKQ3:37Y4:FJWG:QIKK:623T:R3ME:QTKB:A7F7:OLHD", + Debug: false, + LoggingDriver: "json-file", + KernelVersion: "4.3.0-1-amd64", + IndexServerAddress: "https://index.docker.io/v1/", + MemTotal: 3840757760, + Images: 199, + CPUCfsQuota: true, + Name: "absol", + SwapLimit: false, + IPv4Forwarding: true, + ExecutionDriver: "native-0.2", + ExperimentalBuild: false, + CPUCfsPeriod: true, + RegistryConfig: ®istry.ServiceConfig{ + IndexConfigs: map[string]*registry.IndexInfo{ + "docker.io": { + Name: "docker.io", + Mirrors: []string{}, + Official: true, + Secure: true, + }, + }, InsecureRegistryCIDRs: []*registry.NetIPNet{{IP: []byte{127, 0, 0, 0}, Mask: []byte{255, 0, 0, 0}}}, Mirrors: []string{}}, + OperatingSystem: "Linux Mint LMDE (containerized)", + BridgeNfIptables: true, + HTTPSProxy: "", + Labels: []string{}, + MemoryLimit: false, + DriverStatus: [][2]string{{"Pool Name", "docker-8:1-1182287-pool"}, {"Pool Blocksize", "65.54 kB"}, {"Backing Filesystem", "extfs"}, {"Data file", "/dev/loop0"}, {"Metadata file", "/dev/loop1"}, {"Data Space Used", "17.3 GB"}, {"Data Space Total", "107.4 GB"}, {"Data Space Available", "36.53 GB"}, {"Metadata Space Used", "20.97 MB"}, {"Metadata Space Total", "2.147 GB"}, {"Metadata Space Available", "2.127 GB"}, {"Udev Sync Supported", "true"}, {"Deferred Removal Enabled", "false"}, {"Data loop file", "/var/lib/docker/devicemapper/devicemapper/data"}, {"Metadata loop file", "/var/lib/docker/devicemapper/devicemapper/metadata"}, {"Library Version", "1.02.115 (2016-01-25)"}}, + NFd: 19, + HTTPProxy: "", + Driver: "devicemapper", + NGoroutines: 39, + NCPU: 4, + DockerRootDir: "/var/lib/docker", + NoProxy: "", + BridgeNfIP6tables: true, + } + return env, nil } -func (d FakeDockerClient) ListContainers(opts docker.ListContainersOptions) ([]docker.APIContainers, error) { - container1 := docker.APIContainers{ +func (d FakeDockerClient) ContainerList(octx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + container1 := types.Container{ ID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb", + Names: []string{"/etcd"}, Image: "quay.io/coreos/etcd:v2.2.2", Command: "/etcd -name etcd0 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379", Created: 1455941930, Status: "Up 4 hours", - Ports: []docker.APIPort{ - docker.APIPort{ + Ports: []types.Port{ + types.Port{ PrivatePort: 7001, PublicPort: 0, Type: "tcp", }, - docker.APIPort{ + types.Port{ PrivatePort: 4001, PublicPort: 0, Type: "tcp", }, - docker.APIPort{ + types.Port{ PrivatePort: 2380, PublicPort: 0, Type: "tcp", }, - docker.APIPort{ + types.Port{ PrivatePort: 2379, PublicPort: 2379, Type: "tcp", @@ -237,31 +285,31 @@ func (d FakeDockerClient) ListContainers(opts docker.ListContainersOptions) ([]d }, SizeRw: 0, SizeRootFs: 0, - Names: []string{"/etcd"}, } - container2 := docker.APIContainers{ + container2 := types.Container{ ID: "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173", + Names: []string{"/etcd2"}, Image: "quay.io/coreos/etcd:v2.2.2", Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379", Created: 1455941933, Status: "Up 4 hours", - Ports: []docker.APIPort{ - docker.APIPort{ + Ports: []types.Port{ + types.Port{ PrivatePort: 7002, PublicPort: 0, Type: "tcp", }, - docker.APIPort{ + types.Port{ PrivatePort: 4002, PublicPort: 0, Type: "tcp", }, - docker.APIPort{ + types.Port{ PrivatePort: 2381, PublicPort: 0, Type: "tcp", }, - docker.APIPort{ + types.Port{ PrivatePort: 2382, PublicPort: 2382, Type: "tcp", @@ -270,21 +318,19 @@ func (d FakeDockerClient) ListContainers(opts docker.ListContainersOptions) ([]d }, SizeRw: 0, SizeRootFs: 0, - Names: []string{"/etcd2"}, } - containers := []docker.APIContainers{container1, container2} + containers := []types.Container{container1, container2} return containers, nil //#{e6a96c84ca91a5258b7cb752579fb68826b68b49ff957487695cd4d13c343b44 titilambert/snmpsim /bin/sh -c 'snmpsimd --agent-udpv4-endpoint=0.0.0.0:31161 --process-user=root --process-group=user' 1455724831 Up 4 hours [{31161 31161 udp 0.0.0.0}] 0 0 [/snmp] map[]}]2016/02/24 01:05:01 Gathered metrics, (3s interval), from 1 inputs in 1.233836656s } -func (d FakeDockerClient) Stats(opts docker.StatsOptions) error { +func (d FakeDockerClient) ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error) { + var stat io.ReadCloser jsonStat := `{"read":"2016-02-24T11:42:27.472459608-05:00","memory_stats":{"stats":{},"limit":18935443456},"blkio_stats":{"io_service_bytes_recursive":[{"major":252,"minor":1,"op":"Read","value":753664},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":753664},{"major":252,"minor":1,"op":"Total","value":753664}],"io_serviced_recursive":[{"major":252,"minor":1,"op":"Read","value":26},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":26},{"major":252,"minor":1,"op":"Total","value":26}]},"cpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052607520000000,"throttling_data":{}},"precpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052599550000000,"throttling_data":{}}}` - var stat docker.Stats - json.Unmarshal([]byte(jsonStat), &stat) - opts.Stats <- &stat - return nil + stat = ioutil.NopCloser(strings.NewReader(jsonStat)) + return stat, nil } func TestDockerGatherInfo(t *testing.T) { @@ -299,12 +345,12 @@ func TestDockerGatherInfo(t *testing.T) { acc.AssertContainsTaggedFields(t, "docker", map[string]interface{}{ - "n_listener_events": int64(0), - "n_cpus": int64(4), - "n_used_file_descriptors": int64(19), - "n_containers": int64(108), - "n_images": int64(199), - "n_goroutines": int64(39), + "n_listener_events": int(0), + "n_cpus": int(4), + "n_used_file_descriptors": int(19), + "n_containers": int(108), + "n_images": int(199), + "n_goroutines": int(39), }, map[string]string{}, ) From fd1f05c8e01706e388c5885f2c443ab927c2f817 Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Sun, 3 Apr 2016 19:40:12 +0200 Subject: [PATCH 245/287] input(docker): Fixed io sectors/io_time recursive * On engine-api sectors_recursive and io_time_recursive have no Op --- plugins/inputs/docker/docker.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index b1bbc63b3..d753e8b63 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -395,14 +395,12 @@ func gatherBlockIOMetrics( for _, metric := range blkioStats.IoTimeRecursive { device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) - field := fmt.Sprintf("io_time_recursive_%s", strings.ToLower(metric.Op)) - deviceStatMap[device][field] = metric.Value + deviceStatMap[device]["io_time_recursive"] = metric.Value } for _, metric := range blkioStats.SectorsRecursive { device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) - field := fmt.Sprintf("sectors_recursive_%s", strings.ToLower(metric.Op)) - deviceStatMap[device][field] = metric.Value + deviceStatMap[device]["sectors_recursive"] = metric.Value } for device, fields := range deviceStatMap { From 741fb1181ff6f5453b85f7a0c8a3da5f1f53f4c1 Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Sun, 3 Apr 2016 20:06:01 +0200 Subject: [PATCH 246/287] godeps(): Updated Godeps file for engine-api * Added required deps for engine-api * Removed fsouza/go-dockerclient --- Godeps | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Godeps b/Godeps index 255b95ab5..b2b6b7e58 100644 --- a/Godeps +++ b/Godeps @@ -9,10 +9,12 @@ github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2 github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6 github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d +github.com/docker/engine-api 8924d6900370b4c7e7984be5adc61f50a80d7537 +github.com/docker/go-connections f549a9393d05688dff0992ef3efd8bbe6c628aeb +github.com/docker/go-units 5d2041e26a699eaca682e2ea41c8f891e1060444 github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3 github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367 github.com/eclipse/paho.mqtt.golang 4ab3e867810d1ec5f35157c59e965054dbf43a0d -github.com/fsouza/go-dockerclient a49c8269a6899cae30da1f8a4b82e0ce945f9967 github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032 github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7 From 5c688daff12d298fd902607d6f4018599620fb1d Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Sun, 3 Apr 2016 20:42:52 +0200 Subject: [PATCH 247/287] input(docker): Updated README * Replaced links to fsouza/go-dockerclient by docker/engine-api --- plugins/inputs/docker/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index 97450e2aa..045e09a81 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -5,11 +5,11 @@ docker containers. You can read Docker's documentation for their remote API [here](https://docs.docker.com/engine/reference/api/docker_remote_api_v1.20/#get-container-stats-based-on-resource-usage) The docker plugin uses the excellent -[fsouza go-dockerclient](https://github.com/fsouza/go-dockerclient) library to +[docker engine-api](https://github.com/docker/engine-api) library to gather stats. Documentation for the library can be found [here](https://godoc.org/github.com/fsouza/go-dockerclient) and documentation for the stat structure can be found -[here](https://godoc.org/github.com/fsouza/go-dockerclient#Stats) +[here](https://godoc.org/github.com/docker/engine-api/types#Stats) ### Configuration: From 9f68a329349f564809a610dd7ed0f391b75a04d8 Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Sun, 3 Apr 2016 20:57:53 +0200 Subject: [PATCH 248/287] fix(): Last link on README --- plugins/inputs/docker/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index 045e09a81..c22e6af8e 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -7,7 +7,7 @@ docker containers. You can read Docker's documentation for their remote API The docker plugin uses the excellent [docker engine-api](https://github.com/docker/engine-api) library to gather stats. Documentation for the library can be found -[here](https://godoc.org/github.com/fsouza/go-dockerclient) and documentation +[here](https://godoc.org/github.com/docker/engine-api) and documentation for the stat structure can be found [here](https://godoc.org/github.com/docker/engine-api/types#Stats) From 8274798499b38145ba403484b4ddc49f639cb3a1 Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Mon, 4 Apr 2016 00:35:31 +0200 Subject: [PATCH 249/287] fix(Godeps): Added github.com/opencontainers/runc --- Godeps | 1 + 1 file changed, 1 insertion(+) diff --git a/Godeps b/Godeps index b2b6b7e58..aa5be999d 100644 --- a/Godeps +++ b/Godeps @@ -34,6 +34,7 @@ github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3 github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980 +github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8 github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831 github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37 From e19c474a92999137b59e2410483540d6dca3d4bb Mon Sep 17 00:00:00 2001 From: Sergio Jimenez Date: Tue, 5 Apr 2016 01:03:28 +0200 Subject: [PATCH 250/287] input(docker): Cleanup * Removed leftovers, unused code closes #957 fixes #645 --- CHANGELOG.md | 1 + plugins/inputs/docker/docker.go | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5a4552698..b5a2c9f78 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ - [#968](https://github.com/influxdata/telegraf/issues/968): Processes plugin gets unknown state when spaces are in (command name) - [#969](https://github.com/influxdata/telegraf/pull/969): ipmi_sensors: allow : in password. Thanks @awaw! - [#972](https://github.com/influxdata/telegraf/pull/972): dovecot: remove extra newline in dovecot command. Thanks @mrannanj! +- [#645](https://github.com/influxdata/telegraf/issues/645): docker plugin i/o error on closed pipe. Thanks @tripledes! ## v0.12.0 [2016-04-05] diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index d753e8b63..094bad8ca 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -149,8 +149,6 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { map[string]string{"unit": "bytes"}, now) // Get storage metrics - //driverStatusRaw := []byte(info.DriverStatus) - //json.Unmarshal(driverStatusRaw, &driverStatus) for _, rawData := range info.DriverStatus { // Try to convert string to int (bytes) value, err := parseSize(rawData[1]) From d5b9e003fee44ff5276e7177e048cdc007ea95f8 Mon Sep 17 00:00:00 2001 From: Josh Hardy Date: Fri, 25 Mar 2016 15:16:23 -0700 Subject: [PATCH 251/287] Add CloudWatch input plugin Rebased commit of previously reviewed branch. Added cloudwatch client Mock and more rich unit tests. closes #935 closes #936 --- CHANGELOG.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/cloudwatch/README.md | 86 ++++++ plugins/inputs/cloudwatch/cloudwatch.go | 305 +++++++++++++++++++ plugins/inputs/cloudwatch/cloudwatch_test.go | 131 ++++++++ 5 files changed, 524 insertions(+) create mode 100644 plugins/inputs/cloudwatch/README.md create mode 100644 plugins/inputs/cloudwatch/cloudwatch.go create mode 100644 plugins/inputs/cloudwatch/cloudwatch_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index b5a2c9f78..09a00f069 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ### Features - [#976](https://github.com/influxdata/telegraf/pull/976): Reduce allocations in the UDP and statsd inputs. - [#979](https://github.com/influxdata/telegraf/pull/979): Reduce allocations in the TCP listener. +- [#935](https://github.com/influxdata/telegraf/pull/935): AWS Cloudwatch input plugin. Thanks @joshhardy & @ljosa! ### Bugfixes - [#968](https://github.com/influxdata/telegraf/issues/968): Processes plugin gets unknown state when spaces are in (command name) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 4f7d45f60..52ee6c13d 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -4,6 +4,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/aerospike" _ "github.com/influxdata/telegraf/plugins/inputs/apache" _ "github.com/influxdata/telegraf/plugins/inputs/bcache" + _ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch" _ "github.com/influxdata/telegraf/plugins/inputs/couchbase" _ "github.com/influxdata/telegraf/plugins/inputs/couchdb" _ "github.com/influxdata/telegraf/plugins/inputs/disque" diff --git a/plugins/inputs/cloudwatch/README.md b/plugins/inputs/cloudwatch/README.md new file mode 100644 index 000000000..04501161d --- /dev/null +++ b/plugins/inputs/cloudwatch/README.md @@ -0,0 +1,86 @@ +# Amazon CloudWatch Statistics Input + +This plugin will pull Metric Statistics from Amazon CloudWatch. + +### Amazon Authentication + +This plugin uses a credential chain for Authentication with the CloudWatch +API endpoint. In the following order the plugin will attempt to authenticate. +1. [IAMS Role](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) +2. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables) +3. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file) + +### Configuration: + +```toml +[[inputs.cloudwatch]] + ## Amazon Region (required) + region = 'us-east-1' + + ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s) + period = '1m' + + ## Collection Delay (required - must account for metrics availability via CloudWatch API) + delay = '1m' + + ## Override global run interval (optional - defaults to global interval) + ## Recomended: use metric 'interval' that is a multiple of 'period' to avoid + ## gaps or overlap in pulled data + interval = '1m' + + ## Metric Statistic Namespace (required) + namespace = 'AWS/ELB' + + ## Metrics to Pull (optional) + ## Defaults to all Metrics in Namespace if nothing is provided + ## Refreshes Namespace available metrics every 1h + [[inputs.cloudwatch.metrics]] + names = ['Latency', 'RequestCount'] + + ## Dimension filters for Metric (optional) + [[inputs.cloudwatch.metrics.dimensions]] + name = 'LoadBalancerName' + value = 'p-example' +``` +#### Requirements and Terminology + +Plugin Configuration utilizes [CloudWatch concepts](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html) and access pattern to allow monitoring of any CloudWatch Metric. + +- `region` must be a valid AWS [Region](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#CloudWatchRegions) value +- `period` must be a valid CloudWatch [Period](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#CloudWatchPeriods) value +- `namespace` must be a valid CloudWatch [Namespace](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Namespace) value +- `names` must be valid CloudWatch [Metric](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Metric) names +- `dimensions` must be valid CloudWatch [Dimension](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Dimension) name/value pairs + +#### Restrictions and Limitations +- CloudWatch metrics are not available instantly via the CloudWatch API. You should adjust your collection `delay` to account for this lag in metrics availability based on your [monitoring subscription level](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html) +- CloudWatch API usage incurs cost - see [GetMetricStatistics Pricing](https://aws.amazon.com/cloudwatch/pricing/) + +### Measurements & Fields: + +Each CloudWatch Namespace monitored records a measurement with fields for each available Metric Statistic +Namespace and Metrics are represented in [snake case](https://en.wikipedia.org/wiki/Snake_case) + +- cloudwatch_{namespace} + - {metric}_sum (metric Sum value) + - {metric}_average (metric Average value) + - {metric}_minimum (metric Minimum value) + - {metric}_maximum (metric Maximum value) + - {metric}_sample_count (metric SampleCount value) + + +### Tags: +Each measurement is tagged with the following identifiers to uniquely identify the associated metric +Tag Dimension names are represented in [snake case](https://en.wikipedia.org/wiki/Snake_case) + +- All measurements have the following tags: + - region (CloudWatch Region) + - unit (CloudWatch Metric Unit) + - {dimension-name} (Cloudwatch Dimension value - one for each metric dimension) + +### Example Output: + +``` +$ ./telegraf -config telegraf.conf -input-filter cloudwatch -test +> cloudwatch_aws_elb,load_balancer_name=p-example,region=us-east-1,unit=seconds latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875,latency_sample_count=4029,latency_sum=19.382705211639404 1459542420000000000 +``` diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go new file mode 100644 index 000000000..e3fa74bad --- /dev/null +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -0,0 +1,305 @@ +package cloudwatch + +import ( + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" + + "github.com/aws/aws-sdk-go/service/cloudwatch" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type ( + CloudWatch struct { + Region string `toml:"region"` + Period internal.Duration `toml:"period"` + Delay internal.Duration `toml:"delay"` + Namespace string `toml:"namespace"` + Metrics []*Metric `toml:"metrics"` + client cloudwatchClient + metricCache *MetricCache + } + + Metric struct { + MetricNames []string `toml:"names"` + Dimensions []*Dimension `toml:"dimensions"` + } + + Dimension struct { + Name string `toml:"name"` + Value string `toml:"value"` + } + + MetricCache struct { + TTL time.Duration + Fetched time.Time + Metrics []*cloudwatch.Metric + } + + cloudwatchClient interface { + ListMetrics(*cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) + GetMetricStatistics(*cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) + } +) + +func (c *CloudWatch) SampleConfig() string { + return ` + ## Amazon Region + region = 'us-east-1' + + ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s) + period = '1m' + + ## Collection Delay (required - must account for metrics availability via CloudWatch API) + delay = '1m' + + ## Recomended: use metric 'interval' that is a multiple of 'period' to avoid + ## gaps or overlap in pulled data + interval = '1m' + + ## Metric Statistic Namespace (required) + namespace = 'AWS/ELB' + + ## Metrics to Pull (optional) + ## Defaults to all Metrics in Namespace if nothing is provided + ## Refreshes Namespace available metrics every 1h + #[[inputs.cloudwatch.metrics]] + # names = ['Latency', 'RequestCount'] + # + # ## Dimension filters for Metric (optional) + # [[inputs.cloudwatch.metrics.dimensions]] + # name = 'LoadBalancerName' + # value = 'p-example' +` +} + +func (c *CloudWatch) Description() string { + return "Pull Metric Statistics from Amazon CloudWatch" +} + +func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { + if c.client == nil { + c.initializeCloudWatch() + } + + var metrics []*cloudwatch.Metric + + // check for provided metric filter + if c.Metrics != nil { + metrics = []*cloudwatch.Metric{} + for _, m := range c.Metrics { + dimensions := make([]*cloudwatch.Dimension, len(m.Dimensions)) + for k, d := range m.Dimensions { + dimensions[k] = &cloudwatch.Dimension{ + Name: aws.String(d.Name), + Value: aws.String(d.Value), + } + } + for _, name := range m.MetricNames { + metrics = append(metrics, &cloudwatch.Metric{ + Namespace: aws.String(c.Namespace), + MetricName: aws.String(name), + Dimensions: dimensions, + }) + } + } + } else { + var err error + metrics, err = c.fetchNamespaceMetrics() + if err != nil { + return err + } + } + + metricCount := len(metrics) + var errChan = make(chan error, metricCount) + + now := time.Now() + + // limit concurrency or we can easily exhaust user connection limit + semaphore := make(chan byte, 64) + + for _, m := range metrics { + semaphore <- 0x1 + go c.gatherMetric(acc, m, now, semaphore, errChan) + } + + for i := 1; i <= metricCount; i++ { + err := <-errChan + if err != nil { + return err + } + } + return nil +} + +func init() { + inputs.Add("cloudwatch", func() telegraf.Input { + return &CloudWatch{} + }) +} + +/* + * Initialize CloudWatch client + */ +func (c *CloudWatch) initializeCloudWatch() error { + config := &aws.Config{ + Region: aws.String(c.Region), + Credentials: credentials.NewChainCredentials( + []credentials.Provider{ + &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())}, + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{}, + }), + } + + c.client = cloudwatch.New(session.New(config)) + return nil +} + +/* + * Fetch available metrics for given CloudWatch Namespace + */ +func (c *CloudWatch) fetchNamespaceMetrics() (metrics []*cloudwatch.Metric, err error) { + if c.metricCache != nil && c.metricCache.IsValid() { + metrics = c.metricCache.Metrics + return + } + + metrics = []*cloudwatch.Metric{} + + var token *string + for more := true; more; { + params := &cloudwatch.ListMetricsInput{ + Namespace: aws.String(c.Namespace), + Dimensions: []*cloudwatch.DimensionFilter{}, + NextToken: token, + MetricName: nil, + } + + resp, err := c.client.ListMetrics(params) + if err != nil { + return nil, err + } + + metrics = append(metrics, resp.Metrics...) + + token = resp.NextToken + more = token != nil + } + + cacheTTL, _ := time.ParseDuration("1hr") + c.metricCache = &MetricCache{ + Metrics: metrics, + Fetched: time.Now(), + TTL: cacheTTL, + } + + return +} + +/* + * Gather given Metric and emit any error + */ +func (c *CloudWatch) gatherMetric(acc telegraf.Accumulator, metric *cloudwatch.Metric, now time.Time, semaphore chan byte, errChan chan error) { + params := c.getStatisticsInput(metric, now) + resp, err := c.client.GetMetricStatistics(params) + if err != nil { + errChan <- err + <-semaphore + return + } + + for _, point := range resp.Datapoints { + tags := map[string]string{ + "region": c.Region, + "unit": snakeCase(*point.Unit), + } + + for _, d := range metric.Dimensions { + tags[snakeCase(*d.Name)] = *d.Value + } + + // record field for each statistic + fields := map[string]interface{}{} + + if point.Average != nil { + fields[formatField(*metric.MetricName, cloudwatch.StatisticAverage)] = *point.Average + } + if point.Maximum != nil { + fields[formatField(*metric.MetricName, cloudwatch.StatisticMaximum)] = *point.Maximum + } + if point.Minimum != nil { + fields[formatField(*metric.MetricName, cloudwatch.StatisticMinimum)] = *point.Minimum + } + if point.SampleCount != nil { + fields[formatField(*metric.MetricName, cloudwatch.StatisticSampleCount)] = *point.SampleCount + } + if point.Sum != nil { + fields[formatField(*metric.MetricName, cloudwatch.StatisticSum)] = *point.Sum + } + + acc.AddFields(formatMeasurement(c.Namespace), fields, tags, *point.Timestamp) + } + + errChan <- nil + <-semaphore +} + +/* + * Formatting helpers + */ +func formatField(metricName string, statistic string) string { + return fmt.Sprintf("%s_%s", snakeCase(metricName), snakeCase(statistic)) +} + +func formatMeasurement(namespace string) string { + namespace = strings.Replace(namespace, "/", "_", -1) + namespace = snakeCase(namespace) + return fmt.Sprintf("cloudwatch_%s", namespace) +} + +func snakeCase(s string) string { + s = internal.SnakeCase(s) + s = strings.Replace(s, "__", "_", -1) + return s +} + +/* + * Map Metric to *cloudwatch.GetMetricStatisticsInput for given timeframe + */ +func (c *CloudWatch) getStatisticsInput(metric *cloudwatch.Metric, now time.Time) *cloudwatch.GetMetricStatisticsInput { + end := now.Add(-c.Delay.Duration) + + input := &cloudwatch.GetMetricStatisticsInput{ + StartTime: aws.Time(end.Add(-c.Period.Duration)), + EndTime: aws.Time(end), + MetricName: metric.MetricName, + Namespace: metric.Namespace, + Period: aws.Int64(int64(c.Period.Duration.Seconds())), + Dimensions: metric.Dimensions, + Statistics: []*string{ + aws.String(cloudwatch.StatisticAverage), + aws.String(cloudwatch.StatisticMaximum), + aws.String(cloudwatch.StatisticMinimum), + aws.String(cloudwatch.StatisticSum), + aws.String(cloudwatch.StatisticSampleCount)}, + } + return input +} + +/* + * Check Metric Cache validity + */ +func (c *MetricCache) IsValid() bool { + return c.Metrics != nil && time.Since(c.Fetched) < c.TTL +} diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go new file mode 100644 index 000000000..8f8a3ad0b --- /dev/null +++ b/plugins/inputs/cloudwatch/cloudwatch_test.go @@ -0,0 +1,131 @@ +package cloudwatch + +import ( + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" +) + +type mockCloudWatchClient struct{} + +func (m *mockCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) { + metric := &cloudwatch.Metric{ + Namespace: params.Namespace, + MetricName: aws.String("Latency"), + Dimensions: []*cloudwatch.Dimension{ + &cloudwatch.Dimension{ + Name: aws.String("LoadBalancerName"), + Value: aws.String("p-example"), + }, + }, + } + + result := &cloudwatch.ListMetricsOutput{ + Metrics: []*cloudwatch.Metric{metric}, + } + return result, nil +} + +func (m *mockCloudWatchClient) GetMetricStatistics(params *cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) { + dataPoint := &cloudwatch.Datapoint{ + Timestamp: params.EndTime, + Minimum: aws.Float64(0.1), + Maximum: aws.Float64(0.3), + Average: aws.Float64(0.2), + Sum: aws.Float64(123), + SampleCount: aws.Float64(100), + Unit: aws.String("Seconds"), + } + result := &cloudwatch.GetMetricStatisticsOutput{ + Label: aws.String("Latency"), + Datapoints: []*cloudwatch.Datapoint{dataPoint}, + } + return result, nil +} + +func TestGather(t *testing.T) { + duration, _ := time.ParseDuration("1m") + internalDuration := internal.Duration{ + Duration: duration, + } + c := &CloudWatch{ + Region: "us-east-1", + Namespace: "AWS/ELB", + Delay: internalDuration, + Period: internalDuration, + } + + var acc testutil.Accumulator + c.client = &mockCloudWatchClient{} + + c.Gather(&acc) + + fields := map[string]interface{}{} + fields["latency_minimum"] = 0.1 + fields["latency_maximum"] = 0.3 + fields["latency_average"] = 0.2 + fields["latency_sum"] = 123.0 + fields["latency_sample_count"] = 100.0 + + tags := map[string]string{} + tags["unit"] = "seconds" + tags["region"] = "us-east-1" + tags["load_balancer_name"] = "p-example" + + assert.True(t, acc.HasMeasurement("cloudwatch_aws_elb")) + acc.AssertContainsTaggedFields(t, "cloudwatch_aws_elb", fields, tags) + +} + +func TestGenerateStatisticsInputParams(t *testing.T) { + d := &cloudwatch.Dimension{ + Name: aws.String("LoadBalancerName"), + Value: aws.String("p-example"), + } + + m := &cloudwatch.Metric{ + MetricName: aws.String("Latency"), + Dimensions: []*cloudwatch.Dimension{d}, + } + + duration, _ := time.ParseDuration("1m") + internalDuration := internal.Duration{ + Duration: duration, + } + + c := &CloudWatch{ + Namespace: "AWS/ELB", + Delay: internalDuration, + Period: internalDuration, + } + + c.initializeCloudWatch() + + now := time.Now() + + params := c.getStatisticsInput(m, now) + + assert.EqualValues(t, *params.EndTime, now.Add(-c.Delay.Duration)) + assert.EqualValues(t, *params.StartTime, now.Add(-c.Period.Duration).Add(-c.Delay.Duration)) + assert.Len(t, params.Dimensions, 1) + assert.Len(t, params.Statistics, 5) + assert.EqualValues(t, *params.Period, 60) +} + +func TestMetricsCacheTimeout(t *testing.T) { + ttl, _ := time.ParseDuration("5ms") + cache := &MetricCache{ + Metrics: []*cloudwatch.Metric{}, + Fetched: time.Now(), + TTL: ttl, + } + + assert.True(t, cache.IsValid()) + time.Sleep(ttl) + assert.False(t, cache.IsValid()) +} From 1a612bcae99e95045e70e9b00e9347c4d4e90b6b Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 6 Apr 2016 13:49:50 -0600 Subject: [PATCH 252/287] Update README and etc/telegraf.conf --- README.md | 1 + etc/telegraf.conf | 39 ++++++++++++++++++++++++++++++--------- 2 files changed, 31 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 9813ca6d4..caa562a6d 100644 --- a/README.md +++ b/README.md @@ -156,6 +156,7 @@ more information on each, please look at the directory of the same name in Currently implemented sources: +* [aws cloudwatch](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cloudwatch) * [aerospike](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aerospike) * [apache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/apache) * [bcache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/bcache) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 3d65aaf62..1b534d888 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -429,6 +429,36 @@ # bcacheDevs = ["bcache0"] +# # Pull Metric Statistics from Amazon CloudWatch +# [[inputs.cloudwatch]] +# ## Amazon Region +# region = 'us-east-1' +# +# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s) +# period = '1m' +# +# ## Collection Delay (required - must account for metrics availability via CloudWatch API) +# delay = '1m' +# +# ## Recomended: use metric 'interval' that is a multiple of 'period' to avoid +# ## gaps or overlap in pulled data +# interval = '1m' +# +# ## Metric Statistic Namespace (required) +# namespace = 'AWS/ELB' +# +# ## Metrics to Pull (optional) +# ## Defaults to all Metrics in Namespace if nothing is provided +# ## Refreshes Namespace available metrics every 1h +# #[[inputs.cloudwatch.metrics]] +# # names = ['Latency', 'RequestCount'] +# # +# # ## Dimension filters for Metric (optional) +# # [[inputs.cloudwatch.metrics.dimensions]] +# # name = 'LoadBalancerName' +# # value = 'p-example' + + # # Read metrics from one or many couchbase clusters # [[inputs.couchbase]] # ## specify servers via a url matching: @@ -1245,10 +1275,6 @@ # ## calculation of percentiles. Raising this limit increases the accuracy # ## of percentiles but also increases the memory usage and cpu time. # percentile_limit = 1000 -# -# ## UDP packet size for the server to listen for. This will depend on the size -# ## of the packets that the client is sending, which is usually 1500 bytes. -# udp_packet_size = 1500 # # Generic TCP listener @@ -1279,11 +1305,6 @@ # ## UDP listener will start dropping packets. # allowed_pending_messages = 10000 # -# ## UDP packet size for the server to listen for. This will depend -# ## on the size of the packets that the client is sending, which is -# ## usually 1500 bytes, but can be as large as 65,535 bytes. -# udp_packet_size = 1500 -# # ## Data format to consume. # ## Each data format has it's own unique set of configuration options, read # ## more about them here: From 163e8f2cdb8d35545035e6808cb8ce871e3cce53 Mon Sep 17 00:00:00 2001 From: Miki Date: Thu, 7 Apr 2016 17:29:08 +0200 Subject: [PATCH 253/287] dovecot: enable global and user queries --- plugins/inputs/dovecot/dovecot.go | 63 ++++++++++++++++---------- plugins/inputs/dovecot/dovecot_test.go | 26 +++++++---- 2 files changed, 57 insertions(+), 32 deletions(-) diff --git a/plugins/inputs/dovecot/dovecot.go b/plugins/inputs/dovecot/dovecot.go index bf1b20269..518b02e72 100644 --- a/plugins/inputs/dovecot/dovecot.go +++ b/plugins/inputs/dovecot/dovecot.go @@ -15,8 +15,9 @@ import ( ) type Dovecot struct { + Type string + Filters []string Servers []string - Domains []string } func (d *Dovecot) Description() string { @@ -30,12 +31,19 @@ var sampleConfig = ` ## ## If no servers are specified, then localhost is used as the host. servers = ["localhost:24242"] - ## Only collect metrics for these domains, collect all if empty - domains = [] + ## Type is one of "user", "domain", "ip", or "global" + type = "global" + ## Wildcard matches like "*.com". An empty string "" is same as "*" + ## If type = "ip" filters should be + filters = [""] ` var defaultTimeout = time.Second * time.Duration(5) +var validQuery = map[string]bool{ + "user": true, "domain": true, "global": true, "ip": true, +} + func (d *Dovecot) SampleConfig() string { return sampleConfig } const defaultPort = "24242" @@ -43,6 +51,11 @@ const defaultPort = "24242" // Reads stats from all configured servers. func (d *Dovecot) Gather(acc telegraf.Accumulator) error { + if !validQuery[d.Type] { + return fmt.Errorf("Error: %s is not a valid query type\n", + d.Type) + } + if len(d.Servers) == 0 { d.Servers = append(d.Servers, "127.0.0.1:24242") } @@ -51,18 +64,14 @@ func (d *Dovecot) Gather(acc telegraf.Accumulator) error { var outerr error - var domains = make(map[string]bool) - - for _, dom := range d.Domains { - domains[dom] = true - } - for _, serv := range d.Servers { - wg.Add(1) - go func(serv string) { - defer wg.Done() - outerr = d.gatherServer(serv, acc, domains) - }(serv) + for _, filter := range d.Filters { + wg.Add(1) + go func(serv string, filter string) { + defer wg.Done() + outerr = d.gatherServer(serv, acc, d.Type, filter) + }(serv, filter) + } } wg.Wait() @@ -70,7 +79,7 @@ func (d *Dovecot) Gather(acc telegraf.Accumulator) error { return outerr } -func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, doms map[string]bool) error { +func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype string, filter string) error { _, _, err := net.SplitHostPort(addr) if err != nil { return fmt.Errorf("Error: %s on url %s\n", err, addr) @@ -85,17 +94,22 @@ func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, doms map[s // Extend connection c.SetDeadline(time.Now().Add(defaultTimeout)) - c.Write([]byte("EXPORT\tdomain\n")) + msg := fmt.Sprintf("EXPORT\t%s", qtype) + if len(filter) > 0 { + msg += fmt.Sprintf("\t%s=%s", qtype, filter) + } + msg += "\n" + + c.Write([]byte(msg)) var buf bytes.Buffer io.Copy(&buf, c) - // buf := bufio.NewReader(c) host, _, _ := net.SplitHostPort(addr) - return gatherStats(&buf, acc, doms, host) + return gatherStats(&buf, acc, host, qtype) } -func gatherStats(buf *bytes.Buffer, acc telegraf.Accumulator, doms map[string]bool, host string) error { +func gatherStats(buf *bytes.Buffer, acc telegraf.Accumulator, host string, qtype string) error { lines := strings.Split(buf.String(), "\n") head := strings.Split(lines[0], "\t") @@ -107,15 +121,18 @@ func gatherStats(buf *bytes.Buffer, acc telegraf.Accumulator, doms map[string]bo } val := strings.Split(vals[i], "\t") fields := make(map[string]interface{}) - if len(doms) > 0 && !doms[val[0]] { + tags := map[string]string{"server": host, "type": qtype} + switch qtype { + case "global": continue + default: + tags[qtype] = val[0] } - tags := map[string]string{"server": host, "domain": val[0]} + for n := range val { switch head[n] { - case "domain": + case qtype: continue - // fields[head[n]] = val[n] case "user_cpu", "sys_cpu", "clock_time": fields[head[n]] = secParser(val[n]) case "reset_timestamp", "last_update": diff --git a/plugins/inputs/dovecot/dovecot_test.go b/plugins/inputs/dovecot/dovecot_test.go index 76425c0db..4f7e36204 100644 --- a/plugins/inputs/dovecot/dovecot_test.go +++ b/plugins/inputs/dovecot/dovecot_test.go @@ -15,15 +15,12 @@ func TestDovecot(t *testing.T) { t.Skip("Skipping integration test in short mode") } + // Test type=global var acc testutil.Accumulator - tags := map[string]string{"server": "dovecot.test", "domain": "domain.test"} - buf := bytes.NewBufferString(sampleStats) + tags := map[string]string{"server": "dovecot.test", "type": "global"} + buf := bytes.NewBufferString(sampleGlobal) - var doms = map[string]bool{ - "domain.test": true, - } - - err := gatherStats(buf, &acc, doms, "dovecot.test") + err := gatherStats(buf, &acc, "dovecot.test", "global") require.NoError(t, err) fields := map[string]interface{}{ @@ -56,6 +53,17 @@ func TestDovecot(t *testing.T) { } -const sampleStats = `domain reset_timestamp last_update num_logins num_cmds num_connected_sessions user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits -domain.bad 1453970076 1454603947.383029 10749 33828 0 177988.524000 148071.772000 7531838964717.193706 212491179 2125 2190386067 112779200 74487934976 3221808119808 2469948401 5237602841760 1091171292 2951966459802 15363 0 2922 136403379 334372 +const sampleGlobal = `reset_timestamp last_update num_logins num_cmds num_connected_sessions user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits +1453969886 1454603963.039864 7503897 52595715 1204 100831175.372000 83849071.112000 4326001931528183.495762 763950011 1112443 4120386897 3685239306 41679480946688 1819070669176832 2368906465 2957928122981169 3545389615 1666822498251286 24396105 302845 20155768 669946617705 1557255080` + +const sampleDomain = `domain reset_timestamp last_update num_logins num_cmds num_connected_sessions user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits domain.test 1453969886 1454603963.039864 7503897 52595715 1204 100831175.372000 83849071.112000 4326001931528183.495762 763950011 1112443 4120386897 3685239306 41679480946688 1819070669176832 2368906465 2957928122981169 3545389615 1666822498251286 24396105 302845 20155768 669946617705 1557255080` + +const sampleUser = `user reset_timestamp last_update num_logins num_cmds user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits +user.1@tiscali.it 1460041745 1460041745.258851 2 0 8.868000 6.344000 2920083490.361458 706804 448916 48979 268148736 950759424 632685 1556937725 219865 907305251 0 0 0 0 0 +user.2@tiscali.it 1460041657 1460041658.284800 2 0 1.192000 0.660000 2920083316.276633 13873 0 60226 5584 31674368 122347520 78251 207879097 27095 118197529 0 0 0 0 0 +user.3@tiscali.it 1460041657 1460041717.175634 1 7 0.0 0.0 0.20 319 0 50 9 61440 1228867 73508 28 3174 0 0 0 0 0` + +const sampleIp = `ip reset_timestamp last_update num_logins num_cmds num_connected_sessions user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits +192.168.0.100 1460041847 1460041847.849766 1 0 0 0.4000 0.0 0.49 449 0 70 3 4096 1228861 45414 24 1606 0 0 0 0 0 +192.168.0.201 1460041772 1460041772.737830 1 0 0 0.0 0.0 0.0 0 0 0 0 0 0 00 0 0 0 0 0 0 0` From 2b954770177536c1b1fb84036f2cd9d4981c3d91 Mon Sep 17 00:00:00 2001 From: Miki Date: Thu, 7 Apr 2016 17:33:56 +0200 Subject: [PATCH 254/287] enable global,user,domain and ip queries --- plugins/inputs/dovecot/README.md | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/plugins/inputs/dovecot/README.md b/plugins/inputs/dovecot/README.md index b2df0107f..262328d2a 100644 --- a/plugins/inputs/dovecot/README.md +++ b/plugins/inputs/dovecot/README.md @@ -10,20 +10,25 @@ domains. You can read Dovecot's documentation ``` # Read metrics about dovecot servers [[inputs.dovecot]] - # Dovecot servers - # specify dovecot servers via an address:port list - # e.g. - # localhost:24242 - # - # If no servers are specified, then localhost is used as the host. + ## specify dovecot servers via an address:port list + ## e.g. + ## localhost:24242 + ## + ## If no servers are specified, then localhost is used as the host. servers = ["localhost:24242"] - # Only collect metrics for these domains, collect all if empty - domains = [] + ## Type is one of "user", "domain", "ip", or "global" + type = "global" + ## Wildcard matches like "*.com". An empty string "" is same as "*" + ## If type = "ip" filters should be + filters = [""] ``` ### Tags: server: hostname + type: query type + ip: ip addr + user: username domain: domain name From cce35da366ac95cb41568047b908d14fac5fe9ce Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 7 Apr 2016 09:26:35 -0600 Subject: [PATCH 255/287] Godeps_windows: update file --- Godeps_windows | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/Godeps_windows b/Godeps_windows index f499fa915..ab3004bb8 100644 --- a/Godeps_windows +++ b/Godeps_windows @@ -1,3 +1,4 @@ +github.com/Microsoft/go-winio 9f57cbbcbcb41dea496528872a4f0e37a4f7ae98 github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9 github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5 @@ -9,24 +10,24 @@ github.com/couchbase/go-couchbase cb664315a324d87d19c879d9cc67fda6be8c2ac1 github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2 github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6 github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc -github.com/davecgh/go-spew fc32781af5e85e548d3f1abaf0fa3dbe8a72495c +github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d +github.com/docker/engine-api 8924d6900370b4c7e7984be5adc61f50a80d7537 +github.com/docker/go-connections f549a9393d05688dff0992ef3efd8bbe6c628aeb +github.com/docker/go-units 5d2041e26a699eaca682e2ea41c8f891e1060444 github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3 github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367 github.com/eclipse/paho.mqtt.golang 4ab3e867810d1ec5f35157c59e965054dbf43a0d -github.com/fsouza/go-dockerclient a49c8269a6899cae30da1f8a4b82e0ce945f9967 -github.com/go-ini/ini 776aa739ce9373377cd16f526cdf06cb4c89b40f github.com/go-ole/go-ole 50055884d646dd9434f16bbb5c9801749b9bafe4 github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032 -github.com/golang/snappy 5979233c5d6225d4a8e438cdd0b411888449ddab +github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7 github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2 github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478 github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da -github.com/influxdata/influxdb c190778997f4154294e6160c41b90140641ac915 +github.com/influxdata/influxdb e3fef5593c21644f2b43af55d6e17e70910b0e48 github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0 -github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74 github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720 github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36 github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1 @@ -37,7 +38,6 @@ github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3 github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980 -github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2 github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831 github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37 @@ -47,9 +47,8 @@ github.com/shirou/gopsutil 1f32ce1bb380845be7f5d174ac641a2c592c0c42 github.com/shirou/w32 ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5 github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744 -github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94 github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c -github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3 +github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866 github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8 github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363 golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172 From dfbe231a51bea200b45cd84d0c014223c6009fdd Mon Sep 17 00:00:00 2001 From: Luke Swithenbank Date: Thu, 31 Mar 2016 18:33:28 +1100 Subject: [PATCH 256/287] add http_response plugin --- plugins/inputs/http_response/README.md | 36 +++++++ plugins/inputs/http_response/http_response.go | 95 +++++++++++++++++++ 2 files changed, 131 insertions(+) create mode 100644 plugins/inputs/http_response/README.md create mode 100644 plugins/inputs/http_response/http_response.go diff --git a/plugins/inputs/http_response/README.md b/plugins/inputs/http_response/README.md new file mode 100644 index 000000000..13da76097 --- /dev/null +++ b/plugins/inputs/http_response/README.md @@ -0,0 +1,36 @@ +# Example Input Plugin + +This input plugin will test HTTP/HTTPS connections. + +### Configuration: + +``` +# List of UDP/TCP connections you want to check +[[inputs.http_response]] + # Server address (default http://localhost) + address = "http://github.com:80" + # Set http response timeout (default 1.0) + response_timeout = 1.0 + # HTTP Method (default "GET") + method = "GET" +``` + +### Measurements & Fields: + +- http_response + - response_time (float, seconds) + - http_response_code (int) #The code received + +### Tags: + +- All measurements have the following tags: + - server + - port + - protocol + +### Example Output: + +``` +$ ./telegraf -config telegraf.conf -input-filter http_response -test +http_response,server=http://192.168.2.2:2000,method=GET response_time=0.18070360500000002,http_response_code=200 1454785464182527094 +``` diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go new file mode 100644 index 000000000..e19c698a8 --- /dev/null +++ b/plugins/inputs/http_response/http_response.go @@ -0,0 +1,95 @@ +package http_response + +import ( + "errors" + "net/http" + "net/url" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// HttpResponses struct +type HttpResponse struct { + Address string + Method string + ResponseTimeout int +} + +func (_ *HttpResponse) Description() string { + return "HTTP/HTTPS request given an address a method and a timeout" +} + +var sampleConfig = ` + ## Server address (default http://localhost) + address = "http://github.com:80" + ## Set response_timeout (default 1 seconds) + response_timeout = 1 + ## HTTP Method + method = "GET" +` + +func (_ *HttpResponse) SampleConfig() string { + return sampleConfig +} + +func (h *HttpResponse) HttpGather() (map[string]interface{}, error) { + // Prepare fields + fields := make(map[string]interface{}) + + client := &http.Client{ + Timeout: time.Second * time.Duration(h.ResponseTimeout), + } + request, err := http.NewRequest(h.Method, h.Address, nil) + if err != nil { + return nil, err + } + // Start Timer + start := time.Now() + resp, err := client.Do(request) + if err != nil { + return nil, err + } + fields["response_time"] = time.Since(start).Seconds() + fields["http_response_code"] = resp.StatusCode + return fields, nil +} + +func (c *HttpResponse) Gather(acc telegraf.Accumulator) error { + // Set default values + if c.ResponseTimeout < 1 { + c.ResponseTimeout = 1 + } + // Check send and expected string + if c.Method == "" { + c.Method = "GET" + } + if c.Address == "" { + c.Address = "http://localhost" + } + addr, err := url.Parse(c.Address) + if err != nil { + return err + } + if addr.Scheme != "http" && addr.Scheme != "https" { + return errors.New("Only http and https are supported") + } + // Prepare data + tags := map[string]string{"server": c.Address, "method": c.Method} + var fields map[string]interface{} + // Gather data + fields, err = c.HttpGather() + if err != nil { + return err + } + // Add metrics + acc.AddFields("http_response", fields, tags) + return nil +} + +func init() { + inputs.Add("http_response", func() telegraf.Input { + return &HttpResponse{} + }) +} From 70aa0ef85da3bd67c0978c3cd23fca2ca9f06af4 Mon Sep 17 00:00:00 2001 From: Luke Swithenbank Date: Thu, 31 Mar 2016 19:47:10 +1100 Subject: [PATCH 257/287] add plugin to all --- plugins/inputs/all/all.go | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 52ee6c13d..b8534fd6d 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -16,6 +16,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/github_webhooks" _ "github.com/influxdata/telegraf/plugins/inputs/haproxy" _ "github.com/influxdata/telegraf/plugins/inputs/httpjson" + _ "github.com/influxdata/telegraf/plugins/inputs/http_response" _ "github.com/influxdata/telegraf/plugins/inputs/influxdb" _ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor" _ "github.com/influxdata/telegraf/plugins/inputs/jolokia" From 207ab5a0d1d9f1c33aa168c99b21e487a8d38462 Mon Sep 17 00:00:00 2001 From: Luke Swithenbank Date: Thu, 31 Mar 2016 19:54:08 +1100 Subject: [PATCH 258/287] update to make a working sample_config --- plugins/inputs/http_response/README.md | 6 +++--- plugins/inputs/http_response/http_response.go | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/http_response/README.md b/plugins/inputs/http_response/README.md index 13da76097..b70bbde72 100644 --- a/plugins/inputs/http_response/README.md +++ b/plugins/inputs/http_response/README.md @@ -8,9 +8,9 @@ This input plugin will test HTTP/HTTPS connections. # List of UDP/TCP connections you want to check [[inputs.http_response]] # Server address (default http://localhost) - address = "http://github.com:80" - # Set http response timeout (default 1.0) - response_timeout = 1.0 + address = "https://github.com" + # Set http response timeout (default 10) + response_timeout = 10 # HTTP Method (default "GET") method = "GET" ``` diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index e19c698a8..d2d35025a 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -23,9 +23,9 @@ func (_ *HttpResponse) Description() string { var sampleConfig = ` ## Server address (default http://localhost) - address = "http://github.com:80" + address = "https://github.com" ## Set response_timeout (default 1 seconds) - response_timeout = 1 + response_timeout = 10 ## HTTP Method method = "GET" ` From b7435b9cd18949b5291301b571fa32f473b434d2 Mon Sep 17 00:00:00 2001 From: Luke Swithenbank Date: Thu, 31 Mar 2016 19:55:17 +1100 Subject: [PATCH 259/287] fmt --- plugins/inputs/all/all.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index b8534fd6d..b28291c24 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -15,8 +15,8 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/exec" _ "github.com/influxdata/telegraf/plugins/inputs/github_webhooks" _ "github.com/influxdata/telegraf/plugins/inputs/haproxy" - _ "github.com/influxdata/telegraf/plugins/inputs/httpjson" _ "github.com/influxdata/telegraf/plugins/inputs/http_response" + _ "github.com/influxdata/telegraf/plugins/inputs/httpjson" _ "github.com/influxdata/telegraf/plugins/inputs/influxdb" _ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor" _ "github.com/influxdata/telegraf/plugins/inputs/jolokia" From 7219efbdb76006fe52c475994b5608af19dcdaaf Mon Sep 17 00:00:00 2001 From: Luke Swithenbank Date: Thu, 31 Mar 2016 20:53:51 +1100 Subject: [PATCH 260/287] add the ability to parse http headers --- plugins/inputs/http_response/http_response.go | 52 +++++++++++++------ 1 file changed, 36 insertions(+), 16 deletions(-) diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index d2d35025a..df21311ae 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -1,23 +1,28 @@ package http_response import ( + "bufio" "errors" "net/http" + "net/textproto" "net/url" + "strings" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) -// HttpResponses struct -type HttpResponse struct { +// HTTPResponse struct +type HTTPResponse struct { Address string Method string ResponseTimeout int + Headers string } -func (_ *HttpResponse) Description() string { +// Description returns the plugin Description +func (h *HTTPResponse) Description() string { return "HTTP/HTTPS request given an address a method and a timeout" } @@ -28,13 +33,19 @@ var sampleConfig = ` response_timeout = 10 ## HTTP Method method = "GET" + ## HTTP Request Headers + headers = ''' + Host: github.com + ''' ` -func (_ *HttpResponse) SampleConfig() string { +// SampleConfig returns the plugin SampleConfig +func (h *HTTPResponse) SampleConfig() string { return sampleConfig } -func (h *HttpResponse) HttpGather() (map[string]interface{}, error) { +// HTTPGather gathers all fields and returns any errors it encounters +func (h *HTTPResponse) HTTPGather() (map[string]interface{}, error) { // Prepare fields fields := make(map[string]interface{}) @@ -45,6 +56,14 @@ func (h *HttpResponse) HttpGather() (map[string]interface{}, error) { if err != nil { return nil, err } + h.Headers = strings.TrimSpace(h.Headers) + "\n\n" + reader := bufio.NewReader(strings.NewReader(h.Headers)) + tp := textproto.NewReader(reader) + mimeHeader, err := tp.ReadMIMEHeader() + if err != nil { + return nil, err + } + request.Header = http.Header(mimeHeader) // Start Timer start := time.Now() resp, err := client.Do(request) @@ -56,19 +75,20 @@ func (h *HttpResponse) HttpGather() (map[string]interface{}, error) { return fields, nil } -func (c *HttpResponse) Gather(acc telegraf.Accumulator) error { +// Gather gets all metric fields and tags and returns any errors it encounters +func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error { // Set default values - if c.ResponseTimeout < 1 { - c.ResponseTimeout = 1 + if h.ResponseTimeout < 1 { + h.ResponseTimeout = 1 } // Check send and expected string - if c.Method == "" { - c.Method = "GET" + if h.Method == "" { + h.Method = "GET" } - if c.Address == "" { - c.Address = "http://localhost" + if h.Address == "" { + h.Address = "http://localhost" } - addr, err := url.Parse(c.Address) + addr, err := url.Parse(h.Address) if err != nil { return err } @@ -76,10 +96,10 @@ func (c *HttpResponse) Gather(acc telegraf.Accumulator) error { return errors.New("Only http and https are supported") } // Prepare data - tags := map[string]string{"server": c.Address, "method": c.Method} + tags := map[string]string{"server": h.Address, "method": h.Method} var fields map[string]interface{} // Gather data - fields, err = c.HttpGather() + fields, err = h.HTTPGather() if err != nil { return err } @@ -90,6 +110,6 @@ func (c *HttpResponse) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("http_response", func() telegraf.Input { - return &HttpResponse{} + return &HTTPResponse{} }) } From f947fa86e37a1679eacb7ae98b06a8826d03a5d3 Mon Sep 17 00:00:00 2001 From: Luke Swithenbank Date: Thu, 31 Mar 2016 21:18:19 +1100 Subject: [PATCH 261/287] update to allow for following redirects --- plugins/inputs/http_response/README.md | 19 +++++++---- plugins/inputs/http_response/http_response.go | 33 ++++++++++++++++--- 2 files changed, 41 insertions(+), 11 deletions(-) diff --git a/plugins/inputs/http_response/README.md b/plugins/inputs/http_response/README.md index b70bbde72..99770e526 100644 --- a/plugins/inputs/http_response/README.md +++ b/plugins/inputs/http_response/README.md @@ -7,12 +7,18 @@ This input plugin will test HTTP/HTTPS connections. ``` # List of UDP/TCP connections you want to check [[inputs.http_response]] - # Server address (default http://localhost) - address = "https://github.com" - # Set http response timeout (default 10) + ## Server address (default http://localhost) + address = "http://github.com" + ## Set response_timeout (default 10 seconds) response_timeout = 10 - # HTTP Method (default "GET") + ## HTTP Method method = "GET" + ## HTTP Request Headers + headers = ''' + Host: github.com + ''' + ## Whether to follow redirects from the server (defaults to false) + follow_redirects = true ``` ### Measurements & Fields: @@ -25,12 +31,11 @@ This input plugin will test HTTP/HTTPS connections. - All measurements have the following tags: - server - - port - - protocol + - method ### Example Output: ``` $ ./telegraf -config telegraf.conf -input-filter http_response -test -http_response,server=http://192.168.2.2:2000,method=GET response_time=0.18070360500000002,http_response_code=200 1454785464182527094 +http_response,method=GET,server=http://www.github.com http_response_code=200i,response_time=6.223266528 1459419354977857955 ``` diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index df21311ae..09569fe73 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -3,9 +3,11 @@ package http_response import ( "bufio" "errors" + "fmt" "net/http" "net/textproto" "net/url" + "os" "strings" "time" @@ -19,6 +21,7 @@ type HTTPResponse struct { Method string ResponseTimeout int Headers string + FollowRedirects bool } // Description returns the plugin Description @@ -28,8 +31,8 @@ func (h *HTTPResponse) Description() string { var sampleConfig = ` ## Server address (default http://localhost) - address = "https://github.com" - ## Set response_timeout (default 1 seconds) + address = "http://github.com" + ## Set response_timeout (default 10 seconds) response_timeout = 10 ## HTTP Method method = "GET" @@ -37,6 +40,8 @@ var sampleConfig = ` headers = ''' Host: github.com ''' + ## Whether to follow redirects from the server (defaults to false) + follow_redirects = true ` // SampleConfig returns the plugin SampleConfig @@ -44,6 +49,8 @@ func (h *HTTPResponse) SampleConfig() string { return sampleConfig } +var ErrRedirectAttempted = errors.New("redirect") + // HTTPGather gathers all fields and returns any errors it encounters func (h *HTTPResponse) HTTPGather() (map[string]interface{}, error) { // Prepare fields @@ -52,6 +59,14 @@ func (h *HTTPResponse) HTTPGather() (map[string]interface{}, error) { client := &http.Client{ Timeout: time.Second * time.Duration(h.ResponseTimeout), } + + if h.FollowRedirects == false { + fmt.Println(h.FollowRedirects) + client.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return ErrRedirectAttempted + } + } + request, err := http.NewRequest(h.Method, h.Address, nil) if err != nil { return nil, err @@ -66,9 +81,19 @@ func (h *HTTPResponse) HTTPGather() (map[string]interface{}, error) { request.Header = http.Header(mimeHeader) // Start Timer start := time.Now() + request.Write(os.Stdout) resp, err := client.Do(request) if err != nil { - return nil, err + if h.FollowRedirects { + return nil, err + } + if urlError, ok := err.(*url.Error); ok && + urlError.Err == ErrRedirectAttempted { + fmt.Println(err) + err = nil + } else { + return nil, err + } } fields["response_time"] = time.Since(start).Seconds() fields["http_response_code"] = resp.StatusCode @@ -79,7 +104,7 @@ func (h *HTTPResponse) HTTPGather() (map[string]interface{}, error) { func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error { // Set default values if h.ResponseTimeout < 1 { - h.ResponseTimeout = 1 + h.ResponseTimeout = 10 } // Check send and expected string if h.Method == "" { From 73a7916ce3f7b43005097ba591157f43fda45d2e Mon Sep 17 00:00:00 2001 From: Luke Swithenbank Date: Thu, 31 Mar 2016 22:06:47 +1100 Subject: [PATCH 262/287] take a request body as a param --- plugins/inputs/http_response/README.md | 6 +++++- plugins/inputs/http_response/http_response.go | 20 ++++++++++++------- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/plugins/inputs/http_response/README.md b/plugins/inputs/http_response/README.md index 99770e526..f2f45b2af 100644 --- a/plugins/inputs/http_response/README.md +++ b/plugins/inputs/http_response/README.md @@ -11,7 +11,7 @@ This input plugin will test HTTP/HTTPS connections. address = "http://github.com" ## Set response_timeout (default 10 seconds) response_timeout = 10 - ## HTTP Method + ## HTTP Request Method method = "GET" ## HTTP Request Headers headers = ''' @@ -19,6 +19,10 @@ This input plugin will test HTTP/HTTPS connections. ''' ## Whether to follow redirects from the server (defaults to false) follow_redirects = true + ## Optional HTTP Request Body + body = ''' + {'fake':'data'} + ''' ``` ### Measurements & Fields: diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index 09569fe73..dc4b2df60 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -3,11 +3,10 @@ package http_response import ( "bufio" "errors" - "fmt" + "io" "net/http" "net/textproto" "net/url" - "os" "strings" "time" @@ -18,6 +17,7 @@ import ( // HTTPResponse struct type HTTPResponse struct { Address string + Body string Method string ResponseTimeout int Headers string @@ -34,7 +34,7 @@ var sampleConfig = ` address = "http://github.com" ## Set response_timeout (default 10 seconds) response_timeout = 10 - ## HTTP Method + ## HTTP Request Method method = "GET" ## HTTP Request Headers headers = ''' @@ -42,6 +42,10 @@ var sampleConfig = ` ''' ## Whether to follow redirects from the server (defaults to false) follow_redirects = true + ## Optional HTTP Request Body + body = ''' + {'fake':'data'} + ''' ` // SampleConfig returns the plugin SampleConfig @@ -49,6 +53,7 @@ func (h *HTTPResponse) SampleConfig() string { return sampleConfig } +// ErrRedirectAttempted indicates that a redirect occurred var ErrRedirectAttempted = errors.New("redirect") // HTTPGather gathers all fields and returns any errors it encounters @@ -61,13 +66,16 @@ func (h *HTTPResponse) HTTPGather() (map[string]interface{}, error) { } if h.FollowRedirects == false { - fmt.Println(h.FollowRedirects) client.CheckRedirect = func(req *http.Request, via []*http.Request) error { return ErrRedirectAttempted } } - request, err := http.NewRequest(h.Method, h.Address, nil) + var body io.Reader + if h.Body != "" { + body = strings.NewReader(h.Body) + } + request, err := http.NewRequest(h.Method, h.Address, body) if err != nil { return nil, err } @@ -81,7 +89,6 @@ func (h *HTTPResponse) HTTPGather() (map[string]interface{}, error) { request.Header = http.Header(mimeHeader) // Start Timer start := time.Now() - request.Write(os.Stdout) resp, err := client.Do(request) if err != nil { if h.FollowRedirects { @@ -89,7 +96,6 @@ func (h *HTTPResponse) HTTPGather() (map[string]interface{}, error) { } if urlError, ok := err.(*url.Error); ok && urlError.Err == ErrRedirectAttempted { - fmt.Println(err) err = nil } else { return nil, err From 437bd87d7c4994052def798b5e21e8d9ee1f7c28 Mon Sep 17 00:00:00 2001 From: Luke Swithenbank Date: Mon, 4 Apr 2016 12:20:07 +1000 Subject: [PATCH 263/287] added tests and did some refactoring --- plugins/inputs/http_response/http_response.go | 56 ++-- .../http_response/http_response_test.go | 245 ++++++++++++++++++ 2 files changed, 281 insertions(+), 20 deletions(-) create mode 100644 plugins/inputs/http_response/http_response_test.go diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index dc4b2df60..cee33795a 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -40,12 +40,12 @@ var sampleConfig = ` headers = ''' Host: github.com ''' - ## Whether to follow redirects from the server (defaults to false) - follow_redirects = true - ## Optional HTTP Request Body - body = ''' - {'fake':'data'} - ''' + ## Whether to follow redirects from the server (defaults to false) + follow_redirects = true + ## Optional HTTP Request Body + body = ''' + {'fake':'data'} + ''' ` // SampleConfig returns the plugin SampleConfig @@ -56,20 +56,40 @@ func (h *HTTPResponse) SampleConfig() string { // ErrRedirectAttempted indicates that a redirect occurred var ErrRedirectAttempted = errors.New("redirect") +// CreateHttpClient creates an http client which will timeout at the specified +// timeout period and can follow redirects if specified +func CreateHttpClient(followRedirects bool, ResponseTimeout time.Duration) *http.Client { + client := &http.Client{ + Timeout: time.Second * ResponseTimeout, + } + + if followRedirects == false { + client.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return ErrRedirectAttempted + } + } + return client +} + +// ParseHeaders takes a string of newline seperated http headers and returns a +// http.Header object. An error is returned if the headers cannot be parsed. +func ParseHeaders(headers string) (http.Header, error) { + headers = strings.TrimSpace(headers) + "\n\n" + reader := bufio.NewReader(strings.NewReader(headers)) + tp := textproto.NewReader(reader) + mimeHeader, err := tp.ReadMIMEHeader() + if err != nil { + return nil, err + } + return http.Header(mimeHeader), nil +} + // HTTPGather gathers all fields and returns any errors it encounters func (h *HTTPResponse) HTTPGather() (map[string]interface{}, error) { // Prepare fields fields := make(map[string]interface{}) - client := &http.Client{ - Timeout: time.Second * time.Duration(h.ResponseTimeout), - } - - if h.FollowRedirects == false { - client.CheckRedirect = func(req *http.Request, via []*http.Request) error { - return ErrRedirectAttempted - } - } + client := CreateHttpClient(h.FollowRedirects, time.Duration(h.ResponseTimeout)) var body io.Reader if h.Body != "" { @@ -79,14 +99,10 @@ func (h *HTTPResponse) HTTPGather() (map[string]interface{}, error) { if err != nil { return nil, err } - h.Headers = strings.TrimSpace(h.Headers) + "\n\n" - reader := bufio.NewReader(strings.NewReader(h.Headers)) - tp := textproto.NewReader(reader) - mimeHeader, err := tp.ReadMIMEHeader() + request.Header, err = ParseHeaders(h.Headers) if err != nil { return nil, err } - request.Header = http.Header(mimeHeader) // Start Timer start := time.Now() resp, err := client.Do(request) diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go new file mode 100644 index 000000000..0f568e3b4 --- /dev/null +++ b/plugins/inputs/http_response/http_response_test.go @@ -0,0 +1,245 @@ +package http_response + +import ( + "fmt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + "time" +) + +func TestParseHeaders(t *testing.T) { + fakeHeaders := ` +Accept: text/plain +Content-Type: application/json +Cache-Control: no-cache +` + headers, err := ParseHeaders(fakeHeaders) + require.NoError(t, err) + testHeaders := make(http.Header) + testHeaders.Add("Accept", "text/plain") + testHeaders.Add("Content-Type", "application/json") + testHeaders.Add("Cache-Control", "no-cache") + assert.Equal(t, testHeaders, headers) + + headers, err = ParseHeaders("Accept text/plain") + require.Error(t, err) +} + +func setUpTestMux() http.Handler { + mux := http.NewServeMux() + mux.HandleFunc("/redirect", func(w http.ResponseWriter, req *http.Request) { + http.Redirect(w, req, "/good", http.StatusMovedPermanently) + }) + mux.HandleFunc("/good", func(w http.ResponseWriter, req *http.Request) { + fmt.Fprintf(w, "hit the good page!") + }) + mux.HandleFunc("/badredirect", func(w http.ResponseWriter, req *http.Request) { + http.Redirect(w, req, "/badredirect", http.StatusMovedPermanently) + }) + mux.HandleFunc("/mustbepostmethod", func(w http.ResponseWriter, req *http.Request) { + if req.Method != "POST" { + http.Error(w, "method wasn't post", http.StatusMethodNotAllowed) + return + } + fmt.Fprintf(w, "used post correctly!") + }) + mux.HandleFunc("/musthaveabody", func(w http.ResponseWriter, req *http.Request) { + body, err := ioutil.ReadAll(req.Body) + req.Body.Close() + if err != nil { + http.Error(w, "couldn't read request body", http.StatusBadRequest) + return + } + if string(body) == "" { + http.Error(w, "body was empty", http.StatusBadRequest) + return + } + fmt.Fprintf(w, "sent a body!") + }) + mux.HandleFunc("/twosecondnap", func(w http.ResponseWriter, req *http.Request) { + time.Sleep(time.Second * 2) + return + }) + return mux +} + +func TestFields(t *testing.T) { + mux := setUpTestMux() + ts := httptest.NewServer(mux) + defer ts.Close() + + h := &HTTPResponse{ + Address: ts.URL + "/good", + Body: "{ 'test': 'data'}", + Method: "GET", + ResponseTimeout: 20, + Headers: ` +Content-Type: application/json +`, + FollowRedirects: true, + } + fields, err := h.HTTPGather() + require.NoError(t, err) + assert.NotEmpty(t, fields) + if assert.NotNil(t, fields["http_response_code"]) { + assert.Equal(t, http.StatusOK, fields["http_response_code"]) + } + assert.NotNil(t, fields["response_time"]) + +} + +func TestRedirects(t *testing.T) { + mux := setUpTestMux() + ts := httptest.NewServer(mux) + defer ts.Close() + + h := &HTTPResponse{ + Address: ts.URL + "/redirect", + Body: "{ 'test': 'data'}", + Method: "GET", + ResponseTimeout: 20, + Headers: ` +Content-Type: application/json +`, + FollowRedirects: true, + } + fields, err := h.HTTPGather() + require.NoError(t, err) + assert.NotEmpty(t, fields) + if assert.NotNil(t, fields["http_response_code"]) { + assert.Equal(t, http.StatusOK, fields["http_response_code"]) + } + + h = &HTTPResponse{ + Address: ts.URL + "/badredirect", + Body: "{ 'test': 'data'}", + Method: "GET", + ResponseTimeout: 20, + Headers: ` +Content-Type: application/json +`, + FollowRedirects: true, + } + fields, err = h.HTTPGather() + require.Error(t, err) +} + +func TestMethod(t *testing.T) { + mux := setUpTestMux() + ts := httptest.NewServer(mux) + defer ts.Close() + + h := &HTTPResponse{ + Address: ts.URL + "/mustbepostmethod", + Body: "{ 'test': 'data'}", + Method: "POST", + ResponseTimeout: 20, + Headers: ` +Content-Type: application/json +`, + FollowRedirects: true, + } + fields, err := h.HTTPGather() + require.NoError(t, err) + assert.NotEmpty(t, fields) + if assert.NotNil(t, fields["http_response_code"]) { + assert.Equal(t, http.StatusOK, fields["http_response_code"]) + } + + h = &HTTPResponse{ + Address: ts.URL + "/mustbepostmethod", + Body: "{ 'test': 'data'}", + Method: "GET", + ResponseTimeout: 20, + Headers: ` +Content-Type: application/json +`, + FollowRedirects: true, + } + fields, err = h.HTTPGather() + require.NoError(t, err) + assert.NotEmpty(t, fields) + if assert.NotNil(t, fields["http_response_code"]) { + assert.Equal(t, http.StatusMethodNotAllowed, fields["http_response_code"]) + } + + //check that lowercase methods work correctly + h = &HTTPResponse{ + Address: ts.URL + "/mustbepostmethod", + Body: "{ 'test': 'data'}", + Method: "head", + ResponseTimeout: 20, + Headers: ` +Content-Type: application/json +`, + FollowRedirects: true, + } + fields, err = h.HTTPGather() + require.NoError(t, err) + assert.NotEmpty(t, fields) + if assert.NotNil(t, fields["http_response_code"]) { + assert.Equal(t, http.StatusMethodNotAllowed, fields["http_response_code"]) + } +} + +func TestBody(t *testing.T) { + mux := setUpTestMux() + ts := httptest.NewServer(mux) + defer ts.Close() + + h := &HTTPResponse{ + Address: ts.URL + "/musthaveabody", + Body: "{ 'test': 'data'}", + Method: "GET", + ResponseTimeout: 20, + Headers: ` +Content-Type: application/json +`, + FollowRedirects: true, + } + fields, err := h.HTTPGather() + require.NoError(t, err) + assert.NotEmpty(t, fields) + if assert.NotNil(t, fields["http_response_code"]) { + assert.Equal(t, http.StatusOK, fields["http_response_code"]) + } + + h = &HTTPResponse{ + Address: ts.URL + "/musthaveabody", + Method: "GET", + ResponseTimeout: 20, + Headers: ` +Content-Type: application/json +`, + FollowRedirects: true, + } + fields, err = h.HTTPGather() + require.NoError(t, err) + assert.NotEmpty(t, fields) + if assert.NotNil(t, fields["http_response_code"]) { + assert.Equal(t, http.StatusBadRequest, fields["http_response_code"]) + } +} + +func TestTimeout(t *testing.T) { + mux := setUpTestMux() + ts := httptest.NewServer(mux) + defer ts.Close() + + h := &HTTPResponse{ + Address: ts.URL + "/twosecondnap", + Body: "{ 'test': 'data'}", + Method: "GET", + ResponseTimeout: 1, + Headers: ` +Content-Type: application/json +`, + FollowRedirects: true, + } + _, err := h.HTTPGather() + require.Error(t, err) +} From 377b030d88d78a8046f7d1c155a240e7212d60a2 Mon Sep 17 00:00:00 2001 From: Luke Swithenbank Date: Thu, 7 Apr 2016 11:57:49 +1000 Subject: [PATCH 264/287] update to 5 second default and string map for headers --- plugins/inputs/http_response/README.md | 9 ++- plugins/inputs/http_response/http_response.go | 40 +++++------ .../http_response/http_response_test.go | 72 +++++++++---------- 3 files changed, 54 insertions(+), 67 deletions(-) diff --git a/plugins/inputs/http_response/README.md b/plugins/inputs/http_response/README.md index f2f45b2af..e2bf75b5f 100644 --- a/plugins/inputs/http_response/README.md +++ b/plugins/inputs/http_response/README.md @@ -9,14 +9,13 @@ This input plugin will test HTTP/HTTPS connections. [[inputs.http_response]] ## Server address (default http://localhost) address = "http://github.com" - ## Set response_timeout (default 10 seconds) - response_timeout = 10 + ## Set response_timeout (default 5 seconds) + response_timeout = 5 ## HTTP Request Method method = "GET" ## HTTP Request Headers - headers = ''' - Host: github.com - ''' + [inputs.http_response.headers] + Host = github.com ## Whether to follow redirects from the server (defaults to false) follow_redirects = true ## Optional HTTP Request Body diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index cee33795a..73533fed4 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -1,11 +1,9 @@ package http_response import ( - "bufio" "errors" "io" "net/http" - "net/textproto" "net/url" "strings" "time" @@ -20,7 +18,7 @@ type HTTPResponse struct { Body string Method string ResponseTimeout int - Headers string + Headers map[string]string FollowRedirects bool } @@ -32,14 +30,13 @@ func (h *HTTPResponse) Description() string { var sampleConfig = ` ## Server address (default http://localhost) address = "http://github.com" - ## Set response_timeout (default 10 seconds) - response_timeout = 10 + ## Set response_timeout (default 5 seconds) + response_timeout = 5 ## HTTP Request Method method = "GET" - ## HTTP Request Headers - headers = ''' - Host: github.com - ''' + ## HTTP Request Headers (all values must be strings) + [inputs.http_response.headers] + # Host = "github.com" ## Whether to follow redirects from the server (defaults to false) follow_redirects = true ## Optional HTTP Request Body @@ -71,17 +68,14 @@ func CreateHttpClient(followRedirects bool, ResponseTimeout time.Duration) *http return client } -// ParseHeaders takes a string of newline seperated http headers and returns a -// http.Header object. An error is returned if the headers cannot be parsed. -func ParseHeaders(headers string) (http.Header, error) { - headers = strings.TrimSpace(headers) + "\n\n" - reader := bufio.NewReader(strings.NewReader(headers)) - tp := textproto.NewReader(reader) - mimeHeader, err := tp.ReadMIMEHeader() - if err != nil { - return nil, err +// CreateHeaders takes a map of header strings and puts them +// into a http.Header Object +func CreateHeaders(headers map[string]string) http.Header { + httpHeaders := make(http.Header) + for key := range headers { + httpHeaders.Add(key, headers[key]) } - return http.Header(mimeHeader), nil + return httpHeaders } // HTTPGather gathers all fields and returns any errors it encounters @@ -99,10 +93,8 @@ func (h *HTTPResponse) HTTPGather() (map[string]interface{}, error) { if err != nil { return nil, err } - request.Header, err = ParseHeaders(h.Headers) - if err != nil { - return nil, err - } + request.Header = CreateHeaders(h.Headers) + // Start Timer start := time.Now() resp, err := client.Do(request) @@ -126,7 +118,7 @@ func (h *HTTPResponse) HTTPGather() (map[string]interface{}, error) { func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error { // Set default values if h.ResponseTimeout < 1 { - h.ResponseTimeout = 10 + h.ResponseTimeout = 5 } // Check send and expected string if h.Method == "" { diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 0f568e3b4..acdfeac75 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -11,22 +11,18 @@ import ( "time" ) -func TestParseHeaders(t *testing.T) { - fakeHeaders := ` -Accept: text/plain -Content-Type: application/json -Cache-Control: no-cache -` - headers, err := ParseHeaders(fakeHeaders) - require.NoError(t, err) +func TestCreateHeaders(t *testing.T) { + fakeHeaders := map[string]string{ + "Accept": "text/plain", + "Content-Type": "application/json", + "Cache-Control": "no-cache", + } + headers := CreateHeaders(fakeHeaders) testHeaders := make(http.Header) testHeaders.Add("Accept", "text/plain") testHeaders.Add("Content-Type", "application/json") testHeaders.Add("Cache-Control", "no-cache") assert.Equal(t, testHeaders, headers) - - headers, err = ParseHeaders("Accept text/plain") - require.Error(t, err) } func setUpTestMux() http.Handler { @@ -77,9 +73,9 @@ func TestFields(t *testing.T) { Body: "{ 'test': 'data'}", Method: "GET", ResponseTimeout: 20, - Headers: ` -Content-Type: application/json -`, + Headers: map[string]string{ + "Content-Type": "application/json", + }, FollowRedirects: true, } fields, err := h.HTTPGather() @@ -102,9 +98,9 @@ func TestRedirects(t *testing.T) { Body: "{ 'test': 'data'}", Method: "GET", ResponseTimeout: 20, - Headers: ` -Content-Type: application/json -`, + Headers: map[string]string{ + "Content-Type": "application/json", + }, FollowRedirects: true, } fields, err := h.HTTPGather() @@ -119,9 +115,9 @@ Content-Type: application/json Body: "{ 'test': 'data'}", Method: "GET", ResponseTimeout: 20, - Headers: ` -Content-Type: application/json -`, + Headers: map[string]string{ + "Content-Type": "application/json", + }, FollowRedirects: true, } fields, err = h.HTTPGather() @@ -138,9 +134,9 @@ func TestMethod(t *testing.T) { Body: "{ 'test': 'data'}", Method: "POST", ResponseTimeout: 20, - Headers: ` -Content-Type: application/json -`, + Headers: map[string]string{ + "Content-Type": "application/json", + }, FollowRedirects: true, } fields, err := h.HTTPGather() @@ -155,9 +151,9 @@ Content-Type: application/json Body: "{ 'test': 'data'}", Method: "GET", ResponseTimeout: 20, - Headers: ` -Content-Type: application/json -`, + Headers: map[string]string{ + "Content-Type": "application/json", + }, FollowRedirects: true, } fields, err = h.HTTPGather() @@ -173,9 +169,9 @@ Content-Type: application/json Body: "{ 'test': 'data'}", Method: "head", ResponseTimeout: 20, - Headers: ` -Content-Type: application/json -`, + Headers: map[string]string{ + "Content-Type": "application/json", + }, FollowRedirects: true, } fields, err = h.HTTPGather() @@ -196,9 +192,9 @@ func TestBody(t *testing.T) { Body: "{ 'test': 'data'}", Method: "GET", ResponseTimeout: 20, - Headers: ` -Content-Type: application/json -`, + Headers: map[string]string{ + "Content-Type": "application/json", + }, FollowRedirects: true, } fields, err := h.HTTPGather() @@ -212,9 +208,9 @@ Content-Type: application/json Address: ts.URL + "/musthaveabody", Method: "GET", ResponseTimeout: 20, - Headers: ` -Content-Type: application/json -`, + Headers: map[string]string{ + "Content-Type": "application/json", + }, FollowRedirects: true, } fields, err = h.HTTPGather() @@ -235,9 +231,9 @@ func TestTimeout(t *testing.T) { Body: "{ 'test': 'data'}", Method: "GET", ResponseTimeout: 1, - Headers: ` -Content-Type: application/json -`, + Headers: map[string]string{ + "Content-Type": "application/json", + }, FollowRedirects: true, } _, err := h.HTTPGather() From 90185dc6b369981e82ceedc844ce435a000fc094 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 7 Apr 2016 10:31:06 -0600 Subject: [PATCH 265/287] cleanup & comment http_response def config closes #332 --- CHANGELOG.md | 1 + README.md | 3 ++- etc/telegraf.conf | 19 +++++++++++++++++++ plugins/inputs/http_response/http_response.go | 12 ++++++------ 4 files changed, 28 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 09a00f069..699d0f602 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ - [#976](https://github.com/influxdata/telegraf/pull/976): Reduce allocations in the UDP and statsd inputs. - [#979](https://github.com/influxdata/telegraf/pull/979): Reduce allocations in the TCP listener. - [#935](https://github.com/influxdata/telegraf/pull/935): AWS Cloudwatch input plugin. Thanks @joshhardy & @ljosa! +- [#943](https://github.com/influxdata/telegraf/pull/943): http_response input plugin. Thanks @Lswith! ### Bugfixes - [#968](https://github.com/influxdata/telegraf/issues/968): Processes plugin gets unknown state when spaces are in (command name) diff --git a/README.md b/README.md index caa562a6d..8621238dd 100644 --- a/README.md +++ b/README.md @@ -169,7 +169,8 @@ Currently implemented sources: * [elasticsearch](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/elasticsearch) * [exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec ) (generic executable plugin, support JSON, influx, graphite and nagios) * [haproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/haproxy) -* [httpjson ](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/httpjson ) (generic JSON-emitting http service plugin) +* [http_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_response) +* [httpjson](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/httpjson) (generic JSON-emitting http service plugin) * [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb) * [ipmi_sensor](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ipmi_sensor) * [jolokia](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 1b534d888..fa77a3a34 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -569,6 +569,25 @@ # ## servers = ["socket://run/haproxy/admin.sock"] +# # HTTP/HTTPS request given an address a method and a timeout +# [[inputs.http_response]] +# ## Server address (default http://localhost) +# address = "http://github.com" +# ## Set response_timeout (default 5 seconds) +# response_timeout = 5 +# ## HTTP Request Method +# method = "GET" +# ## Whether to follow redirects from the server (defaults to false) +# follow_redirects = true +# ## HTTP Request Headers (all values must be strings) +# # [inputs.http_response.headers] +# # Host = "github.com" +# ## Optional HTTP Request Body +# # body = ''' +# # {'fake':'data'} +# # ''' + + # # Read flattened metrics from one or more JSON HTTP endpoints # [[inputs.httpjson]] # ## NOTE This plugin only reads numerical measurements, strings and booleans diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index 73533fed4..69c8fcd06 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -34,15 +34,15 @@ var sampleConfig = ` response_timeout = 5 ## HTTP Request Method method = "GET" - ## HTTP Request Headers (all values must be strings) - [inputs.http_response.headers] - # Host = "github.com" ## Whether to follow redirects from the server (defaults to false) follow_redirects = true + ## HTTP Request Headers (all values must be strings) + # [inputs.http_response.headers] + # Host = "github.com" ## Optional HTTP Request Body - body = ''' - {'fake':'data'} - ''' + # body = ''' + # {'fake':'data'} + # ''' ` // SampleConfig returns the plugin SampleConfig From c4ea122d6679b72e7b0a4e66135438f2dffb4a0e Mon Sep 17 00:00:00 2001 From: Rene Zbinden Date: Tue, 29 Mar 2016 21:38:07 +0200 Subject: [PATCH 266/287] add sysstat plugin --- plugins/inputs/all/all.go | 1 + plugins/inputs/sysstat/README.md | 448 +++++++++++++++++++++++++ plugins/inputs/sysstat/sysstat.go | 315 +++++++++++++++++ plugins/inputs/sysstat/sysstat_test.go | 305 +++++++++++++++++ 4 files changed, 1069 insertions(+) create mode 100644 plugins/inputs/sysstat/README.md create mode 100644 plugins/inputs/sysstat/sysstat.go create mode 100644 plugins/inputs/sysstat/sysstat_test.go diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index b28291c24..2d784ca27 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -52,6 +52,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/snmp" _ "github.com/influxdata/telegraf/plugins/inputs/sqlserver" _ "github.com/influxdata/telegraf/plugins/inputs/statsd" + _ "github.com/influxdata/telegraf/plugins/inputs/sysstat" _ "github.com/influxdata/telegraf/plugins/inputs/system" _ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener" _ "github.com/influxdata/telegraf/plugins/inputs/trig" diff --git a/plugins/inputs/sysstat/README.md b/plugins/inputs/sysstat/README.md new file mode 100644 index 000000000..7191a83d3 --- /dev/null +++ b/plugins/inputs/sysstat/README.md @@ -0,0 +1,448 @@ +# sysstat Input Plugin + +Collect [sysstat](https://github.com/sysstat/sysstat) metrics - requires the sysstat +package installed. + +This plugin collects system metrics with the sysstat collector utility `sadc` and parses +the created binary data file with the `sadf` utility. + +### Configuration: + +```toml +# Sysstat metrics collector +[[inputs.sysstat]] + ## Collect interval in seconds. This value has to be equal + ## to the telegraf collect interval. + collect_interval = 30 # required + # + # + ## Path to the sadc command. + sadc_path = "/usr/lib/sa/sadc" # required + # + # + ## Path to the sadf command, if it is not in PATH + # sadf_path = "/usr/bin/sadf" + # + # + ## Activities is a list of activities, that are passed as argument to the + ## sadc collector utility (e.g: DISK, SNMP etc...) + ## The more activities that are added, the more data is collected. + # activities = ["DISK"] + # + # + ## Group metrics to measurements. + ## + ## If group is false each metric will be prefixed with a description + ## and represents itself a measurement. + ## + ## If Group is true, corresponding metrics are grouped to a single measurement. + # group = false + # + # + ## Options for the sadf command. The values on the left represent the sadf options and + ## the values on the right their description (wich are used for grouping and prefixing metrics). + [inputs.sysstat.options] + -C = "cpu" + -B = "paging" + -b = "io" + -d = "disk" # requires DISK activity + -H = "hugepages" + "-I ALL" = "interrupts" # requires INT activity + "-n ALL" = "network" + "-P ALL" = "per_cpu" + -q = "queue" + -R = "mem" + "-r ALL" = "mem_util" + -S = "swap_util" + -u = "cpu_util" + -v = "inode" + -W = "swap" + -w = "task" + # + ## Device tags can be used to add additional tags for devices. For example the configuration below + ## adds a tag vg=rootvg for all metrics with sda devices. + # [[inputs.sysstat.device_tags.sda]] + # vg = "rootvg" +``` + +### Measurements & Fields: +#### group=true +- cpu + - pct_idle (float) + - pct_iowait (float) + - pct_nice (float) + - pct_steal (float) + - pct_system (float) + - pct_user (float) + +- disk + - avgqu-sz (float) + - avgrq-sz (float) + - await (float) + - pct_util (float) + - rd_sec_pers (float) + - svctm (float) + - tps (float) + +And much more, depending on the options you configure. + +#### group=false +- cpu_pct_idle + - value (float) +- cpu_pct_iowait + - value (float) +- cpu_pct_nice + - value (float) +- cpu_pct_steal + - value (float) +- cpu_pct_system + - value (float) +- cpu_pct_user + - value (float) +- disk_avgqu-sz + - value (float) +- disk_avgrq-sz + - value (float) +- disk_await + - value (float) +- disk_pct_util + - value (float) +- disk_rd_sec_per_s + - value (float) +- disk_svctm + - value (float) +- disk_tps + - value (float) + +And much more, depending on the options you configure. + +### Tags: + +- All measurements have the following tags: + - device + +And more if you define some `device_tags`. +### Example Output: + +With the configuration below: +```toml +[[inputs.sysstat]] + collect_interval = 30 + sadc_path = "/usr/lib/sa/sadc" # required + activities = ["DISK", "SNMP", "INT"] + group = true + [inputs.sysstat.options] + -C = "cpu" + -B = "paging" + -b = "io" + -d = "disk" # requires DISK activity + -H = "hugepages" + "-I ALL" = "interrupts" # requires INT activity + "-n ALL" = "network" + "-P ALL" = "per_cpu" + -q = "queue" + -R = "mem" + "-r ALL" = "mem_util" + -S = "swap_util" + -u = "cpu_util" + -v = "inode" + -W = "swap" + -w = "task" + [[inputs.sysstat.device_tags.sda]] + vg = "rootvg" +``` + +you get the following output: +``` +$ telegraf -config telegraf.conf -input-filter sysstat -test +* Plugin: sysstat, Collection 1 +> cpu_util,device=all pct_idle=98.85,pct_iowait=0,pct_nice=0.38,pct_steal=0,pct_system=0.64,pct_user=0.13 1459255626657883725 +> swap pswpin_per_s=0,pswpout_per_s=0 1459255626658387650 +> per_cpu,device=cpu1 pct_idle=98.98,pct_iowait=0,pct_nice=0.26,pct_steal=0,pct_system=0.51,pct_user=0.26 1459255626659630437 +> per_cpu,device=all pct_idle=98.85,pct_iowait=0,pct_nice=0.38,pct_steal=0,pct_system=0.64,pct_user=0.13 1459255626659670744 +> per_cpu,device=cpu0 pct_idle=98.73,pct_iowait=0,pct_nice=0.76,pct_steal=0,pct_system=0.51,pct_user=0 1459255626659697515 +> hugepages kbhugfree=0,kbhugused=0,pct_hugused=0 1459255626660057517 +> network,device=lo coll_per_s=0,pct_ifutil=0,rxcmp_per_s=0,rxdrop_per_s=0,rxerr_per_s=0,rxfifo_per_s=0,rxfram_per_s=0,rxkB_per_s=0.81,rxmcst_per_s=0,rxpck_per_s=16,txcarr_per_s=0,txcmp_per_s=0,txdrop_per_s=0,txerr_per_s=0,txfifo_per_s=0,txkB_per_s=0.81,txpck_per_s=16 1459255626661197666 +> network access_per_s=0,active_per_s=0,asmf_per_s=0,asmok_per_s=0,asmrq_per_s=0,atmptf_per_s=0,badcall_per_s=0,call_per_s=0,estres_per_s=0,fragcrt_per_s=0,fragf_per_s=0,fragok_per_s=0,fwddgm_per_s=0,getatt_per_s=0,hit_per_s=0,iadrerr_per_s=0,iadrmk_per_s=0,iadrmkr_per_s=0,idel_per_s=16,idgm_per_s=0,idgmerr_per_s=0,idisc_per_s=0,idstunr_per_s=0,iech_per_s=0,iechr_per_s=0,ierr_per_s=0,ihdrerr_per_s=0,imsg_per_s=0,ip-frag=0,iparmpb_per_s=0,irec_per_s=16,iredir_per_s=0,iseg_per_s=16,isegerr_per_s=0,isrcq_per_s=0,itm_per_s=0,itmex_per_s=0,itmr_per_s=0,iukwnpr_per_s=0,miss_per_s=0,noport_per_s=0,oadrmk_per_s=0,oadrmkr_per_s=0,odgm_per_s=0,odisc_per_s=0,odstunr_per_s=0,oech_per_s=0,oechr_per_s=0,oerr_per_s=0,omsg_per_s=0,onort_per_s=0,oparmpb_per_s=0,oredir_per_s=0,orq_per_s=16,orsts_per_s=0,oseg_per_s=16,osrcq_per_s=0,otm_per_s=0,otmex_per_s=0,otmr_per_s=0,packet_per_s=0,passive_per_s=0,rawsck=0,read_per_s=0,retrans_per_s=0,saccess_per_s=0,scall_per_s=0,sgetatt_per_s=0,sread_per_s=0,swrite_per_s=0,tcp-tw=7,tcp_per_s=0,tcpsck=1543,totsck=4052,udp_per_s=0,udpsck=2,write_per_s=0 1459255626661381788 +> network,device=ens33 coll_per_s=0,pct_ifutil=0,rxcmp_per_s=0,rxdrop_per_s=0,rxerr_per_s=0,rxfifo_per_s=0,rxfram_per_s=0,rxkB_per_s=0,rxmcst_per_s=0,rxpck_per_s=0,txcarr_per_s=0,txcmp_per_s=0,txdrop_per_s=0,txerr_per_s=0,txfifo_per_s=0,txkB_per_s=0,txpck_per_s=0 1459255626661533072 +> disk,device=sda,vg=rootvg avgqu-sz=0.01,avgrq-sz=8.5,await=3.31,pct_util=0.1,rd_sec_per_s=0,svctm=0.25,tps=4,wr_sec_per_s=34 1459255626663974389 +> queue blocked=0,ldavg-1=1.61,ldavg-15=1.34,ldavg-5=1.67,plist-sz=1415,runq-sz=0 1459255626664159054 +> paging fault_per_s=0.25,majflt_per_s=0,pct_vmeff=0,pgfree_per_s=19,pgpgin_per_s=0,pgpgout_per_s=17,pgscand_per_s=0,pgscank_per_s=0,pgsteal_per_s=0 1459255626664304249 +> mem_util kbactive=2206568,kbanonpg=1472208,kbbuffers=118020,kbcached=1035252,kbcommit=8717200,kbdirty=156,kbinact=418912,kbkstack=24672,kbmemfree=1744868,kbmemused=3610272,kbpgtbl=87116,kbslab=233804,kbvmused=0,pct_commit=136.13,pct_memused=67.42 1459255626664554981 +> io bread_per_s=0,bwrtn_per_s=34,rtps=0,tps=4,wtps=4 1459255626664596198 +> inode dentunusd=235039,file-nr=17120,inode-nr=94505,pty-nr=14 1459255626664663693 +> interrupts,device=i000 intr_per_s=0 1459255626664800109 +> interrupts,device=i003 intr_per_s=0 1459255626665255145 +> interrupts,device=i004 intr_per_s=0 1459255626665281776 +> interrupts,device=i006 intr_per_s=0 1459255626665297416 +> interrupts,device=i007 intr_per_s=0 1459255626665321008 +> interrupts,device=i010 intr_per_s=0 1459255626665339413 +> interrupts,device=i012 intr_per_s=0 1459255626665361510 +> interrupts,device=i013 intr_per_s=0 1459255626665381327 +> interrupts,device=i015 intr_per_s=1 1459255626665397313 +> interrupts,device=i001 intr_per_s=0.25 1459255626665412985 +> interrupts,device=i002 intr_per_s=0 1459255626665430475 +> interrupts,device=i005 intr_per_s=0 1459255626665453944 +> interrupts,device=i008 intr_per_s=0 1459255626665470650 +> interrupts,device=i011 intr_per_s=0 1459255626665486069 +> interrupts,device=i009 intr_per_s=0 1459255626665502913 +> interrupts,device=i014 intr_per_s=0 1459255626665518152 +> task cswch_per_s=722.25,proc_per_s=0 1459255626665849646 +> cpu,device=all pct_idle=98.85,pct_iowait=0,pct_nice=0.38,pct_steal=0,pct_system=0.64,pct_user=0.13 1459255626666639715 +> mem bufpg_per_s=0,campg_per_s=1.75,frmpg_per_s=-8.25 1459255626666770205 +> swap_util kbswpcad=0,kbswpfree=1048572,kbswpused=0,pct_swpcad=0,pct_swpused=0 1459255626667313276 +``` + +If you change the group value to false like below: +```toml +[[inputs.sysstat]] + collect_interval = 30 + sadc_path = "/usr/lib/sa/sadc" # required + activities = ["DISK", "SNMP", "INT"] + group = false + [inputs.sysstat.options] + -C = "cpu" + -B = "paging" + -b = "io" + -d = "disk" # requires DISK activity + -H = "hugepages" + "-I ALL" = "interrupts" # requires INT activity + "-n ALL" = "network" + "-P ALL" = "per_cpu" + -q = "queue" + -R = "mem" + "-r ALL" = "mem_util" + -S = "swap_util" + -u = "cpu_util" + -v = "inode" + -W = "swap" + -w = "task" + [[inputs.sysstat.device_tags.sda]] + vg = "rootvg" +``` + +you get the following output: +``` +$ telegraf -config telegraf.conf -input-filter sysstat -test +* Plugin: sysstat, Collection 1 +> io_tps value=0.5 1459255780126025822 +> io_rtps value=0 1459255780126025822 +> io_wtps value=0.5 1459255780126025822 +> io_bread_per_s value=0 1459255780126025822 +> io_bwrtn_per_s value=38 1459255780126025822 +> cpu_util_pct_user,device=all value=39.07 1459255780126025822 +> cpu_util_pct_nice,device=all value=0 1459255780126025822 +> cpu_util_pct_system,device=all value=47.94 1459255780126025822 +> cpu_util_pct_iowait,device=all value=0 1459255780126025822 +> cpu_util_pct_steal,device=all value=0 1459255780126025822 +> cpu_util_pct_idle,device=all value=12.98 1459255780126025822 +> swap_pswpin_per_s value=0 1459255780126025822 +> cpu_pct_user,device=all value=39.07 1459255780126025822 +> cpu_pct_nice,device=all value=0 1459255780126025822 +> cpu_pct_system,device=all value=47.94 1459255780126025822 +> cpu_pct_iowait,device=all value=0 1459255780126025822 +> cpu_pct_steal,device=all value=0 1459255780126025822 +> cpu_pct_idle,device=all value=12.98 1459255780126025822 +> per_cpu_pct_user,device=all value=39.07 1459255780126025822 +> per_cpu_pct_nice,device=all value=0 1459255780126025822 +> per_cpu_pct_system,device=all value=47.94 1459255780126025822 +> per_cpu_pct_iowait,device=all value=0 1459255780126025822 +> per_cpu_pct_steal,device=all value=0 1459255780126025822 +> per_cpu_pct_idle,device=all value=12.98 1459255780126025822 +> per_cpu_pct_user,device=cpu0 value=33.5 1459255780126025822 +> per_cpu_pct_nice,device=cpu0 value=0 1459255780126025822 +> per_cpu_pct_system,device=cpu0 value=65.25 1459255780126025822 +> per_cpu_pct_iowait,device=cpu0 value=0 1459255780126025822 +> per_cpu_pct_steal,device=cpu0 value=0 1459255780126025822 +> per_cpu_pct_idle,device=cpu0 value=1.25 1459255780126025822 +> per_cpu_pct_user,device=cpu1 value=44.85 1459255780126025822 +> per_cpu_pct_nice,device=cpu1 value=0 1459255780126025822 +> per_cpu_pct_system,device=cpu1 value=29.55 1459255780126025822 +> per_cpu_pct_iowait,device=cpu1 value=0 1459255780126025822 +> per_cpu_pct_steal,device=cpu1 value=0 1459255780126025822 +> per_cpu_pct_idle,device=cpu1 value=25.59 1459255780126025822 +> hugepages_kbhugfree value=0 1459255780126025822 +> hugepages_kbhugused value=0 1459255780126025822 +> hugepages_pct_hugused value=0 1459255780126025822 +> interrupts_intr_per_s,device=i000 value=0 1459255780126025822 +> inode_dentunusd value=252876 1459255780126025822 +> mem_util_kbmemfree value=1613612 1459255780126025822 +> disk_tps,device=sda,vg=rootvg value=0.5 1459255780126025822 +> swap_pswpout_per_s value=0 1459255780126025822 +> network_rxpck_per_s,device=ens33 value=0 1459255780126025822 +> queue_runq-sz value=4 1459255780126025822 +> task_proc_per_s value=0 1459255780126025822 +> task_cswch_per_s value=2019 1459255780126025822 +> mem_frmpg_per_s value=0 1459255780126025822 +> mem_bufpg_per_s value=0.5 1459255780126025822 +> mem_campg_per_s value=1.25 1459255780126025822 +> interrupts_intr_per_s,device=i001 value=0 1459255780126025822 +> inode_file-nr value=19104 1459255780126025822 +> mem_util_kbmemused value=3741528 1459255780126025822 +> disk_rd_sec_per_s,device=sda,vg=rootvg value=0 1459255780126025822 +> network_txpck_per_s,device=ens33 value=0 1459255780126025822 +> queue_plist-sz value=1512 1459255780126025822 +> paging_pgpgin_per_s value=0 1459255780126025822 +> paging_pgpgout_per_s value=19 1459255780126025822 +> paging_fault_per_s value=0.25 1459255780126025822 +> paging_majflt_per_s value=0 1459255780126025822 +> paging_pgfree_per_s value=34.25 1459255780126025822 +> paging_pgscank_per_s value=0 1459255780126025822 +> paging_pgscand_per_s value=0 1459255780126025822 +> paging_pgsteal_per_s value=0 1459255780126025822 +> paging_pct_vmeff value=0 1459255780126025822 +> interrupts_intr_per_s,device=i002 value=0 1459255780126025822 +> interrupts_intr_per_s,device=i003 value=0 1459255780126025822 +> interrupts_intr_per_s,device=i004 value=0 1459255780126025822 +> interrupts_intr_per_s,device=i005 value=0 1459255780126025822 +> interrupts_intr_per_s,device=i006 value=0 1459255780126025822 +> interrupts_intr_per_s,device=i007 value=0 1459255780126025822 +> interrupts_intr_per_s,device=i008 value=0 1459255780126025822 +> interrupts_intr_per_s,device=i009 value=0 1459255780126025822 +> interrupts_intr_per_s,device=i010 value=0 1459255780126025822 +> interrupts_intr_per_s,device=i011 value=0 1459255780126025822 +> interrupts_intr_per_s,device=i012 value=0 1459255780126025822 +> interrupts_intr_per_s,device=i013 value=0 1459255780126025822 +> interrupts_intr_per_s,device=i014 value=0 1459255780126025822 +> interrupts_intr_per_s,device=i015 value=1 1459255780126025822 +> inode_inode-nr value=94709 1459255780126025822 +> inode_pty-nr value=14 1459255780126025822 +> mem_util_pct_memused value=69.87 1459255780126025822 +> mem_util_kbbuffers value=118252 1459255780126025822 +> mem_util_kbcached value=1045240 1459255780126025822 +> mem_util_kbcommit value=9628152 1459255780126025822 +> mem_util_pct_commit value=150.35 1459255780126025822 +> mem_util_kbactive value=2303752 1459255780126025822 +> mem_util_kbinact value=428340 1459255780126025822 +> mem_util_kbdirty value=104 1459255780126025822 +> mem_util_kbanonpg value=1568676 1459255780126025822 +> mem_util_kbslab value=240032 1459255780126025822 +> mem_util_kbkstack value=26224 1459255780126025822 +> mem_util_kbpgtbl value=98056 1459255780126025822 +> mem_util_kbvmused value=0 1459255780126025822 +> disk_wr_sec_per_s,device=sda,vg=rootvg value=38 1459255780126025822 +> disk_avgrq-sz,device=sda,vg=rootvg value=76 1459255780126025822 +> disk_avgqu-sz,device=sda,vg=rootvg value=0 1459255780126025822 +> disk_await,device=sda,vg=rootvg value=2 1459255780126025822 +> disk_svctm,device=sda,vg=rootvg value=2 1459255780126025822 +> disk_pct_util,device=sda,vg=rootvg value=0.1 1459255780126025822 +> network_rxkB_per_s,device=ens33 value=0 1459255780126025822 +> network_txkB_per_s,device=ens33 value=0 1459255780126025822 +> network_rxcmp_per_s,device=ens33 value=0 1459255780126025822 +> network_txcmp_per_s,device=ens33 value=0 1459255780126025822 +> network_rxmcst_per_s,device=ens33 value=0 1459255780126025822 +> network_pct_ifutil,device=ens33 value=0 1459255780126025822 +> network_rxpck_per_s,device=lo value=10.75 1459255780126025822 +> network_txpck_per_s,device=lo value=10.75 1459255780126025822 +> network_rxkB_per_s,device=lo value=0.77 1459255780126025822 +> network_txkB_per_s,device=lo value=0.77 1459255780126025822 +> network_rxcmp_per_s,device=lo value=0 1459255780126025822 +> network_txcmp_per_s,device=lo value=0 1459255780126025822 +> network_rxmcst_per_s,device=lo value=0 1459255780126025822 +> network_pct_ifutil,device=lo value=0 1459255780126025822 +> network_rxerr_per_s,device=ens33 value=0 1459255780126025822 +> network_txerr_per_s,device=ens33 value=0 1459255780126025822 +> network_coll_per_s,device=ens33 value=0 1459255780126025822 +> network_rxdrop_per_s,device=ens33 value=0 1459255780126025822 +> network_txdrop_per_s,device=ens33 value=0 1459255780126025822 +> network_txcarr_per_s,device=ens33 value=0 1459255780126025822 +> network_rxfram_per_s,device=ens33 value=0 1459255780126025822 +> network_rxfifo_per_s,device=ens33 value=0 1459255780126025822 +> network_txfifo_per_s,device=ens33 value=0 1459255780126025822 +> network_rxerr_per_s,device=lo value=0 1459255780126025822 +> network_txerr_per_s,device=lo value=0 1459255780126025822 +> network_coll_per_s,device=lo value=0 1459255780126025822 +> network_rxdrop_per_s,device=lo value=0 1459255780126025822 +> network_txdrop_per_s,device=lo value=0 1459255780126025822 +> network_txcarr_per_s,device=lo value=0 1459255780126025822 +> network_rxfram_per_s,device=lo value=0 1459255780126025822 +> network_rxfifo_per_s,device=lo value=0 1459255780126025822 +> network_txfifo_per_s,device=lo value=0 1459255780126025822 +> network_call_per_s value=0 1459255780126025822 +> network_retrans_per_s value=0 1459255780126025822 +> network_read_per_s value=0 1459255780126025822 +> network_write_per_s value=0 1459255780126025822 +> network_access_per_s value=0 1459255780126025822 +> network_getatt_per_s value=0 1459255780126025822 +> network_scall_per_s value=0 1459255780126025822 +> network_badcall_per_s value=0 1459255780126025822 +> network_packet_per_s value=0 1459255780126025822 +> network_udp_per_s value=0 1459255780126025822 +> network_tcp_per_s value=0 1459255780126025822 +> network_hit_per_s value=0 1459255780126025822 +> network_miss_per_s value=0 1459255780126025822 +> network_sread_per_s value=0 1459255780126025822 +> network_swrite_per_s value=0 1459255780126025822 +> network_saccess_per_s value=0 1459255780126025822 +> network_sgetatt_per_s value=0 1459255780126025822 +> network_totsck value=4234 1459255780126025822 +> network_tcpsck value=1637 1459255780126025822 +> network_udpsck value=2 1459255780126025822 +> network_rawsck value=0 1459255780126025822 +> network_ip-frag value=0 1459255780126025822 +> network_tcp-tw value=4 1459255780126025822 +> network_irec_per_s value=10.75 1459255780126025822 +> network_fwddgm_per_s value=0 1459255780126025822 +> network_idel_per_s value=10.75 1459255780126025822 +> network_orq_per_s value=10.75 1459255780126025822 +> network_asmrq_per_s value=0 1459255780126025822 +> network_asmok_per_s value=0 1459255780126025822 +> network_fragok_per_s value=0 1459255780126025822 +> network_fragcrt_per_s value=0 1459255780126025822 +> network_ihdrerr_per_s value=0 1459255780126025822 +> network_iadrerr_per_s value=0 1459255780126025822 +> network_iukwnpr_per_s value=0 1459255780126025822 +> network_idisc_per_s value=0 1459255780126025822 +> network_odisc_per_s value=0 1459255780126025822 +> network_onort_per_s value=0 1459255780126025822 +> network_asmf_per_s value=0 1459255780126025822 +> network_fragf_per_s value=0 1459255780126025822 +> network_imsg_per_s value=0 1459255780126025822 +> network_omsg_per_s value=0 1459255780126025822 +> network_iech_per_s value=0 1459255780126025822 +> network_iechr_per_s value=0 1459255780126025822 +> network_oech_per_s value=0 1459255780126025822 +> network_oechr_per_s value=0 1459255780126025822 +> network_itm_per_s value=0 1459255780126025822 +> network_itmr_per_s value=0 1459255780126025822 +> network_otm_per_s value=0 1459255780126025822 +> network_otmr_per_s value=0 1459255780126025822 +> network_iadrmk_per_s value=0 1459255780126025822 +> network_iadrmkr_per_s value=0 1459255780126025822 +> network_oadrmk_per_s value=0 1459255780126025822 +> network_oadrmkr_per_s value=0 1459255780126025822 +> network_ierr_per_s value=0 1459255780126025822 +> network_oerr_per_s value=0 1459255780126025822 +> network_idstunr_per_s value=0 1459255780126025822 +> network_odstunr_per_s value=0 1459255780126025822 +> network_itmex_per_s value=0 1459255780126025822 +> network_otmex_per_s value=0 1459255780126025822 +> network_iparmpb_per_s value=0 1459255780126025822 +> network_oparmpb_per_s value=0 1459255780126025822 +> network_isrcq_per_s value=0 1459255780126025822 +> network_osrcq_per_s value=0 1459255780126025822 +> network_iredir_per_s value=0 1459255780126025822 +> network_oredir_per_s value=0 1459255780126025822 +> network_active_per_s value=0 1459255780126025822 +> network_passive_per_s value=0 1459255780126025822 +> network_iseg_per_s value=10.75 1459255780126025822 +> network_oseg_per_s value=9.5 1459255780126025822 +> network_atmptf_per_s value=0 1459255780126025822 +> network_estres_per_s value=0 1459255780126025822 +> network_retrans_per_s value=1.5 1459255780126025822 +> network_isegerr_per_s value=0.25 1459255780126025822 +> network_orsts_per_s value=0 1459255780126025822 +> network_idgm_per_s value=0 1459255780126025822 +> network_odgm_per_s value=0 1459255780126025822 +> network_noport_per_s value=0 1459255780126025822 +> network_idgmerr_per_s value=0 1459255780126025822 +> queue_ldavg-1 value=2.1 1459255780126025822 +> queue_ldavg-5 value=1.82 1459255780126025822 +> queue_ldavg-15 value=1.44 1459255780126025822 +> queue_blocked value=0 1459255780126025822 +> swap_util_kbswpfree value=1048572 1459255780126025822 +> swap_util_kbswpused value=0 1459255780126025822 +> swap_util_pct_swpused value=0 1459255780126025822 +> swap_util_kbswpcad value=0 1459255780126025822 +> swap_util_pct_swpcad value=0 1459255780126025822 +``` diff --git a/plugins/inputs/sysstat/sysstat.go b/plugins/inputs/sysstat/sysstat.go new file mode 100644 index 000000000..2b605b3ad --- /dev/null +++ b/plugins/inputs/sysstat/sysstat.go @@ -0,0 +1,315 @@ +// build +linux + +package sysstat + +import ( + "bufio" + "encoding/csv" + "errors" + "fmt" + "io" + "os" + "os/exec" + "path" + "strconv" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +var ( + execCommand = exec.Command // execCommand is used to mock commands in tests. + dfltActivities = []string{"DISK"} +) + +const parseInterval = 1 // parseInterval is the interval (in seconds) where the parsing takes place. + +type Sysstat struct { + // Interval that defines how long data is collected by Sadc cmd. + // + // This value has to be the same as the thelegraf collection interval. + Interval int `toml:"collect_interval"` + + // Sadc represents the path to the sadc collector utility. + Sadc string `toml:"sadc_path"` + + // Sadf represents the path to the sadf cmd. + Sadf string `toml:"sadf_path"` + + // Activities is a list of activities that are passed as argument to the + // collector utility (e.g: DISK, SNMP etc...) + // The more activities that are added, the more data is collected. + Activities []string + + // Options is a map of options. + // + // The key represents the actual option that the Sadf command is called with and + // the value represents the description for that option. + // + // For example, if you have the following options map: + // map[string]string{"-C": "cpu", "-d": "disk"} + // The Sadf command is run with the options -C and -d to extract cpu and + // disk metrics from the collected binary file. + // + // If Group is false (see below), each metric will be prefixed with the corresponding description + // and represents itself a measurement. + // + // If Group is true, metrics are grouped to a single measurement with the corresponding description as name. + Options map[string]string + + // Group determines if metrics are grouped or not. + Group bool + + // DeviceTags adds the possibility to add additional tags for devices. + DeviceTags map[string][]map[string]string `toml:"device_tags"` + tmpFile string +} + +func (*Sysstat) Description() string { + return "Sysstat metrics collector" +} + +var sampleConfig = ` + ## Collect interval in seconds. This value has to be equal + ## to the telegraf collect interval. + collect_interval = 5 # required + # + # + ## Path to the sadc command. + sadc_path = "/usr/lib/sa/sadc" # required + # + # + ## Path to the sadf command, if it is not in PATH + # sadf_path = "/usr/bin/sadf" + # + # + ## Activities is a list of activities, that are passed as argument to the + ## sadc collector utility (e.g: DISK, SNMP etc...) + ## The more activities that are added, the more data is collected. + # activities = ["DISK"] + # + # + ## Group metrics to measurements. + ## + ## If group is false each metric will be prefixed with a description + ## and represents itself a measurement. + ## + ## If Group is true, corresponding metrics are grouped to a single measurement. + # group = false + # + # + ## Options for the sasf command. The values on the left represent the sadf options and + ## the values on the right their description (wich are used for grouping and prefixing metrics). + [inputs.sysstat.options] + -C = "cpu" + -B = "paging" + -b = "io" + -d = "disk" # requires DISK activity + -H = "hugepages" + "-I ALL" = "interrupts" # requires INT activity + "-n ALL" = "network" + "-P ALL" = "per_cpu" + -q = "queue" + -R = "mem" + "-r ALL" = "mem_util" + -S = "swap_util" + -u = "cpu_util" + -v = "inode" + -W = "swap" + -w = "task" + # + # + ## Device tags can be used to add additional tags for devices. For example the configuration below + ## adds a tag vg with value rootvg for all metrics with sda devices. + # [[inputs.sysstat.device_tags.sda]] + # vg = "rootvg" +` + +func (*Sysstat) SampleConfig() string { + return sampleConfig +} + +func (s *Sysstat) Gather(acc telegraf.Accumulator) error { + ts := time.Now().Add(time.Duration(s.Interval) * time.Second) + if err := s.collect(); err != nil { + return err + } + var wg sync.WaitGroup + errorChannel := make(chan error, len(s.Options)*2) + for option := range s.Options { + wg.Add(1) + go func(acc telegraf.Accumulator, option string) { + defer wg.Done() + if err := s.parse(acc, option, ts); err != nil { + errorChannel <- err + } + }(acc, option) + } + wg.Wait() + close(errorChannel) + + errorStrings := []string{} + for err := range errorChannel { + errorStrings = append(errorStrings, err.Error()) + } + + if _, err := os.Stat(s.tmpFile); err == nil { + if err := os.Remove(s.tmpFile); err != nil { + errorStrings = append(errorStrings, err.Error()) + } + } + + if len(errorStrings) == 0 { + return nil + } + return errors.New(strings.Join(errorStrings, "\n")) +} + +// collect collects sysstat data with the collector utility sadc. It runs the following command: +// Sadc -S -S ... 2 tmpFile +// The above command collects system metrics during and saves it in binary form to tmpFile. +func (s *Sysstat) collect() error { + if len(s.Activities) == 0 { + s.Activities = dfltActivities + } + options := []string{} + for _, act := range s.Activities { + options = append(options, "-S", act) + } + s.tmpFile = path.Join("/tmp", fmt.Sprintf("sysstat-%d", time.Now().Unix())) + collectInterval := s.Interval - parseInterval // collectInterval has to be smaller than the telegraf data collection interval + options = append(options, strconv.Itoa(collectInterval), "2", s.tmpFile) + cmd := execCommand(s.Sadc, options...) + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to run command %s: %s", strings.Join(cmd.Args, " "), string(out)) + } + return nil +} + +// parse runs Sadf on the previously saved tmpFile: +// Sadf -p -- -p