From 12db3b91208415364cc1649844e77143a1221a1f Mon Sep 17 00:00:00 2001
From: Cameron Sparr
Date: Tue, 13 Dec 2016 12:24:39 +0000
Subject: [PATCH 0001/1302] Check if metric is nil before calling SetAggregate

fixes #2146
---
 CHANGELOG.md                          | 3 ++-
 internal/models/running_aggregator.go | 4 +++-
 plugins/inputs/logparser/README.md    | 6 ++++--
 3 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c3d3ef5f5..6f5ca5d3b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -19,7 +19,8 @@
 
 - [#2049](https://github.com/influxdata/telegraf/pull/2049): Fix the Value data format not trimming null characters from input.
 - [#1949](https://github.com/influxdata/telegraf/issues/1949): Fix windows `net` plugin.
-- [#1775](https://github.com/influxdata/telegraf/issues/1775): Cache & expire metrics for delivery to prometheus
+- [#1775](https://github.com/influxdata/telegraf/issues/1775): Cache & expire metrics for delivery to prometheus.
+- [#2146](https://github.com/influxdata/telegraf/issues/2146): Fix potential panic in aggregator plugin metric maker.
 
 ## v1.1.2 [2016-12-12]
 
diff --git a/internal/models/running_aggregator.go b/internal/models/running_aggregator.go
index 2e22f1569..8189a6667 100644
--- a/internal/models/running_aggregator.go
+++ b/internal/models/running_aggregator.go
@@ -70,7 +70,9 @@ func (r *RunningAggregator) MakeMetric(
 		t,
 	)
 
-	m.SetAggregate(true)
+	if m != nil {
+		m.SetAggregate(true)
+	}
 
 	return m
 }
diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md
index 1affcd811..5973d9f42 100644
--- a/plugins/inputs/logparser/README.md
+++ b/plugins/inputs/logparser/README.md
@@ -40,8 +40,11 @@ regex patterns.
 ## Grok Parser
 
 The grok parser uses a slightly modified version of logstash "grok" patterns,
-with the format `%{<capture_syntax>[:<semantic_name>][:<modifier>]}`
+with the format
+```
+%{<capture_syntax>[:<semantic_name>][:<modifier>]}
+```
 
 Telegraf has many of its own
 [built-in patterns](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/logparser/grok/patterns/influx-patterns),
@@ -92,4 +95,3 @@ Timestamp modifiers can be used to convert captures to the timestamp of the
   CUSTOM time layouts must be within quotes and be the representation of the
   "reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`
   See https://golang.org/pkg/time/#Parse for more details.
-
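A note on the CUSTOM layouts mentioned in the README above: they are ordinary Go `time.Parse` layouts, where the layout string is the reference time `Mon Jan 2 15:04:05 -0700 MST 2006` rewritten in the shape of the timestamps being parsed. A minimal sketch of that mechanism in plain Go; the Apache-style layout and the sample timestamp here are illustrative choices, not taken from the patch:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// The layout is the Go reference time rendered the way the
	// log's timestamps actually look.
	layout := "02/Jan/2006:15:04:05 -0700"

	// Parse a log timestamp using that layout.
	ts, err := time.Parse(layout, "04/Jun/2016:12:41:45 +0100")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(ts.UTC()) // 2016-06-04 11:41:45 +0000 UTC
}
```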
From fc9f921b629bc582cda6c71dbc34b07bc5bd1e3f Mon Sep 17 00:00:00 2001
From: Rikaard Hosein
Date: Mon, 3 Oct 2016 20:24:18 -0400
Subject: [PATCH 0002/1302] Can turn pid into tag instead of field

closes #1843
fixes #1668
---
 CHANGELOG.md                              | 1 +
 plugins/inputs/procstat/procstat.go       | 6 ++++++
 plugins/inputs/procstat/spec_processor.go | 7 ++++++-
 3 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6f5ca5d3b..1de19eca0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -21,6 +21,7 @@
 - [#1949](https://github.com/influxdata/telegraf/issues/1949): Fix windows `net` plugin.
 - [#1775](https://github.com/influxdata/telegraf/issues/1775): Cache & expire metrics for delivery to prometheus.
 - [#2146](https://github.com/influxdata/telegraf/issues/2146): Fix potential panic in aggregator plugin metric maker.
+- [#1843](https://github.com/influxdata/telegraf/pull/1843) & [#1668](https://github.com/influxdata/telegraf/issues/1668): Add optional ability to define PID as a tag.
 
 ## v1.1.2 [2016-12-12]
 
diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go
index e29b5031c..929490e4a 100644
--- a/plugins/inputs/procstat/procstat.go
+++ b/plugins/inputs/procstat/procstat.go
@@ -21,6 +21,7 @@ type Procstat struct {
 	Prefix      string
 	ProcessName string
 	User        string
+	PidTag      bool
 
 	// pidmap maps a pid to a process object, so we don't recreate every gather
 	pidmap map[int32]*process.Process
@@ -53,6 +54,8 @@ var sampleConfig = `
   prefix = ""
   ## comment this out if you want raw cpu_time stats
   fielddrop = ["cpu_time_*"]
+  ## This is optional; moves pid into a tag instead of a field
+  pid_tag = false
 `

 func (_ *Procstat) SampleConfig() string {
@@ -70,6 +73,9 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error {
 			p.Exe, p.PidFile, p.Pattern, p.User, err.Error())
 	} else {
 		for pid, proc := range p.pidmap {
+			if p.PidTag {
+				p.tagmap[pid]["pid"] = fmt.Sprint(pid)
+			}
 			p := NewSpecProcessor(p.ProcessName, p.Prefix, pid, acc, proc, p.tagmap[pid])
 			p.pushMetrics()
 		}
diff --git a/plugins/inputs/procstat/spec_processor.go b/plugins/inputs/procstat/spec_processor.go
index 5143d8bcc..3b56fbc3e 100644
--- a/plugins/inputs/procstat/spec_processor.go
+++ b/plugins/inputs/procstat/spec_processor.go
@@ -48,7 +48,12 @@ func (p *SpecProcessor) pushMetrics() {
 	if p.Prefix != "" {
 		prefix = p.Prefix + "_"
 	}
-	fields := map[string]interface{}{"pid": p.pid}
+	fields := map[string]interface{}{}
+
+	// If pid is not present as a tag, include it as a field.
+	if _, pidInTags := p.tags["pid"]; !pidInTags {
+		fields["pid"] = p.pid
+	}
 
 	numThreads, err := p.proc.NumThreads()
 	if err == nil {
From 6e241611beffee9e4bed152f8eb505b2ce48c38f Mon Sep 17 00:00:00 2001
From: Leon Barrett
Date: Tue, 13 Dec 2016 06:13:53 -0800
Subject: [PATCH 0003/1302] Fix bug: too many cloudwatch metrics (#1885)

* Fix bug: too many cloudwatch metrics

Cloudwatch metrics were being added incorrectly. The most obvious
symptom of this was that too many metrics were being added. A simple
check against the name of the metric proved to be a sufficient fix.
In order to test the fix, a metric selection function was factored out.

* Go fmt cloudwatch

* Cloudwatch isSelected checks metric name

* Move cloudwatch line in changelog to 1.2 features
---
 CHANGELOG.md                                 |  1 +
 plugins/inputs/cloudwatch/cloudwatch.go      | 29 ++++--
 plugins/inputs/cloudwatch/cloudwatch_test.go | 96 +++++++++++++++++++-
 3 files changed, 112 insertions(+), 14 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1de19eca0..a7b4cf048 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,6 +14,7 @@
 - [#2127](https://github.com/influxdata/telegraf/pull/2127): Update Go version to 1.7.4.
 - [#2126](https://github.com/influxdata/telegraf/pull/2126): Support a metric.Split function.
 - [#2026](https://github.com/influxdata/telegraf/pull/2065): elasticsearch "shield" (basic auth) support doc.
+- [#1885](https://github.com/influxdata/telegraf/pull/1885): Fix over-querying of cloudwatch metrics ### Bugfixes diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index bc8de313e..a812c1265 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -126,11 +126,7 @@ func (c *CloudWatch) Description() string { return "Pull Metric Statistics from Amazon CloudWatch" } -func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { - if c.client == nil { - c.initializeCloudWatch() - } - +func SelectMetrics(c *CloudWatch) ([]*cloudwatch.Metric, error) { var metrics []*cloudwatch.Metric // check for provided metric filter @@ -155,11 +151,11 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { } else { allMetrics, err := c.fetchNamespaceMetrics() if err != nil { - return err + return nil, err } for _, name := range m.MetricNames { for _, metric := range allMetrics { - if isSelected(metric, m.Dimensions) { + if isSelected(name, metric, m.Dimensions) { metrics = append(metrics, &cloudwatch.Metric{ Namespace: aws.String(c.Namespace), MetricName: aws.String(name), @@ -169,16 +165,26 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { } } } - } } else { var err error metrics, err = c.fetchNamespaceMetrics() if err != nil { - return err + return nil, err } } + return metrics, nil +} +func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { + if c.client == nil { + c.initializeCloudWatch() + } + + metrics, err := SelectMetrics(c) + if err != nil { + return err + } metricCount := len(metrics) errChan := errchan.New(metricCount) @@ -380,7 +386,10 @@ func hasWilcard(dimensions []*Dimension) bool { return false } -func isSelected(metric *cloudwatch.Metric, dimensions []*Dimension) bool { +func isSelected(name string, metric *cloudwatch.Metric, dimensions []*Dimension) bool { + if name != *metric.MetricName { + return false + } if len(metric.Dimensions) != len(dimensions) { return false } diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go index 73fca9253..a1bd7464b 100644 --- a/plugins/inputs/cloudwatch/cloudwatch_test.go +++ b/plugins/inputs/cloudwatch/cloudwatch_test.go @@ -11,9 +11,9 @@ import ( "github.com/stretchr/testify/assert" ) -type mockCloudWatchClient struct{} +type mockGatherCloudWatchClient struct{} -func (m *mockCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) { +func (m *mockGatherCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) { metric := &cloudwatch.Metric{ Namespace: params.Namespace, MetricName: aws.String("Latency"), @@ -31,7 +31,7 @@ func (m *mockCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) return result, nil } -func (m *mockCloudWatchClient) GetMetricStatistics(params *cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) { +func (m *mockGatherCloudWatchClient) GetMetricStatistics(params *cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) { dataPoint := &cloudwatch.Datapoint{ Timestamp: params.EndTime, Minimum: aws.Float64(0.1), @@ -62,7 +62,7 @@ func TestGather(t *testing.T) { } var acc testutil.Accumulator - c.client = &mockCloudWatchClient{} + c.client = &mockGatherCloudWatchClient{} c.Gather(&acc) @@ -83,6 +83,94 @@ func TestGather(t *testing.T) { } +type mockSelectMetricsCloudWatchClient struct{} + +func (m 
*mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) { + metrics := []*cloudwatch.Metric{} + // 4 metrics are available + metricNames := []string{"Latency", "RequestCount", "HealthyHostCount", "UnHealthyHostCount"} + // for 3 ELBs + loadBalancers := []string{"lb-1", "lb-2", "lb-3"} + // in 2 AZs + availabilityZones := []string{"us-east-1a", "us-east-1b"} + for _, m := range metricNames { + for _, lb := range loadBalancers { + // For each metric/ELB pair, we get an aggregate value across all AZs. + metrics = append(metrics, &cloudwatch.Metric{ + Namespace: aws.String("AWS/ELB"), + MetricName: aws.String(m), + Dimensions: []*cloudwatch.Dimension{ + &cloudwatch.Dimension{ + Name: aws.String("LoadBalancerName"), + Value: aws.String(lb), + }, + }, + }) + for _, az := range availabilityZones { + // We get a metric for each metric/ELB/AZ triplet. + metrics = append(metrics, &cloudwatch.Metric{ + Namespace: aws.String("AWS/ELB"), + MetricName: aws.String(m), + Dimensions: []*cloudwatch.Dimension{ + &cloudwatch.Dimension{ + Name: aws.String("LoadBalancerName"), + Value: aws.String(lb), + }, + &cloudwatch.Dimension{ + Name: aws.String("AvailabilityZone"), + Value: aws.String(az), + }, + }, + }) + } + } + } + + result := &cloudwatch.ListMetricsOutput{ + Metrics: metrics, + } + return result, nil +} + +func (m *mockSelectMetricsCloudWatchClient) GetMetricStatistics(params *cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) { + return nil, nil +} + +func TestSelectMetrics(t *testing.T) { + duration, _ := time.ParseDuration("1m") + internalDuration := internal.Duration{ + Duration: duration, + } + c := &CloudWatch{ + Region: "us-east-1", + Namespace: "AWS/ELB", + Delay: internalDuration, + Period: internalDuration, + RateLimit: 10, + Metrics: []*Metric{ + &Metric{ + MetricNames: []string{"Latency", "RequestCount"}, + Dimensions: []*Dimension{ + &Dimension{ + Name: "LoadBalancerName", + Value: "*", + }, + &Dimension{ + Name: "AvailabilityZone", + Value: "*", + }, + }, + }, + }, + } + c.client = &mockSelectMetricsCloudWatchClient{} + metrics, err := SelectMetrics(c) + // We've asked for 2 (out of 4) metrics, over all 3 load balancers in all 2 + // AZs. We should get 12 metrics. 
+ assert.Equal(t, 12, len(metrics)) + assert.Nil(t, err) +} + func TestGenerateStatisticsInputParams(t *testing.T) { d := &cloudwatch.Dimension{ Name: aws.String("LoadBalancerName"), From 755808187398792533eebd394b99756bb878ee2d Mon Sep 17 00:00:00 2001 From: Anthony Arnaud Date: Tue, 13 Dec 2016 09:15:51 -0500 Subject: [PATCH 0004/1302] Output openTSDB HTTPS with basic auth (#1913) --- plugins/outputs/opentsdb/opentsdb.go | 4 +++- plugins/outputs/opentsdb/opentsdb_http.go | 5 ++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/opentsdb/opentsdb.go b/plugins/outputs/opentsdb/opentsdb.go index 232edc0b7..ce797e10f 100644 --- a/plugins/outputs/opentsdb/opentsdb.go +++ b/plugins/outputs/opentsdb/opentsdb.go @@ -90,7 +90,7 @@ func (o *OpenTSDB) Write(metrics []telegraf.Metric) error { if u.Scheme == "" || u.Scheme == "tcp" { return o.WriteTelnet(metrics, u) - } else if u.Scheme == "http" { + } else if u.Scheme == "http" || u.Scheme == "https" { return o.WriteHttp(metrics, u) } else { return fmt.Errorf("Unknown scheme in host parameter.") @@ -101,6 +101,8 @@ func (o *OpenTSDB) WriteHttp(metrics []telegraf.Metric, u *url.URL) error { http := openTSDBHttp{ Host: u.Host, Port: o.Port, + Scheme: u.Scheme, + User: u.User, BatchSize: o.HttpBatchSize, Debug: o.Debug, } diff --git a/plugins/outputs/opentsdb/opentsdb_http.go b/plugins/outputs/opentsdb/opentsdb_http.go index e0a7a66ef..912ca670a 100644 --- a/plugins/outputs/opentsdb/opentsdb_http.go +++ b/plugins/outputs/opentsdb/opentsdb_http.go @@ -23,6 +23,8 @@ type HttpMetric struct { type openTSDBHttp struct { Host string Port int + Scheme string + User *url.Userinfo BatchSize int Debug bool @@ -118,7 +120,8 @@ func (o *openTSDBHttp) flush() error { o.body.close() u := url.URL{ - Scheme: "http", + Scheme: o.Scheme, + User: o.User, Host: fmt.Sprintf("%s:%d", o.Host, o.Port), Path: "/api/put", } From dede3e70add8a4f2249ddb8e1779a383165bcf3b Mon Sep 17 00:00:00 2001 From: Kishore Nallan Date: Tue, 13 Dec 2016 19:47:20 +0530 Subject: [PATCH 0005/1302] Rabbitmq plugin: connection-related metrics. (#1908) * Rabbitmq plugin: connection-related metrics. * Run go fmt. --- plugins/inputs/rabbitmq/rabbitmq.go | 69 ++++++++++++++++++++++-- plugins/inputs/rabbitmq/rabbitmq_test.go | 64 ++++++++++++++++++++++ 2 files changed, 130 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index 5519ee14a..d1c973dea 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -50,8 +50,9 @@ type RabbitMQ struct { ClientTimeout internal.Duration `toml:"client_timeout"` // InsecureSkipVerify bool - Nodes []string - Queues []string + Nodes []string + Queues []string + Connections []string Client *http.Client } @@ -135,10 +136,22 @@ type Node struct { SocketsUsed int64 `json:"sockets_used"` } +// Connection ... +type Connection struct { + Name string + State string + Vhost string + Host string + Node string + ReceiveCount int64 `json:"recv_cnt"` + SendCount int64 `json:"send_cnt"` + SendPend int64 `json:"send_pend"` +} + // gatherFunc ... 
type gatherFunc func(r *RabbitMQ, acc telegraf.Accumulator, errChan chan error) -var gatherFunctions = []gatherFunc{gatherOverview, gatherNodes, gatherQueues} +var gatherFunctions = []gatherFunc{gatherOverview, gatherNodes, gatherQueues, gatherConnections} var sampleConfig = ` # url = "http://localhost:15672" @@ -380,6 +393,42 @@ func gatherQueues(r *RabbitMQ, acc telegraf.Accumulator, errChan chan error) { errChan <- nil } +func gatherConnections(r *RabbitMQ, acc telegraf.Accumulator, errChan chan error) { + // Gather information about connections + connections := make([]Connection, 0) + err := r.requestJSON("/api/connections", &connections) + if err != nil { + errChan <- err + return + } + + for _, connection := range connections { + if !r.shouldGatherConnection(connection) { + continue + } + tags := map[string]string{ + "url": r.URL, + "connection": connection.Name, + "vhost": connection.Vhost, + "host": connection.Host, + "node": connection.Node, + } + + acc.AddFields( + "rabbitmq_connection", + map[string]interface{}{ + "recv_cnt": connection.ReceiveCount, + "send_cnt": connection.SendCount, + "send_pend": connection.SendPend, + "state": connection.State, + }, + tags, + ) + } + + errChan <- nil +} + func (r *RabbitMQ) shouldGatherNode(node Node) bool { if len(r.Nodes) == 0 { return true @@ -408,6 +457,20 @@ func (r *RabbitMQ) shouldGatherQueue(queue Queue) bool { return false } +func (r *RabbitMQ) shouldGatherConnection(connection Connection) bool { + if len(r.Connections) == 0 { + return true + } + + for _, name := range r.Connections { + if name == connection.Name { + return true + } + } + + return false +} + func init() { inputs.Add("rabbitmq", func() telegraf.Input { return &RabbitMQ{ diff --git a/plugins/inputs/rabbitmq/rabbitmq_test.go b/plugins/inputs/rabbitmq/rabbitmq_test.go index 4bdc980db..bbb3dd450 100644 --- a/plugins/inputs/rabbitmq/rabbitmq_test.go +++ b/plugins/inputs/rabbitmq/rabbitmq_test.go @@ -374,6 +374,57 @@ const sampleQueuesResponse = ` ] ` +const sampleConnectionsResponse = ` +[ + { + "recv_oct": 166055, + "recv_oct_details": { + "rate": 0 + }, + "send_oct": 589, + "send_oct_details": { + "rate": 0 + }, + "recv_cnt": 124, + "send_cnt": 7, + "send_pend": 0, + "state": "running", + "channels": 1, + "type": "network", + "node": "rabbit@ip-10-0-12-133", + "name": "10.0.10.8:32774 -> 10.0.12.131:5672", + "port": 5672, + "peer_port": 32774, + "host": "10.0.12.131", + "peer_host": "10.0.10.8", + "ssl": false, + "peer_cert_subject": null, + "peer_cert_issuer": null, + "peer_cert_validity": null, + "auth_mechanism": "AMQPLAIN", + "ssl_protocol": null, + "ssl_key_exchange": null, + "ssl_cipher": null, + "ssl_hash": null, + "protocol": "AMQP 0-9-1", + "user": "workers", + "vhost": "main", + "timeout": 0, + "frame_max": 131072, + "channel_max": 65535, + "client_properties": { + "product": "py-amqp", + "product_version": "1.4.7", + "capabilities": { + "connection.blocked": true, + "consumer_cancel_notify": true + } + }, + "connected_at": 1476647837266 + } +] +` + func TestRabbitMQGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var rsp string @@ -385,6 +436,8 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) { rsp = sampleNodesResponse case "/api/queues": rsp = sampleQueuesResponse + case "/api/connections": + rsp = sampleConnectionsResponse default: panic("Cannot handle request") } @@ -441,4 +494,15 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) { } assert.True(t, 
acc.HasMeasurement("rabbitmq_queue")) + + assert.True(t, acc.HasMeasurement("rabbitmq_connection")) + + connection_fields := map[string]interface{}{ + "recv_cnt": int64(124), + "send_cnt": int64(7), + "send_pend": int64(0), + "state": "running", + } + + acc.AssertContainsFields(t, "rabbitmq_connection", connection_fields) } From 8e3fbaa9dd74600873ecb7e1956de402df4061c1 Mon Sep 17 00:00:00 2001 From: krise3k Date: Tue, 13 Dec 2016 15:23:18 +0100 Subject: [PATCH 0006/1302] Add missing slim (#1937) --- plugins/inputs/haproxy/haproxy.go | 5 +++++ plugins/inputs/haproxy/haproxy_test.go | 5 +++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/haproxy/haproxy.go b/plugins/inputs/haproxy/haproxy.go index 6b42a0705..c764a5530 100644 --- a/plugins/inputs/haproxy/haproxy.go +++ b/plugins/inputs/haproxy/haproxy.go @@ -263,6 +263,11 @@ func importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error { if err == nil { fields["smax"] = ival } + case HF_SLIM: + ival, err := strconv.ParseUint(v, 10, 64) + if err == nil { + fields["slim"] = ival + } case HF_STOT: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { diff --git a/plugins/inputs/haproxy/haproxy_test.go b/plugins/inputs/haproxy/haproxy_test.go index ae71ad76c..12be2ed88 100644 --- a/plugins/inputs/haproxy/haproxy_test.go +++ b/plugins/inputs/haproxy/haproxy_test.go @@ -198,6 +198,7 @@ func HaproxyGetFieldValues() map[string]interface{} { "rtime": uint64(312), "scur": uint64(1), "smax": uint64(32), + "slim": uint64(32), "srv_abort": uint64(1), "stot": uint64(171014), "ttime": uint64(2341), @@ -223,6 +224,6 @@ be_static,host1,0,0,0,1,,28,7873,1209688,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18, be_static,host2,0,0,0,1,,28,13830,1085929,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,9,,28,,2,0,,1,L4OK,,0,0,19,6,3,0,0,0,,,,0,0,,,,,338,,,0,1,1,38, be_static,host3,0,0,0,1,,28,17959,1259760,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,10,,28,,2,0,,1,L4OK,,1,0,20,6,2,0,0,0,,,,0,0,,,,,92,,,0,1,1,17, be_static,BACKEND,0,0,0,2,200,307,160276,13322728,0,0,,0,0,0,0,UP,11,11,0,,0,70698,0,,2,18,0,,307,,1,0,,4,,,,0,205,73,29,0,0,,,,,0,0,0,0,0,0,92,,,0,1,3,381, -be_app,host0,0,0,1,32,,171014,510913516,2193856571,,0,,0,1,1,0,UP,100,1,0,1,0,70698,0,,2,19,1,,171013,,2,3,,12,L7OK,301,10,0,119534,48051,2345,1056,0,0,,,,73,1,,,,,0,Moved Permanently,,0,2,312,2341, -be_app,host4,0,0,2,29,,171013,499318742,2195595896,12,34,,0,2,0,0,UP,100,1,0,2,0,70698,0,,2,19,2,,171013,,2,3,,12,L7OK,301,12,0,119572,47882,2441,1088,0,0,,,,84,2,,,,,0,Moved Permanently,,0,2,316,2355, +be_app,host0,0,0,1,32,32,171014,510913516,2193856571,,0,,0,1,1,0,UP,100,1,0,1,0,70698,0,,2,19,1,,171013,,2,3,,12,L7OK,301,10,0,119534,48051,2345,1056,0,0,,,,73,1,,,,,0,Moved Permanently,,0,2,312,2341, +be_app,host4,0,0,2,29,32,171013,499318742,2195595896,12,34,,0,2,0,0,UP,100,1,0,2,0,70698,0,,2,19,2,,171013,,2,3,,12,L7OK,301,12,0,119572,47882,2441,1088,0,0,,,,84,2,,,,,0,Moved Permanently,,0,2,316,2355, ` From 17fa6f9b17cfa039ab4b0c1d21b075590ca6eb79 Mon Sep 17 00:00:00 2001 From: Da1den Date: Tue, 13 Dec 2016 15:24:41 +0100 Subject: [PATCH 0007/1302] Fixed bug that you cannot gather data on non english systems (#1944) --- plugins/inputs/win_perf_counters/win_perf_counters.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go index 60b9ff55d..1f233a3d4 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters.go +++ 
b/plugins/inputs/win_perf_counters/win_perf_counters.go @@ -116,11 +116,7 @@ func (m *Win_PerfCounters) AddItem(metrics *itemList, query string, objectName s var handle win.PDH_HQUERY var counterHandle win.PDH_HCOUNTER ret := win.PdhOpenQuery(0, 0, &handle) - if m.PreVistaSupport { - ret = win.PdhAddCounter(handle, query, 0, &counterHandle) - } else { - ret = win.PdhAddEnglishCounter(handle, query, 0, &counterHandle) - } + ret = win.PdhAddCounter(handle, query, 0, &counterHandle) _ = ret temp := &item{query, objectName, counter, instance, measurement, From 07684fb03088146406be877c2aa69373dbd60f8f Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 13 Dec 2016 14:28:19 +0000 Subject: [PATCH 0008/1302] Update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a7b4cf048..68262c98d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,9 @@ - [#2126](https://github.com/influxdata/telegraf/pull/2126): Support a metric.Split function. - [#2026](https://github.com/influxdata/telegraf/pull/2065): elasticsearch "shield" (basic auth) support doc. - [#1885](https://github.com/influxdata/telegraf/pull/1885): Fix over-querying of cloudwatch metrics +- [#1913](https://github.com/influxdata/telegraf/pull/1913): OpenTSDB basic auth support. +- [#1908](https://github.com/influxdata/telegraf/pull/1908): RabbitMQ Connection metrics. +- [#1937](https://github.com/influxdata/telegraf/pull/1937): HAProxy session limit metric. ### Bugfixes @@ -23,6 +26,7 @@ - [#1775](https://github.com/influxdata/telegraf/issues/1775): Cache & expire metrics for delivery to prometheus. - [#2146](https://github.com/influxdata/telegraf/issues/2146): Fix potential panic in aggregator plugin metric maker. - [#1843](https://github.com/influxdata/telegraf/pull/1843) & [#1668](https://github.com/influxdata/telegraf/issues/1668): Add optional ability to define PID as a tag. +- [#1730](https://github.com/influxdata/telegraf/issues/1730): Fix win_perf_counters not gathering non-English counters. ## v1.1.2 [2016-12-12] From e097ae9632e3b12de4c2516b007f7305841a7530 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 13 Dec 2016 14:49:11 +0000 Subject: [PATCH 0009/1302] Fix possible panic when file info cannot be gotten closes #2061 --- CHANGELOG.md | 1 + plugins/inputs/filestat/filestat.go | 11 +++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 68262c98d..8cb3e6196 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ - [#2146](https://github.com/influxdata/telegraf/issues/2146): Fix potential panic in aggregator plugin metric maker. - [#1843](https://github.com/influxdata/telegraf/pull/1843) & [#1668](https://github.com/influxdata/telegraf/issues/1668): Add optional ability to define PID as a tag. - [#1730](https://github.com/influxdata/telegraf/issues/1730): Fix win_perf_counters not gathering non-English counters. +- [#2061](https://github.com/influxdata/telegraf/issues/2061): Fix panic when file stat info cannot be collected due to permissions or other issue(s). 
## v1.1.2 [2016-12-12] diff --git a/plugins/inputs/filestat/filestat.go b/plugins/inputs/filestat/filestat.go index 938c12e34..83f511a84 100644 --- a/plugins/inputs/filestat/filestat.go +++ b/plugins/inputs/filestat/filestat.go @@ -4,6 +4,7 @@ import ( "crypto/md5" "fmt" "io" + "log" "os" "github.com/influxdata/telegraf" @@ -78,8 +79,14 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error { "file": fileName, } fields := map[string]interface{}{ - "exists": int64(1), - "size_bytes": fileInfo.Size(), + "exists": int64(1), + } + + if fileInfo == nil { + log.Printf("E! Unable to get info for file [%s], possible permissions issue", + fileName) + } else { + fields["size_bytes"] = fileInfo.Size() } if f.Md5 { From e2f9617228cebeb5fcdf7b5904bb2033e6d6297e Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 13 Dec 2016 15:24:05 +0000 Subject: [PATCH 0010/1302] Support strings in statsd set measurements closes #2068 --- CHANGELOG.md | 1 + plugins/inputs/statsd/statsd.go | 13 ++++++++----- plugins/inputs/statsd/statsd_test.go | 7 +++++++ 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8cb3e6196..36e7ea73e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ - [#1913](https://github.com/influxdata/telegraf/pull/1913): OpenTSDB basic auth support. - [#1908](https://github.com/influxdata/telegraf/pull/1908): RabbitMQ Connection metrics. - [#1937](https://github.com/influxdata/telegraf/pull/1937): HAProxy session limit metric. +- [#2068](https://github.com/influxdata/telegraf/issues/2068): Accept strings for StatsD sets. ### Bugfixes diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index a46af0a87..7591864c2 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -98,6 +98,7 @@ type metric struct { hash string intvalue int64 floatvalue float64 + strvalue string mtype string additive bool samplerate float64 @@ -106,7 +107,7 @@ type metric struct { type cachedset struct { name string - fields map[string]map[int64]bool + fields map[string]map[string]bool tags map[string]string } @@ -435,7 +436,7 @@ func (s *Statsd) parseStatsdLine(line string) error { return errors.New("Error Parsing statsd line") } m.floatvalue = v - case "c", "s": + case "c": var v int64 v, err := strconv.ParseInt(pipesplit[0], 10, 64) if err != nil { @@ -451,6 +452,8 @@ func (s *Statsd) parseStatsdLine(line string) error { v = int64(float64(v) / m.samplerate) } m.intvalue = v + case "s": + m.strvalue = pipesplit[0] } // Parse the name & tags from bucket @@ -625,16 +628,16 @@ func (s *Statsd) aggregate(m metric) { if !ok { s.sets[m.hash] = cachedset{ name: m.name, - fields: make(map[string]map[int64]bool), + fields: make(map[string]map[string]bool), tags: m.tags, } } // check if the field exists _, ok = s.sets[m.hash].fields[m.field] if !ok { - s.sets[m.hash].fields[m.field] = make(map[int64]bool) + s.sets[m.hash].fields[m.field] = make(map[string]bool) } - s.sets[m.hash].fields[m.field][m.intvalue] = true + s.sets[m.hash].fields[m.field][m.strvalue] = true } } diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index bb0d68c16..9fbaf5372 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -139,6 +139,9 @@ func TestParse_Sets(t *testing.T) { "scientific.notation.sets:4.696E+5|s", "scientific.notation.sets:4.696E+5|s", "scientific.notation.sets:4.697E+5|s", + "string.sets:foobar|s", + "string.sets:foobar|s", + "string.sets:bar|s", } 
for _, line := range valid_lines { @@ -164,6 +167,10 @@ func TestParse_Sets(t *testing.T) { "oneuser_id", 1, }, + { + "string_sets", + 2, + }, } for _, test := range validations { From 8a4ab3654d5beb4b8570533af9c56c5900cd6c03 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 13 Dec 2016 16:02:03 +0000 Subject: [PATCH 0011/1302] Fix documentation for net_response plugin closes #2103 --- plugins/inputs/net_response/README.md | 23 +++++++++++++++++++++ plugins/inputs/net_response/net_response.go | 16 +++++++++----- 2 files changed, 34 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/net_response/README.md b/plugins/inputs/net_response/README.md index 1d075d1a1..e3836721c 100644 --- a/plugins/inputs/net_response/README.md +++ b/plugins/inputs/net_response/README.md @@ -6,6 +6,27 @@ It can also check response text. ### Configuration: ``` +[[inputs.net_response]] + ## Protocol, must be "tcp" or "udp" + ## NOTE: because the "udp" protocol does not respond to requests, it requires + ## a send/expect string pair (see below). + protocol = "tcp" + ## Server address (default localhost) + address = "localhost:80" + ## Set timeout + timeout = "1s" + + ## Set read timeout (only used if expecting a response) + read_timeout = "1s" + + ## The following options are required for UDP checks. For TCP, they are + ## optional. The plugin will send the given string to the server and then + ## expect to receive the given 'expect' string back. + ## string sent to the server + # send = "ssh" + ## expected string in answer + # expect = "ssh" + [[inputs.net_response]] protocol = "tcp" address = ":80" @@ -30,6 +51,8 @@ It can also check response text. protocol = "udp" address = "localhost:161" timeout = "2s" + send = "hello server" + expect = "hello client" ``` ### Measurements & Fields: diff --git a/plugins/inputs/net_response/net_response.go b/plugins/inputs/net_response/net_response.go index 15725ff27..ad0de46c3 100644 --- a/plugins/inputs/net_response/net_response.go +++ b/plugins/inputs/net_response/net_response.go @@ -29,18 +29,24 @@ func (_ *NetResponse) Description() string { var sampleConfig = ` ## Protocol, must be "tcp" or "udp" + ## NOTE: because the "udp" protocol does not respond to requests, it requires + ## a send/expect string pair (see below). protocol = "tcp" ## Server address (default localhost) - address = "github.com:80" + address = "localhost:80" ## Set timeout timeout = "1s" - ## Optional string sent to the server - # send = "ssh" - ## Optional expected string in answer - # expect = "ssh" ## Set read timeout (only used if expecting a response) read_timeout = "1s" + + ## The following options are required for UDP checks. For TCP, they are + ## optional. The plugin will send the given string to the server and then + ## expect to receive the given 'expect' string back. 
+ ## string sent to the server + # send = "ssh" + ## expected string in answer + # expect = "ssh" ` func (_ *NetResponse) SampleConfig() string { From 5f06bd2566175eb1acdb4199109c260a22d3489d Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 13 Dec 2016 16:10:59 +0000 Subject: [PATCH 0012/1302] Graylog output should set short_message field closes #2045 --- CHANGELOG.md | 1 + plugins/outputs/graylog/README.md | 2 +- plugins/outputs/graylog/graylog.go | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 36e7ea73e..d23402afd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,7 @@ - [#1843](https://github.com/influxdata/telegraf/pull/1843) & [#1668](https://github.com/influxdata/telegraf/issues/1668): Add optional ability to define PID as a tag. - [#1730](https://github.com/influxdata/telegraf/issues/1730): Fix win_perf_counters not gathering non-English counters. - [#2061](https://github.com/influxdata/telegraf/issues/2061): Fix panic when file stat info cannot be collected due to permissions or other issue(s). +- [#2045](https://github.com/influxdata/telegraf/issues/2045): Graylog output should set short_message field. ## v1.1.2 [2016-12-12] diff --git a/plugins/outputs/graylog/README.md b/plugins/outputs/graylog/README.md index 833482047..39863b541 100644 --- a/plugins/outputs/graylog/README.md +++ b/plugins/outputs/graylog/README.md @@ -9,6 +9,6 @@ It requires a `servers` name. ```toml # Send telegraf metrics to graylog(s) [[outputs.graylog]] - ## Udp endpoint for your graylog instance. + ## UDP endpoint for your graylog instance(s). servers = ["127.0.0.1:12201", "192.168.1.1:12201"] ``` diff --git a/plugins/outputs/graylog/graylog.go b/plugins/outputs/graylog/graylog.go index 64624bcb4..e77eae558 100644 --- a/plugins/outputs/graylog/graylog.go +++ b/plugins/outputs/graylog/graylog.go @@ -154,7 +154,7 @@ type Graylog struct { } var sampleConfig = ` - ## Udp endpoint for your graylog instance. + ## UDP endpoint for your graylog instance. servers = ["127.0.0.1:12201", "192.168.1.1:12201"] ` @@ -213,7 +213,7 @@ func serialize(metric telegraf.Metric) ([]string, error) { m := make(map[string]interface{}) m["version"] = "1.1" m["timestamp"] = metric.UnixNano() / 1000000000 - m["short_message"] = " " + m["short_message"] = "telegraf" m["name"] = metric.Name() if host, ok := metric.Tags()["host"]; ok { From b4f9bc8745de54fb673e651bfa58f3e1dfaebaed Mon Sep 17 00:00:00 2001 From: Jonas Falck Date: Tue, 13 Dec 2016 20:40:55 +0100 Subject: [PATCH 0013/1302] Change hddtemp to always put temperature in temperature field (#1905) Added unit tests for the changes Fixes #1904 --- CHANGELOG.md | 1 + plugins/inputs/hddtemp/README.md | 23 +++++- plugins/inputs/hddtemp/go-hddtemp/hddtemp.go | 15 +++- .../inputs/hddtemp/go-hddtemp/hddtemp_test.go | 14 ++-- plugins/inputs/hddtemp/hddtemp.go | 12 ++- plugins/inputs/hddtemp/hddtemp_test.go | 80 +++++++++++++++++++ 6 files changed, 131 insertions(+), 14 deletions(-) create mode 100644 plugins/inputs/hddtemp/hddtemp_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index d23402afd..d899f9074 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ - [#1730](https://github.com/influxdata/telegraf/issues/1730): Fix win_perf_counters not gathering non-English counters. - [#2061](https://github.com/influxdata/telegraf/issues/2061): Fix panic when file stat info cannot be collected due to permissions or other issue(s). 
- [#2045](https://github.com/influxdata/telegraf/issues/2045): Graylog output should set short_message field. +- [#1904](https://github.com/influxdata/telegraf/issues/1904): Hddtemp always put the value in the field temperature. ## v1.1.2 [2016-12-12] diff --git a/plugins/inputs/hddtemp/README.md b/plugins/inputs/hddtemp/README.md index d87ae625d..3bafb4f21 100644 --- a/plugins/inputs/hddtemp/README.md +++ b/plugins/inputs/hddtemp/README.md @@ -8,7 +8,7 @@ Hddtemp should be installed and its daemon running ## Configuration -``` +```toml [[inputs.hddtemp]] ## By default, telegraf gathers temps data from all disks detected by the ## hddtemp. @@ -20,3 +20,24 @@ Hddtemp should be installed and its daemon running # address = "127.0.0.1:7634" # devices = ["sda", "*"] ``` + +## Measurements + +- hddtemp + - temperature + +Tags: +- device +- model +- unit +- status + + + +## Example output + +``` +> hddtemp,unit=C,status=,host=server1,device=sdb,model=WDC\ WD740GD-00FLA1 temperature=43i 1481655647000000000 +> hddtemp,device=sdc,model=SAMSUNG\ HD103UI,unit=C,status=,host=server1 temperature=38i 148165564700000000 +> hddtemp,device=sdd,model=SAMSUNG\ HD103UI,unit=C,status=,host=server1 temperature=36i 1481655647000000000 +``` diff --git a/plugins/inputs/hddtemp/go-hddtemp/hddtemp.go b/plugins/inputs/hddtemp/go-hddtemp/hddtemp.go index d7d650b79..2d0e67fee 100644 --- a/plugins/inputs/hddtemp/go-hddtemp/hddtemp.go +++ b/plugins/inputs/hddtemp/go-hddtemp/hddtemp.go @@ -8,7 +8,7 @@ import ( "strings" ) -type disk struct { +type Disk struct { DeviceName string Model string Temperature int32 @@ -16,12 +16,19 @@ type disk struct { Status string } -func Fetch(address string) ([]disk, error) { +type hddtemp struct { +} + +func New() *hddtemp { + return &hddtemp{} +} + +func (h *hddtemp) Fetch(address string) ([]Disk, error) { var ( err error conn net.Conn buffer bytes.Buffer - disks []disk + disks []Disk ) if conn, err = net.Dial("tcp", address); err != nil { @@ -48,7 +55,7 @@ func Fetch(address string) ([]disk, error) { status = temperatureField } - disks = append(disks, disk{ + disks = append(disks, Disk{ DeviceName: device, Model: fields[offset+2], Temperature: int32(temperature), diff --git a/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go index 858e91a90..a3fda2abd 100644 --- a/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go +++ b/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go @@ -10,13 +10,13 @@ func TestFetch(t *testing.T) { l := serve(t, []byte("|/dev/sda|foobar|36|C|")) defer l.Close() - disks, err := Fetch(l.Addr().String()) + disks, err := New().Fetch(l.Addr().String()) if err != nil { t.Error("expecting err to be nil") } - expected := []disk{ + expected := []Disk{ { DeviceName: "sda", Model: "foobar", @@ -31,7 +31,7 @@ func TestFetch(t *testing.T) { } func TestFetchWrongAddress(t *testing.T) { - _, err := Fetch("127.0.0.1:1") + _, err := New().Fetch("127.0.0.1:1") if err == nil { t.Error("expecting err to be non-nil") @@ -42,13 +42,13 @@ func TestFetchStatus(t *testing.T) { l := serve(t, []byte("|/dev/sda|foobar|SLP|C|")) defer l.Close() - disks, err := Fetch(l.Addr().String()) + disks, err := New().Fetch(l.Addr().String()) if err != nil { t.Error("expecting err to be nil") } - expected := []disk{ + expected := []Disk{ { DeviceName: "sda", Model: "foobar", @@ -67,13 +67,13 @@ func TestFetchTwoDisks(t *testing.T) { l := serve(t, []byte("|/dev/hda|ST380011A|46|C||/dev/hdd|ST340016A|SLP|*|")) defer l.Close() - disks, err := 
Fetch(l.Addr().String()) + disks, err := New().Fetch(l.Addr().String()) if err != nil { t.Error("expecting err to be nil") } - expected := []disk{ + expected := []Disk{ { DeviceName: "hda", Model: "ST380011A", diff --git a/plugins/inputs/hddtemp/hddtemp.go b/plugins/inputs/hddtemp/hddtemp.go index c1e01c3c6..ac11218dd 100644 --- a/plugins/inputs/hddtemp/hddtemp.go +++ b/plugins/inputs/hddtemp/hddtemp.go @@ -13,6 +13,11 @@ const defaultAddress = "127.0.0.1:7634" type HDDTemp struct { Address string Devices []string + fetcher Fetcher +} + +type Fetcher interface { + Fetch(address string) ([]gohddtemp.Disk, error) } func (_ *HDDTemp) Description() string { @@ -36,7 +41,10 @@ func (_ *HDDTemp) SampleConfig() string { } func (h *HDDTemp) Gather(acc telegraf.Accumulator) error { - disks, err := gohddtemp.Fetch(h.Address) + if h.fetcher == nil { + h.fetcher = gohddtemp.New() + } + disks, err := h.fetcher.Fetch(h.Address) if err != nil { return err @@ -53,7 +61,7 @@ func (h *HDDTemp) Gather(acc telegraf.Accumulator) error { } fields := map[string]interface{}{ - disk.DeviceName: disk.Temperature, + "temperature": disk.Temperature, } acc.AddFields("hddtemp", fields, tags) diff --git a/plugins/inputs/hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/hddtemp_test.go new file mode 100644 index 000000000..37dfef7d6 --- /dev/null +++ b/plugins/inputs/hddtemp/hddtemp_test.go @@ -0,0 +1,80 @@ +package hddtemp + +import ( + "testing" + + hddtemp "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type mockFetcher struct { +} + +func (h *mockFetcher) Fetch(address string) ([]hddtemp.Disk, error) { + return []hddtemp.Disk{ + hddtemp.Disk{ + DeviceName: "Disk1", + Model: "Model1", + Temperature: 13, + Unit: "C", + }, + hddtemp.Disk{ + DeviceName: "Disk2", + Model: "Model2", + Temperature: 14, + Unit: "C", + }, + }, nil + +} +func newMockFetcher() *mockFetcher { + return &mockFetcher{} +} + +func TestFetch(t *testing.T) { + hddtemp := &HDDTemp{ + fetcher: newMockFetcher(), + Devices: []string{"*"}, + } + + acc := &testutil.Accumulator{} + err := hddtemp.Gather(acc) + + require.NoError(t, err) + assert.Equal(t, acc.NFields(), 2) + + var tests = []struct { + fields map[string]interface{} + tags map[string]string + }{ + { + map[string]interface{}{ + "temperature": int32(13), + }, + map[string]string{ + "device": "Disk1", + "model": "Model1", + "unit": "C", + "status": "", + }, + }, + { + map[string]interface{}{ + "temperature": int32(14), + }, + map[string]string{ + "device": "Disk2", + "model": "Model2", + "unit": "C", + "status": "", + }, + }, + } + + for _, test := range tests { + acc.AssertContainsTaggedFields(t, "hddtemp", test.fields, test.tags) + } + +} From f39db08c6dc7fa5adf66fef0c9d55d29e0df16f3 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 13 Dec 2016 16:34:52 +0000 Subject: [PATCH 0014/1302] Set default values for delete_ configuration options closes #1893 --- CHANGELOG.md | 5 +++++ plugins/inputs/statsd/README.md | 19 ++++++++++++------- plugins/inputs/statsd/statsd.go | 24 +++++++++++++++++------- 3 files changed, 34 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d899f9074..c745d462f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,10 @@ ### Release Notes +- The StatsD plugin will now default all "delete_" config options to "true". 
This
+will change the default behavior for users who were not specifying these parameters
+in their config file.
+
 ### Features
 
 - [#2123](https://github.com/influxdata/telegraf/pull/2123): Fix improper calculation of CPU percentages
@@ -19,6 +23,7 @@
 - [#1937](https://github.com/influxdata/telegraf/pull/1937): HAProxy session limit metric.
 - [#2068](https://github.com/influxdata/telegraf/issues/2068): Accept strings for StatsD sets.
+- [#1893](https://github.com/influxdata/telegraf/issues/1893): Change StatsD default "reset" behavior.
 
 ### Bugfixes
 
diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md
index a17f8c888..91070419a 100644
--- a/plugins/inputs/statsd/README.md
+++ b/plugins/inputs/statsd/README.md
@@ -7,14 +7,19 @@
 [[inputs.statsd]]
   ## Address and port to host UDP listener on
   service_address = ":8125"
-  ## Delete gauges every interval (default=false)
-  delete_gauges = false
-  ## Delete counters every interval (default=false)
-  delete_counters = false
-  ## Delete sets every interval (default=false)
-  delete_sets = false
-  ## Delete timings & histograms every interval (default=true)
+
+  ## The following configuration options control when telegraf clears its cache
+  ## of previous values. If set to false, then telegraf will only clear its
+  ## cache when the daemon is restarted.
+  ## Reset gauges every interval (default=true)
+  delete_gauges = true
+  ## Reset counters every interval (default=true)
+  delete_counters = true
+  ## Reset sets every interval (default=true)
+  delete_sets = true
+  ## Reset timings & histograms every interval (default=true)
   delete_timings = true
+
   ## Percentiles to calculate for timing & histogram stats
   percentiles = [90]
 
diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go
index 7591864c2..75dfc915e 100644
--- a/plugins/inputs/statsd/statsd.go
+++ b/plugins/inputs/statsd/statsd.go
@@ -136,14 +136,19 @@ func (_ *Statsd) Description() string {
 const sampleConfig = `
   ## Address and port to host UDP listener on
   service_address = ":8125"
-  ## Delete gauges every interval (default=false)
-  delete_gauges = false
-  ## Delete counters every interval (default=false)
-  delete_counters = false
-  ## Delete sets every interval (default=false)
-  delete_sets = false
-  ## Delete timings & histograms every interval (default=true)
+
+  ## The following configuration options control when telegraf clears its cache
+  ## of previous values. If set to false, then telegraf will only clear its
+  ## cache when the daemon is restarted.
+ ## Reset gauges every interval (default=true) + delete_gauges = true + ## Reset counters every interval (default=true) + delete_counters = true + ## Reset sets every interval (default=true) + delete_sets = true + ## Reset timings & histograms every interval (default=true) delete_timings = true + ## Percentiles to calculate for timing & histogram stats percentiles = [90] @@ -654,8 +659,13 @@ func (s *Statsd) Stop() { func init() { inputs.Add("statsd", func() telegraf.Input { return &Statsd{ + ServiceAddress: ":8125", MetricSeparator: "_", AllowedPendingMessages: defaultAllowPendingMessage, + DeleteCounters: true, + DeleteGauges: true, + DeleteSets: true, + DeleteTimings: true, } }) } From c630212dde2d741912bdf09fe9b7f22e97e8194d Mon Sep 17 00:00:00 2001 From: Pieter Slabbert Date: Thu, 24 Nov 2016 17:11:24 +0200 Subject: [PATCH 0015/1302] Enable setting a clientID for MQTT Output closes #2079 closes #1910 --- CHANGELOG.md | 1 + plugins/outputs/mqtt/mqtt.go | 12 ++++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c745d462f..d5c08c3b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ in their config file. - [#1937](https://github.com/influxdata/telegraf/pull/1937): HAProxy session limit metric. - [#2068](https://github.com/influxdata/telegraf/issues/2068): Accept strings for StatsD sets. - [#1893](https://github.com/influxdata/telegraf/issues/1893): Change StatsD default "reset" behavior. +- [#2079](https://github.com/influxdata/telegraf/pull/2079): Enable setting ClientID in MQTT output. ### Bugfixes diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index 45f2c91c8..b1d94f87c 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -25,6 +25,9 @@ var sampleConfig = ` # username = "telegraf" # password = "metricsmetricsmetricsmetrics" + ## client ID, if not set a random ID is generated + # client_id = "" + ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" # ssl_cert = "/etc/telegraf/cert.pem" @@ -46,7 +49,8 @@ type MQTT struct { Database string Timeout internal.Duration TopicPrefix string - QoS int `toml:"qos"` + QoS int `toml:"qos"` + ClientID string `toml:"client_id"` // Path to CA file SSLCA string `toml:"ssl_ca"` @@ -155,7 +159,11 @@ func (m *MQTT) publish(topic string, body []byte) error { func (m *MQTT) createOpts() (*paho.ClientOptions, error) { opts := paho.NewClientOptions() - opts.SetClientID("Telegraf-Output-" + internal.RandomString(5)) + if m.ClientID != "" { + opts.SetClientID(m.ClientID) + } else { + opts.SetClientID("Telegraf-Output-" + internal.RandomString(5)) + } tlsCfg, err := internal.GetTLSConfig( m.SSLCert, m.SSLKey, m.SSLCA, m.InsecureSkipVerify) From 393f5044bbc92f97b34548c835d651f6bfb14e8a Mon Sep 17 00:00:00 2001 From: Jose Luis Navarro Date: Fri, 4 Nov 2016 12:32:20 +0100 Subject: [PATCH 0016/1302] Collect JSON values recursively closes #1993 closes #1693 --- CHANGELOG.md | 1 + plugins/inputs/jolokia/jolokia.go | 27 +++++------ plugins/inputs/jolokia/jolokia_test.go | 63 ++++++++++++++++++++++++++ 3 files changed, 75 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d5c08c3b2..ed7ff2a2e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,7 @@ in their config file. - [#2061](https://github.com/influxdata/telegraf/issues/2061): Fix panic when file stat info cannot be collected due to permissions or other issue(s). 
- [#2045](https://github.com/influxdata/telegraf/issues/2045): Graylog output should set short_message field. - [#1904](https://github.com/influxdata/telegraf/issues/1904): Hddtemp always put the value in the field temperature. +- [#1693](https://github.com/influxdata/telegraf/issues/1693): Properly collect nested jolokia struct data. ## v1.1.2 [2016-12-12] diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go index 2cc0d6422..6a51e9b43 100644 --- a/plugins/inputs/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -220,6 +220,16 @@ func (j *Jolokia) prepareRequest(server Server, metric Metric) (*http.Request, e return req, nil } +func extractValues(measurement string, value interface{}, fields map[string]interface{}) { + if mapValues, ok := value.(map[string]interface{}); ok { + for k2, v2 := range mapValues { + extractValues(measurement+"_"+k2, v2, fields) + } + } else { + fields[measurement] = value + } +} + func (j *Jolokia) Gather(acc telegraf.Accumulator) error { servers := j.Servers metrics := j.Metrics @@ -244,23 +254,8 @@ func (j *Jolokia) Gather(acc telegraf.Accumulator) error { if err != nil { fmt.Printf("Error handling response: %s\n", err) } else { - if values, ok := out["value"]; ok { - switch t := values.(type) { - case map[string]interface{}: - for k, v := range t { - switch t2 := v.(type) { - case map[string]interface{}: - for k2, v2 := range t2 { - fields[measurement+"_"+k+"_"+k2] = v2 - } - case interface{}: - fields[measurement+"_"+k] = t2 - } - } - case interface{}: - fields[measurement] = t - } + extractValues(measurement, values, fields) } else { fmt.Printf("Missing key 'value' in output response\n") } diff --git a/plugins/inputs/jolokia/jolokia_test.go b/plugins/inputs/jolokia/jolokia_test.go index 13724b937..ccde619b5 100644 --- a/plugins/inputs/jolokia/jolokia_test.go +++ b/plugins/inputs/jolokia/jolokia_test.go @@ -12,6 +12,37 @@ import ( _ "github.com/stretchr/testify/require" ) +const validThreeLevelMultiValueJSON = ` +{ + "request":{ + "mbean":"java.lang:type=*", + "type":"read" + }, + "value":{ + "java.lang:type=Memory":{ + "ObjectPendingFinalizationCount":0, + "Verbose":false, + "HeapMemoryUsage":{ + "init":134217728, + "committed":173015040, + "max":1908932608, + "used":16840016 + }, + "NonHeapMemoryUsage":{ + "init":2555904, + "committed":51380224, + "max":-1, + "used":49944048 + }, + "ObjectName":{ + "objectName":"java.lang:type=Memory" + } + } + }, + "timestamp":1446129191, + "status":200 +}` + const validMultiValueJSON = ` { "request":{ @@ -103,6 +134,38 @@ func TestHttpJsonMultiValue(t *testing.T) { acc.AssertContainsTaggedFields(t, "jolokia", fields, tags) } +// Test that the proper values are ignored or collected +func TestHttpJsonThreeLevelMultiValue(t *testing.T) { + jolokia := genJolokiaClientStub(validThreeLevelMultiValueJSON, 200, Servers, []Metric{HeapMetric}) + + var acc testutil.Accumulator + err := jolokia.Gather(&acc) + + assert.Nil(t, err) + assert.Equal(t, 1, len(acc.Metrics)) + + fields := map[string]interface{}{ + "heap_memory_usage_java.lang:type=Memory_ObjectPendingFinalizationCount": 0.0, + "heap_memory_usage_java.lang:type=Memory_Verbose": false, + "heap_memory_usage_java.lang:type=Memory_HeapMemoryUsage_init": 134217728.0, + "heap_memory_usage_java.lang:type=Memory_HeapMemoryUsage_max": 1908932608.0, + "heap_memory_usage_java.lang:type=Memory_HeapMemoryUsage_used": 16840016.0, + "heap_memory_usage_java.lang:type=Memory_HeapMemoryUsage_committed": 173015040.0, + 
"heap_memory_usage_java.lang:type=Memory_NonHeapMemoryUsage_init": 2555904.0, + "heap_memory_usage_java.lang:type=Memory_NonHeapMemoryUsage_committed": 51380224.0, + "heap_memory_usage_java.lang:type=Memory_NonHeapMemoryUsage_max": -1.0, + "heap_memory_usage_java.lang:type=Memory_NonHeapMemoryUsage_used": 49944048.0, + "heap_memory_usage_java.lang:type=Memory_ObjectName_objectName": "java.lang:type=Memory", + } + + tags := map[string]string{ + "jolokia_host": "127.0.0.1", + "jolokia_port": "8080", + "jolokia_name": "as1", + } + acc.AssertContainsTaggedFields(t, "jolokia", fields, tags) +} + // Test that the proper values are ignored or collected func TestHttpJsonOn404(t *testing.T) { From 17b307a7bca6f9639fb816ec61bb0254c650b4f2 Mon Sep 17 00:00:00 2001 From: Florian Klink Date: Wed, 14 Dec 2016 20:47:48 +0100 Subject: [PATCH 0017/1302] ping: fix typo in README (#2163) --- plugins/inputs/ping/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index 1f087c774..38558a33c 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -3,7 +3,7 @@ This input plugin will measures the round-trip ## Windows: -### Configration: +### Configuration: ``` ## urls to ping urls = ["www.google.com"] # required @@ -33,4 +33,4 @@ This input plugin will measures the round-trip ``` * Plugin: ping, Collection 1 ping,host=WIN-PBAPLP511R7,url=www.google.com average_response_ms=7i,maximum_response_ms=9i,minimum_response_ms=7i,packets_received=4i,packets_transmitted=4i,percent_packet_loss=0,percent_reply_loss=0,reply_received=4i 1469879119000000000 -``` \ No newline at end of file +``` From a970b9c62c198fe6ad87723ae0bc56e15eb733e1 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 15 Dec 2016 19:31:40 +0000 Subject: [PATCH 0018/1302] Revert "Rabbitmq plugin: connection-related metrics." (#2169) --- plugins/inputs/rabbitmq/rabbitmq.go | 69 ++---------------------- plugins/inputs/rabbitmq/rabbitmq_test.go | 64 ---------------------- 2 files changed, 3 insertions(+), 130 deletions(-) diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index d1c973dea..5519ee14a 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -50,9 +50,8 @@ type RabbitMQ struct { ClientTimeout internal.Duration `toml:"client_timeout"` // InsecureSkipVerify bool - Nodes []string - Queues []string - Connections []string + Nodes []string + Queues []string Client *http.Client } @@ -136,22 +135,10 @@ type Node struct { SocketsUsed int64 `json:"sockets_used"` } -// Connection ... -type Connection struct { - Name string - State string - Vhost string - Host string - Node string - ReceiveCount int64 `json:"recv_cnt"` - SendCount int64 `json:"send_cnt"` - SendPend int64 `json:"send_pend"` -} - // gatherFunc ... 
type gatherFunc func(r *RabbitMQ, acc telegraf.Accumulator, errChan chan error) -var gatherFunctions = []gatherFunc{gatherOverview, gatherNodes, gatherQueues, gatherConnections} +var gatherFunctions = []gatherFunc{gatherOverview, gatherNodes, gatherQueues} var sampleConfig = ` # url = "http://localhost:15672" @@ -393,42 +380,6 @@ func gatherQueues(r *RabbitMQ, acc telegraf.Accumulator, errChan chan error) { errChan <- nil } -func gatherConnections(r *RabbitMQ, acc telegraf.Accumulator, errChan chan error) { - // Gather information about connections - connections := make([]Connection, 0) - err := r.requestJSON("/api/connections", &connections) - if err != nil { - errChan <- err - return - } - - for _, connection := range connections { - if !r.shouldGatherConnection(connection) { - continue - } - tags := map[string]string{ - "url": r.URL, - "connection": connection.Name, - "vhost": connection.Vhost, - "host": connection.Host, - "node": connection.Node, - } - - acc.AddFields( - "rabbitmq_connection", - map[string]interface{}{ - "recv_cnt": connection.ReceiveCount, - "send_cnt": connection.SendCount, - "send_pend": connection.SendPend, - "state": connection.State, - }, - tags, - ) - } - - errChan <- nil -} - func (r *RabbitMQ) shouldGatherNode(node Node) bool { if len(r.Nodes) == 0 { return true @@ -457,20 +408,6 @@ func (r *RabbitMQ) shouldGatherQueue(queue Queue) bool { return false } -func (r *RabbitMQ) shouldGatherConnection(connection Connection) bool { - if len(r.Connections) == 0 { - return true - } - - for _, name := range r.Connections { - if name == connection.Name { - return true - } - } - - return false -} - func init() { inputs.Add("rabbitmq", func() telegraf.Input { return &RabbitMQ{ diff --git a/plugins/inputs/rabbitmq/rabbitmq_test.go b/plugins/inputs/rabbitmq/rabbitmq_test.go index bbb3dd450..4bdc980db 100644 --- a/plugins/inputs/rabbitmq/rabbitmq_test.go +++ b/plugins/inputs/rabbitmq/rabbitmq_test.go @@ -374,57 +374,6 @@ const sampleQueuesResponse = ` ] ` -const sampleConnectionsResponse = ` -[ - { - "recv_oct": 166055, - "recv_oct_details": { - "rate": 0 - }, - "send_oct": 589, - "send_oct_details": { - "rate": 0 - }, - "recv_cnt": 124, - "send_cnt": 7, - "send_pend": 0, - "state": "running", - "channels": 1, - "type": "network", - "node": "rabbit@ip-10-0-12-133", - "name": "10.0.10.8:32774 -> 10.0.12.131:5672", - "port": 5672, - "peer_port": 32774, - "host": "10.0.12.131", - "peer_host": "10.0.10.8", - "ssl": false, - "peer_cert_subject": null, - "peer_cert_issuer": null, - "peer_cert_validity": null, - "auth_mechanism": "AMQPLAIN", - "ssl_protocol": null, - "ssl_key_exchange": null, - "ssl_cipher": null, - "ssl_hash": null, - "protocol": "AMQP 0-9-1", - "user": "workers", - "vhost": "main", - "timeout": 0, - "frame_max": 131072, - "channel_max": 65535, - "client_properties": { - "product": "py-amqp", - "product_version": "1.4.7", - "capabilities": { - "connection.blocked": true, - "consumer_cancel_notify": true - } - }, - "connected_at": 1476647837266 - } -] -` - func TestRabbitMQGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var rsp string @@ -436,8 +385,6 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) { rsp = sampleNodesResponse case "/api/queues": rsp = sampleQueuesResponse - case "/api/connections": - rsp = sampleConnectionsResponse default: panic("Cannot handle request") } @@ -494,15 +441,4 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) { } assert.True(t, 
acc.HasMeasurement("rabbitmq_queue"))
-
-	assert.True(t, acc.HasMeasurement("rabbitmq_connection"))
-
-	connection_fields := map[string]interface{}{
-		"recv_cnt":  int64(124),
-		"send_cnt":  int64(7),
-		"send_pend": int64(0),
-		"state":     "running",
-	}
-
-	acc.AssertContainsFields(t, "rabbitmq_connection", connection_fields)
 }

From e6fc32bdf0652e1defcfdb79be98f60020e1f9a0 Mon Sep 17 00:00:00 2001
From: Frank Stutz
Date: Tue, 18 Oct 2016 14:09:35 -0700
Subject: [PATCH 0019/1302] fix for puppetagent config - test 1

put Makefile back to normal

removed comment from puppetagent.go

changed config_version to config_version_string and fixed yaml for build

changed wording from branch to environment for config_string

fixed casing and Changelog

fixed test case

closes #1917
---
 CHANGELOG.md                                     | 1 +
 plugins/inputs/puppetagent/last_run_summary.yaml | 4 ++--
 plugins/inputs/puppetagent/puppetagent.go        | 4 ++--
 plugins/inputs/puppetagent/puppetagent_test.go   | 2 +-
 4 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index ed7ff2a2e..1357e6d90 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -38,6 +38,7 @@ in their config file.
 - [#2045](https://github.com/influxdata/telegraf/issues/2045): Graylog output should set short_message field.
 - [#1904](https://github.com/influxdata/telegraf/issues/1904): Hddtemp always put the value in the field temperature.
 - [#1693](https://github.com/influxdata/telegraf/issues/1693): Properly collect nested jolokia struct data.
+- [#1917](https://github.com/influxdata/telegraf/pull/1917): fix puppetagent inputs plugin to support string for config variable.

 ## v1.1.2 [2016-12-12]

diff --git a/plugins/inputs/puppetagent/last_run_summary.yaml b/plugins/inputs/puppetagent/last_run_summary.yaml
index 1f5dea3ce..be2f01746 100644
--- a/plugins/inputs/puppetagent/last_run_summary.yaml
+++ b/plugins/inputs/puppetagent/last_run_summary.yaml
@@ -30,5 +30,5 @@
   last_run: 1444936531
   cron: 0.000584
 version:
-  config: 1444936521
-  puppet: "3.7.5"
\ No newline at end of file
+  config: "environment:d6018ce"
+  puppet: "3.7.5"

diff --git a/plugins/inputs/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go
index f66aa989f..c8a265bb8 100644
--- a/plugins/inputs/puppetagent/puppetagent.go
+++ b/plugins/inputs/puppetagent/puppetagent.go
@@ -68,8 +68,8 @@ type time struct {
 }

 type version struct {
-	Config int64  `yaml:"config"`
-	Puppet string `yaml:"puppet"`
+	ConfigString string `yaml:"config"`
+	Puppet       string `yaml:"puppet"`
 }

 // SampleConfig returns sample configuration message

diff --git a/plugins/inputs/puppetagent/puppetagent_test.go b/plugins/inputs/puppetagent/puppetagent_test.go
index d1470bc27..b1c447887 100644
--- a/plugins/inputs/puppetagent/puppetagent_test.go
+++ b/plugins/inputs/puppetagent/puppetagent_test.go
@@ -28,7 +28,7 @@ func TestGather(t *testing.T) {
 		"resources_outofsync":   int64(0),
 		"changes_total":         int64(0),
 		"time_lastrun":          int64(1444936531),
-		"version_config":        int64(1444936521),
+		"version_configstring":  "environment:d6018ce",
 		"time_user":             float64(0.004331),
 		"time_schedule":         float64(0.001123),
 		"time_filebucket":       float64(0.000353),

From bc13d32d53caba8c035a9b853d1a0fb7e83f264e Mon Sep 17 00:00:00 2001
From: Doug Reese
Date: Fri, 16 Dec 2016 05:46:32 -0800
Subject: [PATCH 0020/1302] MongoDB input plugin: Improve state data (#2001)

* MongoDB input plugin: Improve state data

Adds ARB as a "member_status" (replica set arbiter).
Uses MongoDB replica set state string for "state" value.
* MongoDB input plugin: Improve state data - changelog update --- CHANGELOG.md | 1 + plugins/inputs/mongodb/mongodb_data.go | 4 +--- plugins/inputs/mongodb/mongodb_data_test.go | 3 ++- plugins/inputs/mongodb/mongostat.go | 6 ++++++ 4 files changed, 10 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1357e6d90..efb599ded 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ in their config file. - [#2068](https://github.com/influxdata/telegraf/issues/2068): Accept strings for StatsD sets. - [#1893](https://github.com/influxdata/telegraf/issues/1893): Change StatsD default "reset" behavior. - [#2079](https://github.com/influxdata/telegraf/pull/2079): Enable setting ClientID in MQTT output. +- [#2001](https://github.com/influxdata/telegraf/pull/2001): MongoDB input plugin: Improve state data. ### Bugfixes diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go index afa4ddd2f..47f35f199 100644 --- a/plugins/inputs/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -21,9 +21,6 @@ type DbData struct { } func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData { - if statLine.NodeType != "" && statLine.NodeType != "UNK" { - tags["state"] = statLine.NodeType - } return &MongodbData{ StatLine: statLine, Tags: tags, @@ -61,6 +58,7 @@ var DefaultReplStats = map[string]string{ "repl_getmores_per_sec": "GetMoreR", "repl_commands_per_sec": "CommandR", "member_status": "NodeType", + "state": "NodeState", "repl_lag": "ReplLag", } diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index a08549cfd..a74d9ed65 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -95,12 +95,12 @@ func TestStateTag(t *testing.T) { Insert: 0, Query: 0, NodeType: "PRI", + NodeState: "PRIMARY", }, tags, ) stateTags := make(map[string]string) - stateTags["state"] = "PRI" var acc testutil.Accumulator @@ -115,6 +115,7 @@ func TestStateTag(t *testing.T) { "getmores_per_sec": int64(0), "inserts_per_sec": int64(0), "member_status": "PRI", + "state": "PRIMARY", "net_in_bytes": int64(0), "net_out_bytes": int64(0), "open_connections": int64(0), diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index da539f8aa..e77c67e15 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -107,6 +107,7 @@ type ReplSetStatus struct { type ReplSetMember struct { Name string `bson:"name"` State int64 `bson:"state"` + StateStr string `bson:"stateStr"` OptimeDate *bson.MongoTimestamp `bson:"optimeDate"` } @@ -420,6 +421,7 @@ type StatLine struct { NumConnections int64 ReplSetName string NodeType string + NodeState string // Cluster fields JumboChunksCount int64 @@ -566,6 +568,8 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal.NodeType = "PRI" } else if newStat.Repl.Secondary.(bool) { returnVal.NodeType = "SEC" + } else if newStat.Repl.ArbiterOnly != nil && newStat.Repl.ArbiterOnly.(bool) { + returnVal.NodeType = "ARB" } else { returnVal.NodeType = "UNK" } @@ -692,6 +696,8 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec me := ReplSetMember{} for _, member := range newReplStat.Members { if member.Name == myName { + // Store my state string + returnVal.NodeState = member.StateStr if member.State == 1 { // I'm the master returnVal.ReplLag = 0 From 4a83c8c518a89b2fd752cef331834c9bd12a2f6c 
Mon Sep 17 00:00:00 2001
From: Pierre Tessier
Date: Fri, 16 Dec 2016 08:47:47 -0500
Subject: [PATCH 0021/1302] Add Questions status variable for issue: #1988 (#2004)

---
 plugins/inputs/mysql/mysql.go | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go
index ece5e1447..5cf19f2db 100644
--- a/plugins/inputs/mysql/mysql.go
+++ b/plugins/inputs/mysql/mysql.go
@@ -828,6 +828,13 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum
 			}

 			fields["queries"] = i
+		case "Questions":
+			i, err := strconv.ParseInt(string(val.([]byte)), 10, 64)
+			if err != nil {
+				return err
+			}
+
+			fields["questions"] = i
 		case "Slow_queries":
 			i, err := strconv.ParseInt(string(val.([]byte)), 10, 64)
 			if err != nil {

From 3a45d8851dc10b13dfff198e3c4c9045b8ce16fd Mon Sep 17 00:00:00 2001
From: Alex Sherwin
Date: Fri, 16 Dec 2016 08:53:16 -0500
Subject: [PATCH 0022/1302] fixes #1987 custom docker repos with non-standard
 port (#2018)

* fixed parsing of docker image name/version

now accounts for custom docker repos which contain a colon for a non-default port

* 1987: modifying docker test case to have a custom repo with non-standard port

* using a temp var to store index, ran gofmt

* fixes #1987, renaming iterator to 'i'
---
 plugins/inputs/docker/docker.go      | 16 ++++++++++------
 plugins/inputs/docker/docker_test.go |  6 +++---
 2 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go
index e2c488dc8..7fc48689f 100644
--- a/plugins/inputs/docker/docker.go
+++ b/plugins/inputs/docker/docker.go
@@ -221,14 +221,18 @@ func (d *Docker) gatherContainer(
 		cname = strings.TrimPrefix(container.Names[0], "/")
 	}

-	// the image name sometimes has a version part.
- // ie, rabbitmq:3-management - imageParts := strings.Split(container.Image, ":") - imageName := imageParts[0] + // the image name sometimes has a version part, or a private repo + // ie, rabbitmq:3-management or docker.someco.net:4443/rabbitmq:3-management + imageName := "" imageVersion := "unknown" - if len(imageParts) > 1 { - imageVersion = imageParts[1] + i := strings.LastIndex(container.Image, ":") // index of last ':' character + if i > -1 { + imageVersion = container.Image[i+1:] + imageName = container.Image[:i] + } else { + imageName = container.Image } + tags := map[string]string{ "engine_host": d.engine_host, "container_name": cname, diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index 21960a4d8..a60203af5 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -340,7 +340,7 @@ func (d FakeDockerClient) ContainerList(octx context.Context, options types.Cont container2 := types.Container{ ID: "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173", Names: []string{"/etcd2"}, - Image: "quay.io/coreos/etcd:v2.2.2", + Image: "quay.io:4443/coreos/etcd:v2.2.2", Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379", Created: 1455941933, Status: "Up 4 hours", @@ -429,7 +429,7 @@ func TestDockerGatherInfo(t *testing.T) { }, map[string]string{ "container_name": "etcd2", - "container_image": "quay.io/coreos/etcd", + "container_image": "quay.io:4443/coreos/etcd", "cpu": "cpu3", "container_version": "v2.2.2", "engine_host": "absol", @@ -477,7 +477,7 @@ func TestDockerGatherInfo(t *testing.T) { map[string]string{ "engine_host": "absol", "container_name": "etcd2", - "container_image": "quay.io/coreos/etcd", + "container_image": "quay.io:4443/coreos/etcd", "container_version": "v2.2.2", }, ) From bcbf82f8e8d151bfe53137ed05aa7131243422d5 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 16 Dec 2016 13:54:51 +0000 Subject: [PATCH 0023/1302] changelog update --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index efb599ded..8cf403e4d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ in their config file. - [#1904](https://github.com/influxdata/telegraf/issues/1904): Hddtemp always put the value in the field temperature. - [#1693](https://github.com/influxdata/telegraf/issues/1693): Properly collect nested jolokia struct data. - [#1917](https://github.com/influxdata/telegraf/pull/1917): fix puppetagent inputs plugin to support string for config variable. +- [#1987](https://github.com/influxdata/telegraf/issues/1987): fix docker input plugin tags when registry has port. 
## v1.1.2 [2016-12-12]

From 90cec20d1db04e2aefaa40805540ac1f2dd45ee0 Mon Sep 17 00:00:00 2001
From: alekseyp
Date: Fri, 16 Dec 2016 08:58:27 -0500
Subject: [PATCH 0024/1302] Standard deviation (jitter) for Input plugin Ping
 (#2078)

---
 plugins/inputs/ping/ping.go      | 20 ++++++++++++--------
 plugins/inputs/ping/ping_test.go | 26 +++++++++++++++-----------
 2 files changed, 27 insertions(+), 19 deletions(-)

diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go
index 089248efe..32264eec7 100644
--- a/plugins/inputs/ping/ping.go
+++ b/plugins/inputs/ping/ping.go
@@ -84,7 +84,7 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error {
 					strings.TrimSpace(out) + ", " + err.Error())
 			}
 			tags := map[string]string{"url": u}
-			trans, rec, avg, err := processPingOutput(out)
+			trans, rec, avg, stddev, err := processPingOutput(out)
 			if err != nil {
 				// fatal error
 				errorChannel <- err
@@ -100,6 +100,9 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error {
 			if avg > 0 {
 				fields["average_response_ms"] = avg
 			}
+			if stddev > 0 {
+				fields["standard_deviation_ms"] = stddev
+			}
 			acc.AddFields("ping", fields, tags)
 		}(url)
 	}
@@ -166,9 +169,9 @@ func (p *Ping) args(url string) []string {
 // round-trip min/avg/max/stddev = 34.843/43.508/52.172/8.664 ms
 //
 // It returns (<transmitted packets>, <received packets>, <average response>)
-func processPingOutput(out string) (int, int, float64, error) {
 	var trans, recv int
-	var avg float64
+func processPingOutput(out string) (int, int, float64, float64, error) {
+	var trans, recv int
+	var avg, stddev float64
 	// Set this error to nil if we find a 'transmitted' line
 	err := errors.New("Fatal error processing ping output")
 	lines := strings.Split(out, "\n")
@@ -180,22 +183,23 @@ func processPingOutput(out string) (int, int, float64, error) {
 			// Transmitted packets
 			trans, err = strconv.Atoi(strings.Split(stats[0], " ")[0])
 			if err != nil {
-				return trans, recv, avg, err
+				return trans, recv, avg, stddev, err
 			}
 			// Received packets
 			recv, err = strconv.Atoi(strings.Split(stats[1], " ")[0])
 			if err != nil {
-				return trans, recv, avg, err
+				return trans, recv, avg, stddev, err
 			}
 		} else if strings.Contains(line, "min/avg/max") {
-			stats := strings.Split(line, " = ")[1]
+			stats := strings.Split(line, " ")[3]
 			avg, err = strconv.ParseFloat(strings.Split(stats, "/")[1], 64)
+			stddev, err = strconv.ParseFloat(strings.Split(stats, "/")[3], 64)
 			if err != nil {
-				return trans, recv, avg, err
+				return trans, recv, avg, stddev, err
 			}
 		}
 	}
-	return trans, recv, avg, err
+	return trans, recv, avg, stddev, err
 }

 func init() {

diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go
index b5d0d16e7..a7a6931f5 100644
--- a/plugins/inputs/ping/ping_test.go
+++ b/plugins/inputs/ping/ping_test.go
@@ -48,23 +48,25 @@ ping: -i interval too short: Operation not permitted

 // Test that ping command output is processed properly
 func TestProcessPingOutput(t *testing.T) {
-	trans, rec, avg, err := processPingOutput(bsdPingOutput)
+	trans, rec, avg, stddev, err := processPingOutput(bsdPingOutput)
 	assert.NoError(t, err)
 	assert.Equal(t, 5, trans, "5 packets were transmitted")
 	assert.Equal(t, 5, rec, "5 packets were transmitted")
 	assert.InDelta(t, 20.224, avg, 0.001)
+	assert.InDelta(t, 4.076, stddev, 0.001)

-	trans, rec, avg, err = processPingOutput(linuxPingOutput)
+	trans, rec, avg, stddev, err = processPingOutput(linuxPingOutput)
 	assert.NoError(t, err)
 	assert.Equal(t, 5, trans, "5 packets were transmitted")
 	assert.Equal(t, 5, rec, "5 packets were transmitted")
 	assert.InDelta(t, 43.628, avg, 0.001)
+	assert.InDelta(t, 5.325, stddev, 0.001)
 }

 // Test that
processPingOutput returns an error when 'ping' fails to run, such // as when an invalid argument is provided func TestErrorProcessPingOutput(t *testing.T) { - _, _, _, err := processPingOutput(fatalPingOutput) + _, _, _, _, err := processPingOutput(fatalPingOutput) assert.Error(t, err, "Error was expected from processPingOutput") } @@ -145,10 +147,11 @@ func TestPingGather(t *testing.T) { p.Gather(&acc) tags := map[string]string{"url": "www.google.com"} fields := map[string]interface{}{ - "packets_transmitted": 5, - "packets_received": 5, - "percent_packet_loss": 0.0, - "average_response_ms": 43.628, + "packets_transmitted": 5, + "packets_received": 5, + "percent_packet_loss": 0.0, + "average_response_ms": 43.628, + "standard_deviation_ms": 5.325, } acc.AssertContainsTaggedFields(t, "ping", fields, tags) @@ -182,10 +185,11 @@ func TestLossyPingGather(t *testing.T) { p.Gather(&acc) tags := map[string]string{"url": "www.google.com"} fields := map[string]interface{}{ - "packets_transmitted": 5, - "packets_received": 3, - "percent_packet_loss": 40.0, - "average_response_ms": 44.033, + "packets_transmitted": 5, + "packets_received": 3, + "percent_packet_loss": 40.0, + "average_response_ms": 44.033, + "standard_deviation_ms": 5.325, } acc.AssertContainsTaggedFields(t, "ping", fields, tags) } From ecbc6342211f4e469e7ce3c240c198dc0a38ea03 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Fri, 16 Dec 2016 09:01:49 -0500 Subject: [PATCH 0025/1302] fix tail input seeking when used with pipe (#2090) --- CHANGELOG.md | 2 ++ plugins/inputs/tail/README.md | 2 ++ plugins/inputs/tail/tail.go | 20 +++++++++++++++----- 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8cf403e4d..12e635e14 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ in their config file. - [#2049](https://github.com/influxdata/telegraf/pull/2049): Fix the Value data format not trimming null characters from input. - [#1949](https://github.com/influxdata/telegraf/issues/1949): Fix windows `net` plugin. +- [#1775](https://github.com/influxdata/telegraf/issues/1775): Cache & expire metrics for delivery to prometheus - [#1775](https://github.com/influxdata/telegraf/issues/1775): Cache & expire metrics for delivery to prometheus. - [#2146](https://github.com/influxdata/telegraf/issues/2146): Fix potential panic in aggregator plugin metric maker. - [#1843](https://github.com/influxdata/telegraf/pull/1843) & [#1668](https://github.com/influxdata/telegraf/issues/1668): Add optional ability to define PID as a tag. @@ -41,6 +42,7 @@ in their config file. - [#1693](https://github.com/influxdata/telegraf/issues/1693): Properly collect nested jolokia struct data. - [#1917](https://github.com/influxdata/telegraf/pull/1917): fix puppetagent inputs plugin to support string for config variable. - [#1987](https://github.com/influxdata/telegraf/issues/1987): fix docker input plugin tags when registry has port. +- [#2089](https://github.com/influxdata/telegraf/issues/2089): Fix tail input when reading from a pipe. ## v1.1.2 [2016-12-12] diff --git a/plugins/inputs/tail/README.md b/plugins/inputs/tail/README.md index 9ae120e91..3aa0c4ac4 100644 --- a/plugins/inputs/tail/README.md +++ b/plugins/inputs/tail/README.md @@ -36,6 +36,8 @@ The plugin expects messages in one of the files = ["/var/mymetrics.out"] ## Read file from beginning. from_beginning = false + ## Whether file is a named pipe + pipe = false ## Data format to consume. 
## Each data format has it's own unique set of configuration options, read diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index e1bc32e51..508c1e320 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -16,6 +16,7 @@ import ( type Tail struct { Files []string FromBeginning bool + Pipe bool tailers []*tail.Tail parser parsers.Parser @@ -44,6 +45,8 @@ const sampleConfig = ` files = ["/var/mymetrics.out"] ## Read file from beginning. from_beginning = false + ## Whether file is a named pipe + pipe = false ## Data format to consume. ## Each data format has it's own unique set of configuration options, read @@ -70,10 +73,12 @@ func (t *Tail) Start(acc telegraf.Accumulator) error { t.acc = acc - var seek tail.SeekInfo - if !t.FromBeginning { - seek.Whence = 2 - seek.Offset = 0 + var seek *tail.SeekInfo + if !t.Pipe && !t.FromBeginning { + seek = &tail.SeekInfo{ + Whence: 2, + Offset: 0, + } } var errS string @@ -88,8 +93,9 @@ func (t *Tail) Start(acc telegraf.Accumulator) error { tail.Config{ ReOpen: true, Follow: true, - Location: &seek, + Location: seek, MustExist: true, + Pipe: t.Pipe, }) if err != nil { errS += err.Error() + " " @@ -130,6 +136,10 @@ func (t *Tail) receiver(tailer *tail.Tail) { tailer.Filename, line.Text, err) } } + if err := tailer.Err(); err != nil { + log.Printf("E! Error tailing file %s, Error: %s\n", + tailer.Filename, err) + } } func (t *Tail) Stop() { From 21fac3ebec3875a675b02857cfaec88766417140 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 16 Dec 2016 14:01:58 +0000 Subject: [PATCH 0026/1302] changelog update --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 12e635e14..95ad6d1a2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,7 @@ in their config file. - [#1893](https://github.com/influxdata/telegraf/issues/1893): Change StatsD default "reset" behavior. - [#2079](https://github.com/influxdata/telegraf/pull/2079): Enable setting ClientID in MQTT output. - [#2001](https://github.com/influxdata/telegraf/pull/2001): MongoDB input plugin: Improve state data. +- [#2078](https://github.com/influxdata/telegraf/pull/2078): Ping input: add standard deviation field. ### Bugfixes From 4957717df5726b2f0d453e5ae97194632ec4b530 Mon Sep 17 00:00:00 2001 From: Tevin Jeffrey Date: Fri, 16 Dec 2016 09:03:53 -0500 Subject: [PATCH 0027/1302] Add field for last GC pause time (#2121) --- CHANGELOG.md | 1 + plugins/inputs/influxdb/influxdb.go | 54 ++++++++++++------------ plugins/inputs/influxdb/influxdb_test.go | 1 + 3 files changed, 30 insertions(+), 26 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 95ad6d1a2..357d9f22d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ in their config file. - [#2079](https://github.com/influxdata/telegraf/pull/2079): Enable setting ClientID in MQTT output. - [#2001](https://github.com/influxdata/telegraf/pull/2001): MongoDB input plugin: Improve state data. - [#2078](https://github.com/influxdata/telegraf/pull/2078): Ping input: add standard deviation field. +- [#2121](https://github.com/influxdata/telegraf/pull/2121): Add GC pause metric to InfluxDB input plugin. 
### Bugfixes diff --git a/plugins/inputs/influxdb/influxdb.go b/plugins/inputs/influxdb/influxdb.go index bb11cfee4..3c98eead3 100644 --- a/plugins/inputs/influxdb/influxdb.go +++ b/plugins/inputs/influxdb/influxdb.go @@ -94,32 +94,33 @@ type point struct { } type memstats struct { - Alloc int64 `json:"Alloc"` - TotalAlloc int64 `json:"TotalAlloc"` - Sys int64 `json:"Sys"` - Lookups int64 `json:"Lookups"` - Mallocs int64 `json:"Mallocs"` - Frees int64 `json:"Frees"` - HeapAlloc int64 `json:"HeapAlloc"` - HeapSys int64 `json:"HeapSys"` - HeapIdle int64 `json:"HeapIdle"` - HeapInuse int64 `json:"HeapInuse"` - HeapReleased int64 `json:"HeapReleased"` - HeapObjects int64 `json:"HeapObjects"` - StackInuse int64 `json:"StackInuse"` - StackSys int64 `json:"StackSys"` - MSpanInuse int64 `json:"MSpanInuse"` - MSpanSys int64 `json:"MSpanSys"` - MCacheInuse int64 `json:"MCacheInuse"` - MCacheSys int64 `json:"MCacheSys"` - BuckHashSys int64 `json:"BuckHashSys"` - GCSys int64 `json:"GCSys"` - OtherSys int64 `json:"OtherSys"` - NextGC int64 `json:"NextGC"` - LastGC int64 `json:"LastGC"` - PauseTotalNs int64 `json:"PauseTotalNs"` - NumGC int64 `json:"NumGC"` - GCCPUFraction float64 `json:"GCCPUFraction"` + Alloc int64 `json:"Alloc"` + TotalAlloc int64 `json:"TotalAlloc"` + Sys int64 `json:"Sys"` + Lookups int64 `json:"Lookups"` + Mallocs int64 `json:"Mallocs"` + Frees int64 `json:"Frees"` + HeapAlloc int64 `json:"HeapAlloc"` + HeapSys int64 `json:"HeapSys"` + HeapIdle int64 `json:"HeapIdle"` + HeapInuse int64 `json:"HeapInuse"` + HeapReleased int64 `json:"HeapReleased"` + HeapObjects int64 `json:"HeapObjects"` + StackInuse int64 `json:"StackInuse"` + StackSys int64 `json:"StackSys"` + MSpanInuse int64 `json:"MSpanInuse"` + MSpanSys int64 `json:"MSpanSys"` + MCacheInuse int64 `json:"MCacheInuse"` + MCacheSys int64 `json:"MCacheSys"` + BuckHashSys int64 `json:"BuckHashSys"` + GCSys int64 `json:"GCSys"` + OtherSys int64 `json:"OtherSys"` + NextGC int64 `json:"NextGC"` + LastGC int64 `json:"LastGC"` + PauseTotalNs int64 `json:"PauseTotalNs"` + PauseNs [256]int64 `json:"PauseNs"` + NumGC int64 `json:"NumGC"` + GCCPUFraction float64 `json:"GCCPUFraction"` } // Gathers data from a particular URL @@ -202,6 +203,7 @@ func (i *InfluxDB) gatherURL( "next_gc": m.NextGC, "last_gc": m.LastGC, "pause_total_ns": m.PauseTotalNs, + "pause_ns": m.PauseNs[(m.NumGC+255)%256], "num_gc": m.NumGC, "gcc_pu_fraction": m.GCCPUFraction, }, diff --git a/plugins/inputs/influxdb/influxdb_test.go b/plugins/inputs/influxdb/influxdb_test.go index 09707a548..c27aa77dc 100644 --- a/plugins/inputs/influxdb/influxdb_test.go +++ b/plugins/inputs/influxdb/influxdb_test.go @@ -86,6 +86,7 @@ func TestInfluxDB(t *testing.T) { "frees": int64(381008), "heap_idle": int64(15802368), "pause_total_ns": int64(5132914), + "pause_ns": int64(127053), "lookups": int64(77), "heap_sys": int64(33849344), "mcache_sys": int64(16384), From 9c8f24601fd0d8fb299bd9fd499bc22d49896502 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 16 Dec 2016 14:11:28 +0000 Subject: [PATCH 0028/1302] rabbitmq, decrease timeout verbosity in config --- plugins/inputs/rabbitmq/rabbitmq.go | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index 5519ee14a..c33b11e66 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -155,17 +155,12 @@ var sampleConfig = ` ## Optional request timeouts ## - ## ResponseHeaderTimeout, if non-zero, specifies 
the amount of
-  ## time to wait for a server's response headers after fully
-  ## writing the request (including its body, if any). This
-  ## time does not include the time to read the response body.
-  ## See http.Transport.ResponseHeaderTimeout
+  ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait
+  ## for a server's response headers after fully writing the request.
   # header_timeout = "3s"
   ##
-  ## Timeout specifies a time limit for requests made by this
-  ## Client. The timeout includes connection time, any
-  ## redirects, and reading the response body.
-  ## See http.Client.Timeout
+  ## client_timeout specifies a time limit for requests made by this client.
+  ## Includes connection time, any redirects, and reading the response body.
   # client_timeout = "4s"

   ## A list of nodes to pull metrics about. If not specified, metrics for

From e8bf968c78f63dc619d3d205de622941a1e45cb2 Mon Sep 17 00:00:00 2001
From: Vincent
Date: Sat, 17 Dec 2016 01:29:04 +0800
Subject: [PATCH 0029/1302] fix mongodb replica set lag always 0 #1449 (#2125)

---
 plugins/inputs/mongodb/mongostat.go | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go
index e77c67e15..c5ccf5a86 100644
--- a/plugins/inputs/mongodb/mongostat.go
+++ b/plugins/inputs/mongodb/mongostat.go
@@ -11,8 +11,6 @@ import (
 	"sort"
 	"strings"
 	"time"
-
-	"gopkg.in/mgo.v2/bson"
 )

 const (
@@ -105,10 +103,10 @@ type ReplSetStatus struct {

 // ReplSetMember stores information related to a replica set member
 type ReplSetMember struct {
-	Name       string               `bson:"name"`
-	State      int64                `bson:"state"`
-	StateStr   string               `bson:"stateStr"`
-	OptimeDate *bson.MongoTimestamp `bson:"optimeDate"`
+	Name       string    `bson:"name"`
+	State      int64     `bson:"state"`
+	StateStr   string    `bson:"stateStr"`
+	OptimeDate time.Time `bson:"optimeDate"`
 }

 // WiredTiger stores information related to the WiredTiger storage engine.
@@ -712,9 +710,9 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
 			}
 		}

-		if me.OptimeDate != nil && master.OptimeDate != nil && me.State == 2 {
-			// MongoTimestamp type is int64 where the first 32bits are the unix timestamp
-			lag := int64(*master.OptimeDate>>32 - *me.OptimeDate>>32)
+		if me.State == 2 {
+			// OptimeDate.Unix() type is int64
+			lag := master.OptimeDate.Unix() - me.OptimeDate.Unix()
 			if lag < 0 {
 				returnVal.ReplLag = 0
 			} else {

From 6723ea5fe66b0df729a65e42dfa67ab7b6f80972 Mon Sep 17 00:00:00 2001
From: Cameron Sparr
Date: Fri, 16 Dec 2016 17:30:13 +0000
Subject: [PATCH 0030/1302] changelog update

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 357d9f22d..6bd0ee0e0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -45,6 +45,7 @@ in their config file.
 - [#1917](https://github.com/influxdata/telegraf/pull/1917): fix puppetagent inputs plugin to support string for config variable.
 - [#1987](https://github.com/influxdata/telegraf/issues/1987): fix docker input plugin tags when registry has port.
 - [#2089](https://github.com/influxdata/telegraf/issues/2089): Fix tail input when reading from a pipe.
+- [#1449](https://github.com/influxdata/telegraf/issues/1449): MongoDB plugin always shows 0 replication lag.
## v1.1.2 [2016-12-12] From 0e8122a2fca6073d00a324790f3f6cf54e7c1baf Mon Sep 17 00:00:00 2001 From: Steven Pall Date: Sat, 17 Dec 2016 07:51:46 -0500 Subject: [PATCH 0031/1302] Add trailing slash to jolokia context (#2105) --- plugins/inputs/jolokia/README.md | 3 ++- plugins/inputs/jolokia/jolokia.go | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/jolokia/README.md b/plugins/inputs/jolokia/README.md index 596dbed5f..d25ab6f46 100644 --- a/plugins/inputs/jolokia/README.md +++ b/plugins/inputs/jolokia/README.md @@ -6,7 +6,8 @@ # Read JMX metrics through Jolokia [[inputs.jolokia]] ## This is the context root used to compose the jolokia url - context = "/jolokia" + ## NOTE that Jolokia requires a trailing slash at the end of the context root + context = "/jolokia/" ## This specifies the mode used # mode = "proxy" diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go index 6a51e9b43..812e5e66b 100644 --- a/plugins/inputs/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -52,8 +52,9 @@ type Jolokia struct { const sampleConfig = ` ## This is the context root used to compose the jolokia url + ## NOTE that Jolokia requires a trailing slash at the end of the context root ## NOTE that your jolokia security policy must allow for POST requests. - context = "/jolokia" + context = "/jolokia/" ## This specifies the mode used # mode = "proxy" @@ -148,7 +149,7 @@ func (j *Jolokia) doRequest(req *http.Request) (map[string]interface{}, error) { func (j *Jolokia) prepareRequest(server Server, metric Metric) (*http.Request, error) { var jolokiaUrl *url.URL - context := j.Context // Usually "/jolokia" + context := j.Context // Usually "/jolokia/" // Create bodyContent bodyContent := map[string]interface{}{ From f09c08d1f3805270a4a22d7f9c9c96ea3caf6f90 Mon Sep 17 00:00:00 2001 From: Pierre Tessier Date: Mon, 7 Nov 2016 11:34:02 -0500 Subject: [PATCH 0032/1302] Added response_timeout property closes #2006 --- CHANGELOG.md | 1 + plugins/inputs/prometheus/README.md | 11 +++++++++++ plugins/inputs/prometheus/prometheus.go | 9 +++++++-- 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6bd0ee0e0..d53e46093 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,7 @@ in their config file. - [#2001](https://github.com/influxdata/telegraf/pull/2001): MongoDB input plugin: Improve state data. - [#2078](https://github.com/influxdata/telegraf/pull/2078): Ping input: add standard deviation field. - [#2121](https://github.com/influxdata/telegraf/pull/2121): Add GC pause metric to InfluxDB input plugin. +- [#2006](https://github.com/influxdata/telegraf/pull/2006): Added response_timeout property to prometheus input plugin. ### Bugfixes diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md index 8298b9d27..e2b1b45ae 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -13,6 +13,17 @@ Example for Kubernetes apiserver urls = ["http://my-kube-apiserver:8080/metrics"] ``` +Specify a 10 second timeout for slower/over-loaded clients +```toml +# Get all metrics from Kube-apiserver +[[inputs.prometheus]] + # An array of urls to scrape metrics from. 
+ urls = ["http://my-kube-apiserver:8080/metrics"] + + # Specify timeout duration for slower prometheus clients (default is 3s) + response_timeout = "10s" +``` + You can use more complex configuration to filter and some tags diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 12f7fd38e..821f71f21 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -21,6 +21,8 @@ type Prometheus struct { // Bearer Token authorization file path BearerToken string `toml:"bearer_token"` + ResponseTimeout internal.Duration `toml:"response_timeout"` + // Path to CA file SSLCA string `toml:"ssl_ca"` // Path to host cert file @@ -38,6 +40,9 @@ var sampleConfig = ` ## Use bearer token for authorization # bearer_token = /path/to/bearer/token + ## Specify timeout duration for slower prometheus clients (default is 3s) + # response_timeout = "3s" + ## Optional SSL Config # ssl_ca = /path/to/cafile # ssl_cert = /path/to/certfile @@ -105,7 +110,7 @@ func (p *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error { }).Dial, TLSHandshakeTimeout: 5 * time.Second, TLSClientConfig: tlsCfg, - ResponseHeaderTimeout: time.Duration(3 * time.Second), + ResponseHeaderTimeout: p.ResponseTimeout.Duration, DisableKeepAlives: true, } @@ -148,6 +153,6 @@ func (p *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error { func init() { inputs.Add("prometheus", func() telegraf.Input { - return &Prometheus{} + return &Prometheus{ResponseTimeout: "3s"} }) } From e36c354ff5e08696acce13e6739d8ca1433b42e8 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Sat, 17 Dec 2016 13:10:33 +0000 Subject: [PATCH 0033/1302] internal.Duration build fixup --- plugins/inputs/prometheus/prometheus.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 821f71f21..97da17f04 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -153,6 +153,6 @@ func (p *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error { func init() { inputs.Add("prometheus", func() telegraf.Input { - return &Prometheus{ResponseTimeout: "3s"} + return &Prometheus{ResponseTimeout: internal.Duration{Duration: time.Second * 3}} }) } From eb96443a341671895356fee79dfb54a7d758d69f Mon Sep 17 00:00:00 2001 From: Ken Dilley Date: Tue, 20 Dec 2016 03:17:00 -0700 Subject: [PATCH 0034/1302] Update MySQL Readme to clarify connection string examples. (#2175) * Update MySQL Readme to clarify connection string examples. * Update mysql sample config to clarify connection string examples --- plugins/inputs/mysql/README.md | 4 ++-- plugins/inputs/mysql/mysql.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/mysql/README.md b/plugins/inputs/mysql/README.md index 3599b2fb8..34bb07bef 100644 --- a/plugins/inputs/mysql/README.md +++ b/plugins/inputs/mysql/README.md @@ -25,8 +25,8 @@ This plugin gathers the statistic data from MySQL server ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]] ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name ## e.g. - ## db_user:passwd@tcp(127.0.0.1:3306)/?tls=false - ## db_user@tcp(127.0.0.1:3306)/?tls=false + ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"] + ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"] # ## If no servers are specified, then localhost is used as the host. 
servers = ["tcp(127.0.0.1:3306)/"] diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 5cf19f2db..54f296586 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -41,8 +41,8 @@ var sampleConfig = ` ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]] ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name ## e.g. - ## db_user:passwd@tcp(127.0.0.1:3306)/?tls=false - ## db_user@tcp(127.0.0.1:3306)/?tls=false + ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"] + ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"] # ## If no servers are specified, then localhost is used as the host. servers = ["tcp(127.0.0.1:3306)/"] From 4e808c5c20a79d35b20eb7c7ec236eede6ca85c6 Mon Sep 17 00:00:00 2001 From: Jeff Ashton Date: Wed, 9 Nov 2016 13:43:39 -0500 Subject: [PATCH 0035/1302] Importing pdh from github.com/lxn/win closes #1763 closes #2017 --- CHANGELOG.md | 1 + Godeps_windows | 1 - plugins/inputs/win_perf_counters/pdh.go | 419 ++++++++++++++++++ .../win_perf_counters/win_perf_counters.go | 45 +- 4 files changed, 442 insertions(+), 24 deletions(-) create mode 100644 plugins/inputs/win_perf_counters/pdh.go diff --git a/CHANGELOG.md b/CHANGELOG.md index d53e46093..e69a1004e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,7 @@ in their config file. - [#2078](https://github.com/influxdata/telegraf/pull/2078): Ping input: add standard deviation field. - [#2121](https://github.com/influxdata/telegraf/pull/2121): Add GC pause metric to InfluxDB input plugin. - [#2006](https://github.com/influxdata/telegraf/pull/2006): Added response_timeout property to prometheus input plugin. +- [#1763](https://github.com/influxdata/telegraf/issues/1763): Pulling github.com/lxn/win's pdh wrapper into telegraf ### Bugfixes diff --git a/Godeps_windows b/Godeps_windows index 067c98c1c..129fce1a2 100644 --- a/Godeps_windows +++ b/Godeps_windows @@ -1,7 +1,6 @@ github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34 github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5 github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7 -github.com/lxn/win 950a0e81e7678e63d8e6cd32412bdecb325ccd88 github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad golang.org/x/sys a646d33e2ee3172a661fc09bca23bb4889a41bc8 github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438 diff --git a/plugins/inputs/win_perf_counters/pdh.go b/plugins/inputs/win_perf_counters/pdh.go new file mode 100644 index 000000000..36563d6b7 --- /dev/null +++ b/plugins/inputs/win_perf_counters/pdh.go @@ -0,0 +1,419 @@ +// Copyright (c) 2010 The win Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// 3. The names of the authors may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// This is the official list of 'win' authors for copyright purposes. +// +// Alexander Neumann +// Joseph Watson +// Kevin Pors + +// +build windows + +package win_perf_counters + +import ( + "syscall" + "unsafe" +) + +// Error codes +const ( + ERROR_SUCCESS = 0 + ERROR_INVALID_FUNCTION = 1 +) + +type ( + HANDLE uintptr +) + +// PDH error codes, which can be returned by all Pdh* functions. Taken from mingw-w64 pdhmsg.h +const ( + PDH_CSTATUS_VALID_DATA = 0x00000000 // The returned data is valid. + PDH_CSTATUS_NEW_DATA = 0x00000001 // The return data value is valid and different from the last sample. + PDH_CSTATUS_NO_MACHINE = 0x800007D0 // Unable to connect to the specified computer, or the computer is offline. + PDH_CSTATUS_NO_INSTANCE = 0x800007D1 + PDH_MORE_DATA = 0x800007D2 // The PdhGetFormattedCounterArray* function can return this if there's 'more data to be displayed'. + PDH_CSTATUS_ITEM_NOT_VALIDATED = 0x800007D3 + PDH_RETRY = 0x800007D4 + PDH_NO_DATA = 0x800007D5 // The query does not currently contain any counters (for example, limited access) + PDH_CALC_NEGATIVE_DENOMINATOR = 0x800007D6 + PDH_CALC_NEGATIVE_TIMEBASE = 0x800007D7 + PDH_CALC_NEGATIVE_VALUE = 0x800007D8 + PDH_DIALOG_CANCELLED = 0x800007D9 + PDH_END_OF_LOG_FILE = 0x800007DA + PDH_ASYNC_QUERY_TIMEOUT = 0x800007DB + PDH_CANNOT_SET_DEFAULT_REALTIME_DATASOURCE = 0x800007DC + PDH_CSTATUS_NO_OBJECT = 0xC0000BB8 + PDH_CSTATUS_NO_COUNTER = 0xC0000BB9 // The specified counter could not be found. + PDH_CSTATUS_INVALID_DATA = 0xC0000BBA // The counter was successfully found, but the data returned is not valid. + PDH_MEMORY_ALLOCATION_FAILURE = 0xC0000BBB + PDH_INVALID_HANDLE = 0xC0000BBC + PDH_INVALID_ARGUMENT = 0xC0000BBD // Required argument is missing or incorrect. + PDH_FUNCTION_NOT_FOUND = 0xC0000BBE + PDH_CSTATUS_NO_COUNTERNAME = 0xC0000BBF + PDH_CSTATUS_BAD_COUNTERNAME = 0xC0000BC0 // Unable to parse the counter path. Check the format and syntax of the specified path. + PDH_INVALID_BUFFER = 0xC0000BC1 + PDH_INSUFFICIENT_BUFFER = 0xC0000BC2 + PDH_CANNOT_CONNECT_MACHINE = 0xC0000BC3 + PDH_INVALID_PATH = 0xC0000BC4 + PDH_INVALID_INSTANCE = 0xC0000BC5 + PDH_INVALID_DATA = 0xC0000BC6 // specified counter does not contain valid data or a successful status code. 
+ PDH_NO_DIALOG_DATA = 0xC0000BC7 + PDH_CANNOT_READ_NAME_STRINGS = 0xC0000BC8 + PDH_LOG_FILE_CREATE_ERROR = 0xC0000BC9 + PDH_LOG_FILE_OPEN_ERROR = 0xC0000BCA + PDH_LOG_TYPE_NOT_FOUND = 0xC0000BCB + PDH_NO_MORE_DATA = 0xC0000BCC + PDH_ENTRY_NOT_IN_LOG_FILE = 0xC0000BCD + PDH_DATA_SOURCE_IS_LOG_FILE = 0xC0000BCE + PDH_DATA_SOURCE_IS_REAL_TIME = 0xC0000BCF + PDH_UNABLE_READ_LOG_HEADER = 0xC0000BD0 + PDH_FILE_NOT_FOUND = 0xC0000BD1 + PDH_FILE_ALREADY_EXISTS = 0xC0000BD2 + PDH_NOT_IMPLEMENTED = 0xC0000BD3 + PDH_STRING_NOT_FOUND = 0xC0000BD4 + PDH_UNABLE_MAP_NAME_FILES = 0x80000BD5 + PDH_UNKNOWN_LOG_FORMAT = 0xC0000BD6 + PDH_UNKNOWN_LOGSVC_COMMAND = 0xC0000BD7 + PDH_LOGSVC_QUERY_NOT_FOUND = 0xC0000BD8 + PDH_LOGSVC_NOT_OPENED = 0xC0000BD9 + PDH_WBEM_ERROR = 0xC0000BDA + PDH_ACCESS_DENIED = 0xC0000BDB + PDH_LOG_FILE_TOO_SMALL = 0xC0000BDC + PDH_INVALID_DATASOURCE = 0xC0000BDD + PDH_INVALID_SQLDB = 0xC0000BDE + PDH_NO_COUNTERS = 0xC0000BDF + PDH_SQL_ALLOC_FAILED = 0xC0000BE0 + PDH_SQL_ALLOCCON_FAILED = 0xC0000BE1 + PDH_SQL_EXEC_DIRECT_FAILED = 0xC0000BE2 + PDH_SQL_FETCH_FAILED = 0xC0000BE3 + PDH_SQL_ROWCOUNT_FAILED = 0xC0000BE4 + PDH_SQL_MORE_RESULTS_FAILED = 0xC0000BE5 + PDH_SQL_CONNECT_FAILED = 0xC0000BE6 + PDH_SQL_BIND_FAILED = 0xC0000BE7 + PDH_CANNOT_CONNECT_WMI_SERVER = 0xC0000BE8 + PDH_PLA_COLLECTION_ALREADY_RUNNING = 0xC0000BE9 + PDH_PLA_ERROR_SCHEDULE_OVERLAP = 0xC0000BEA + PDH_PLA_COLLECTION_NOT_FOUND = 0xC0000BEB + PDH_PLA_ERROR_SCHEDULE_ELAPSED = 0xC0000BEC + PDH_PLA_ERROR_NOSTART = 0xC0000BED + PDH_PLA_ERROR_ALREADY_EXISTS = 0xC0000BEE + PDH_PLA_ERROR_TYPE_MISMATCH = 0xC0000BEF + PDH_PLA_ERROR_FILEPATH = 0xC0000BF0 + PDH_PLA_SERVICE_ERROR = 0xC0000BF1 + PDH_PLA_VALIDATION_ERROR = 0xC0000BF2 + PDH_PLA_VALIDATION_WARNING = 0x80000BF3 + PDH_PLA_ERROR_NAME_TOO_LONG = 0xC0000BF4 + PDH_INVALID_SQL_LOG_FORMAT = 0xC0000BF5 + PDH_COUNTER_ALREADY_IN_QUERY = 0xC0000BF6 + PDH_BINARY_LOG_CORRUPT = 0xC0000BF7 + PDH_LOG_SAMPLE_TOO_SMALL = 0xC0000BF8 + PDH_OS_LATER_VERSION = 0xC0000BF9 + PDH_OS_EARLIER_VERSION = 0xC0000BFA + PDH_INCORRECT_APPEND_TIME = 0xC0000BFB + PDH_UNMATCHED_APPEND_COUNTER = 0xC0000BFC + PDH_SQL_ALTER_DETAIL_FAILED = 0xC0000BFD + PDH_QUERY_PERF_DATA_TIMEOUT = 0xC0000BFE +) + +// Formatting options for GetFormattedCounterValue(). +const ( + PDH_FMT_RAW = 0x00000010 + PDH_FMT_ANSI = 0x00000020 + PDH_FMT_UNICODE = 0x00000040 + PDH_FMT_LONG = 0x00000100 // Return data as a long int. + PDH_FMT_DOUBLE = 0x00000200 // Return data as a double precision floating point real. + PDH_FMT_LARGE = 0x00000400 // Return data as a 64 bit integer. + PDH_FMT_NOSCALE = 0x00001000 // can be OR-ed: Do not apply the counter's default scaling factor. + PDH_FMT_1000 = 0x00002000 // can be OR-ed: multiply the actual value by 1,000. + PDH_FMT_NODATA = 0x00004000 // can be OR-ed: unknown what this is for, MSDN says nothing. + PDH_FMT_NOCAP100 = 0x00008000 // can be OR-ed: do not cap values > 100. 
+ PERF_DETAIL_COSTLY = 0x00010000 + PERF_DETAIL_STANDARD = 0x0000FFFF +) + +type ( + PDH_HQUERY HANDLE // query handle + PDH_HCOUNTER HANDLE // counter handle +) + +// Union specialization for double values +type PDH_FMT_COUNTERVALUE_DOUBLE struct { + CStatus uint32 + DoubleValue float64 +} + +// Union specialization for 64 bit integer values +type PDH_FMT_COUNTERVALUE_LARGE struct { + CStatus uint32 + LargeValue int64 +} + +// Union specialization for long values +type PDH_FMT_COUNTERVALUE_LONG struct { + CStatus uint32 + LongValue int32 + padding [4]byte +} + +// Union specialization for double values, used by PdhGetFormattedCounterArrayDouble() +type PDH_FMT_COUNTERVALUE_ITEM_DOUBLE struct { + SzName *uint16 // pointer to a string + FmtValue PDH_FMT_COUNTERVALUE_DOUBLE +} + +// Union specialization for 'large' values, used by PdhGetFormattedCounterArrayLarge() +type PDH_FMT_COUNTERVALUE_ITEM_LARGE struct { + SzName *uint16 // pointer to a string + FmtValue PDH_FMT_COUNTERVALUE_LARGE +} + +// Union specialization for long values, used by PdhGetFormattedCounterArrayLong() +type PDH_FMT_COUNTERVALUE_ITEM_LONG struct { + SzName *uint16 // pointer to a string + FmtValue PDH_FMT_COUNTERVALUE_LONG +} + +var ( + // Library + libpdhDll *syscall.DLL + + // Functions + pdh_AddCounterW *syscall.Proc + pdh_AddEnglishCounterW *syscall.Proc + pdh_CloseQuery *syscall.Proc + pdh_CollectQueryData *syscall.Proc + pdh_GetFormattedCounterValue *syscall.Proc + pdh_GetFormattedCounterArrayW *syscall.Proc + pdh_OpenQuery *syscall.Proc + pdh_ValidatePathW *syscall.Proc +) + +func init() { + // Library + libpdhDll = syscall.MustLoadDLL("pdh.dll") + + // Functions + pdh_AddCounterW = libpdhDll.MustFindProc("PdhAddCounterW") + pdh_AddEnglishCounterW, _ = libpdhDll.FindProc("PdhAddEnglishCounterW") // XXX: only supported on versions > Vista. + pdh_CloseQuery = libpdhDll.MustFindProc("PdhCloseQuery") + pdh_CollectQueryData = libpdhDll.MustFindProc("PdhCollectQueryData") + pdh_GetFormattedCounterValue = libpdhDll.MustFindProc("PdhGetFormattedCounterValue") + pdh_GetFormattedCounterArrayW = libpdhDll.MustFindProc("PdhGetFormattedCounterArrayW") + pdh_OpenQuery = libpdhDll.MustFindProc("PdhOpenQuery") + pdh_ValidatePathW = libpdhDll.MustFindProc("PdhValidatePathW") +} + +// Adds the specified counter to the query. This is the internationalized version. Preferably, use the +// function PdhAddEnglishCounter instead. hQuery is the query handle, which has been fetched by PdhOpenQuery. +// szFullCounterPath is a full, internationalized counter path (this will differ per Windows language version). +// dwUserData is a 'user-defined value', which becomes part of the counter information. To retrieve this value +// later, call PdhGetCounterInfo() and access dwQueryUserData of the PDH_COUNTER_INFO structure. +// +// Examples of szFullCounterPath (in an English version of Windows): +// +// \\Processor(_Total)\\% Idle Time +// \\Processor(_Total)\\% Processor Time +// \\LogicalDisk(C:)\% Free Space +// +// To view all (internationalized...) counters on a system, there are three non-programmatic ways: perfmon utility, +// the typeperf command, and the the registry editor. perfmon.exe is perhaps the easiest way, because it's basically a +// full implemention of the pdh.dll API, except with a GUI and all that. 
The registry setting also provides an +// interface to the available counters, and can be found at the following key: +// +// HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Perflib\CurrentLanguage +// +// This registry key contains several values as follows: +// +// 1 +// 1847 +// 2 +// System +// 4 +// Memory +// 6 +// % Processor Time +// ... many, many more +// +// Somehow, these numeric values can be used as szFullCounterPath too: +// +// \2\6 will correspond to \\System\% Processor Time +// +// The typeperf command may also be pretty easy. To find all performance counters, simply execute: +// +// typeperf -qx +func PdhAddCounter(hQuery PDH_HQUERY, szFullCounterPath string, dwUserData uintptr, phCounter *PDH_HCOUNTER) uint32 { + ptxt, _ := syscall.UTF16PtrFromString(szFullCounterPath) + ret, _, _ := pdh_AddCounterW.Call( + uintptr(hQuery), + uintptr(unsafe.Pointer(ptxt)), + dwUserData, + uintptr(unsafe.Pointer(phCounter))) + + return uint32(ret) +} + +// Adds the specified language-neutral counter to the query. See the PdhAddCounter function. This function only exists on +// Windows versions higher than Vista. +func PdhAddEnglishCounter(hQuery PDH_HQUERY, szFullCounterPath string, dwUserData uintptr, phCounter *PDH_HCOUNTER) uint32 { + if pdh_AddEnglishCounterW == nil { + return ERROR_INVALID_FUNCTION + } + + ptxt, _ := syscall.UTF16PtrFromString(szFullCounterPath) + ret, _, _ := pdh_AddEnglishCounterW.Call( + uintptr(hQuery), + uintptr(unsafe.Pointer(ptxt)), + dwUserData, + uintptr(unsafe.Pointer(phCounter))) + + return uint32(ret) +} + +// Closes all counters contained in the specified query, closes all handles related to the query, +// and frees all memory associated with the query. +func PdhCloseQuery(hQuery PDH_HQUERY) uint32 { + ret, _, _ := pdh_CloseQuery.Call(uintptr(hQuery)) + + return uint32(ret) +} + +// Collects the current raw data value for all counters in the specified query and updates the status +// code of each counter. With some counters, this function needs to be repeatedly called before the value +// of the counter can be extracted with PdhGetFormattedCounterValue(). For example, the following code +// requires at least two calls: +// +// var handle win.PDH_HQUERY +// var counterHandle win.PDH_HCOUNTER +// ret := win.PdhOpenQuery(0, 0, &handle) +// ret = win.PdhAddEnglishCounter(handle, "\\Processor(_Total)\\% Idle Time", 0, &counterHandle) +// var derp win.PDH_FMT_COUNTERVALUE_DOUBLE +// +// ret = win.PdhCollectQueryData(handle) +// fmt.Printf("Collect return code is %x\n", ret) // return code will be PDH_CSTATUS_INVALID_DATA +// ret = win.PdhGetFormattedCounterValueDouble(counterHandle, 0, &derp) +// +// ret = win.PdhCollectQueryData(handle) +// fmt.Printf("Collect return code is %x\n", ret) // return code will be ERROR_SUCCESS +// ret = win.PdhGetFormattedCounterValueDouble(counterHandle, 0, &derp) +// +// The PdhCollectQueryData will return an error in the first call because it needs two values for +// displaying the correct data for the processor idle time. The second call will have a 0 return code. +func PdhCollectQueryData(hQuery PDH_HQUERY) uint32 { + ret, _, _ := pdh_CollectQueryData.Call(uintptr(hQuery)) + + return uint32(ret) +} + +// Formats the given hCounter using a 'double'. The result is set into the specialized union struct pValue. +// This function does not directly translate to a Windows counterpart due to union specialization tricks. 
+func PdhGetFormattedCounterValueDouble(hCounter PDH_HCOUNTER, lpdwType *uint32, pValue *PDH_FMT_COUNTERVALUE_DOUBLE) uint32 { + ret, _, _ := pdh_GetFormattedCounterValue.Call( + uintptr(hCounter), + uintptr(PDH_FMT_DOUBLE), + uintptr(unsafe.Pointer(lpdwType)), + uintptr(unsafe.Pointer(pValue))) + + return uint32(ret) +} + +// Returns an array of formatted counter values. Use this function when you want to format the counter values of a +// counter that contains a wildcard character for the instance name. The itemBuffer must a slice of type PDH_FMT_COUNTERVALUE_ITEM_DOUBLE. +// An example of how this function can be used: +// +// okPath := "\\Process(*)\\% Processor Time" // notice the wildcard * character +// +// // ommitted all necessary stuff ... +// +// var bufSize uint32 +// var bufCount uint32 +// var size uint32 = uint32(unsafe.Sizeof(win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE{})) +// var emptyBuf [1]win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE // need at least 1 addressable null ptr. +// +// for { +// // collect +// ret := win.PdhCollectQueryData(queryHandle) +// if ret == win.ERROR_SUCCESS { +// ret = win.PdhGetFormattedCounterArrayDouble(counterHandle, &bufSize, &bufCount, &emptyBuf[0]) // uses null ptr here according to MSDN. +// if ret == win.PDH_MORE_DATA { +// filledBuf := make([]win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE, bufCount*size) +// ret = win.PdhGetFormattedCounterArrayDouble(counterHandle, &bufSize, &bufCount, &filledBuf[0]) +// for i := 0; i < int(bufCount); i++ { +// c := filledBuf[i] +// var s string = win.UTF16PtrToString(c.SzName) +// fmt.Printf("Index %d -> %s, value %v\n", i, s, c.FmtValue.DoubleValue) +// } +// +// filledBuf = nil +// // Need to at least set bufSize to zero, because if not, the function will not +// // return PDH_MORE_DATA and will not set the bufSize. +// bufCount = 0 +// bufSize = 0 +// } +// +// time.Sleep(2000 * time.Millisecond) +// } +// } +func PdhGetFormattedCounterArrayDouble(hCounter PDH_HCOUNTER, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *PDH_FMT_COUNTERVALUE_ITEM_DOUBLE) uint32 { + ret, _, _ := pdh_GetFormattedCounterArrayW.Call( + uintptr(hCounter), + uintptr(PDH_FMT_DOUBLE), + uintptr(unsafe.Pointer(lpdwBufferSize)), + uintptr(unsafe.Pointer(lpdwBufferCount)), + uintptr(unsafe.Pointer(itemBuffer))) + + return uint32(ret) +} + +// Creates a new query that is used to manage the collection of performance data. +// szDataSource is a null terminated string that specifies the name of the log file from which to +// retrieve the performance data. If 0, performance data is collected from a real-time data source. +// dwUserData is a user-defined value to associate with this query. To retrieve the user data later, +// call PdhGetCounterInfo and access dwQueryUserData of the PDH_COUNTER_INFO structure. phQuery is +// the handle to the query, and must be used in subsequent calls. This function returns a PDH_ +// constant error code, or ERROR_SUCCESS if the call succeeded. +func PdhOpenQuery(szDataSource uintptr, dwUserData uintptr, phQuery *PDH_HQUERY) uint32 { + ret, _, _ := pdh_OpenQuery.Call( + szDataSource, + dwUserData, + uintptr(unsafe.Pointer(phQuery))) + + return uint32(ret) +} + +// Validates a path. Will return ERROR_SUCCESS when ok, or PDH_CSTATUS_BAD_COUNTERNAME when the path is +// erroneous. 
+func PdhValidatePath(path string) uint32 {
+	ptxt, _ := syscall.UTF16PtrFromString(path)
+	ret, _, _ := pdh_ValidatePathW.Call(uintptr(unsafe.Pointer(ptxt)))
+
+	return uint32(ret)
+}
+
+func UTF16PtrToString(s *uint16) string {
+	if s == nil {
+		return ""
+	}
+	return syscall.UTF16ToString((*[1 << 29]uint16)(unsafe.Pointer(s))[0:])
+}
diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go
index 1f233a3d4..691f67a01 100644
--- a/plugins/inputs/win_perf_counters/win_perf_counters.go
+++ b/plugins/inputs/win_perf_counters/win_perf_counters.go
@@ -10,7 +10,6 @@ import (
 
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/plugins/inputs"
-	"github.com/lxn/win"
 )
 
 var sampleConfig string = `
@@ -103,8 +102,8 @@ type item struct {
 	instance      string
 	measurement   string
 	include_total bool
-	handle        win.PDH_HQUERY
-	counterHandle win.PDH_HCOUNTER
+	handle        PDH_HQUERY
+	counterHandle PDH_HCOUNTER
 }
 
 var sanitizedChars = strings.NewReplacer("/sec", "_persec", "/Sec", "_persec",
@@ -113,10 +112,10 @@ var sanitizedChars = strings.NewReplacer("/sec", "_persec", "/Sec", "_persec",
 func (m *Win_PerfCounters) AddItem(metrics *itemList, query string, objectName string,
 	counter string, instance string, measurement string, include_total bool) {
 
-	var handle win.PDH_HQUERY
-	var counterHandle win.PDH_HCOUNTER
-	ret := win.PdhOpenQuery(0, 0, &handle)
-	ret = win.PdhAddCounter(handle, query, 0, &counterHandle)
+	var handle PDH_HQUERY
+	var counterHandle PDH_HCOUNTER
+	ret := PdhOpenQuery(0, 0, &handle)
+	ret = PdhAddCounter(handle, query, 0, &counterHandle)
 	_ = ret
 
 	temp := &item{query, objectName, counter, instance, measurement,
@@ -131,14 +130,14 @@ func (m *Win_PerfCounters) AddItem(metrics *itemList, query string, objectName s
 }
 
 func (m *Win_PerfCounters) InvalidObject(exists uint32, query string, PerfObject perfobject, instance string, counter string) error {
-	if exists == 3221228472 { // win.PDH_CSTATUS_NO_OBJECT
+	if exists == 3221228472 { // PDH_CSTATUS_NO_OBJECT
 		if PerfObject.FailOnMissing {
 			err := errors.New("Performance object does not exist")
 			return err
 		} else {
 			fmt.Printf("Performance Object '%s' does not exist in query: %s\n", PerfObject.ObjectName, query)
 		}
-	} else if exists == 3221228473 { //win.PDH_CSTATUS_NO_COUNTER
+	} else if exists == 3221228473 { // PDH_CSTATUS_NO_COUNTER
 
 		if PerfObject.FailOnMissing {
 			err := errors.New("Counter in Performance object does not exist")
@@ -146,7 +145,7 @@
 		} else {
 			fmt.Printf("Counter '%s' does not exist in query: %s\n", counter, query)
 		}
-	} else if exists == 2147485649 { //win.PDH_CSTATUS_NO_INSTANCE
+	} else if exists == 2147485649 { // PDH_CSTATUS_NO_INSTANCE
 		if PerfObject.FailOnMissing {
 			err := errors.New("Instance in Performance object does not exist")
 			return err
@@ -189,9 +188,9 @@ func (m *Win_PerfCounters) ParseConfig(metrics *itemList) error {
 			query = "\\" + objectname + "(" + instance + ")\\" + counter
 		}
 
-		var exists uint32 = win.PdhValidatePath(query)
+		var exists uint32 = PdhValidatePath(query)
 
-		if exists == win.ERROR_SUCCESS {
+		if exists == ERROR_SUCCESS {
 			if m.PrintValid {
 				fmt.Printf("Valid: %s\n", query)
 			}
@@ -218,7 +217,7 @@ func (m *Win_PerfCounters) Cleanup(metrics *itemList) {
 	// Cleanup
 
 	for _, metric := range metrics.items {
-		ret := win.PdhCloseQuery(metric.handle)
+		ret := PdhCloseQuery(metric.handle)
 		_ = ret
 	}
 }
@@ -227,7 +226,7 @@ func (m *Win_PerfCounters) CleanupTestMode() {
 	// Cleanup for the testmode.
 	for _, metric := range gItemList {
-		ret := win.PdhCloseQuery(metric.handle)
+		ret := PdhCloseQuery(metric.handle)
 		_ = ret
 	}
 }
@@ -256,26 +255,26 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error {
 
 	var bufSize uint32
 	var bufCount uint32
-	var size uint32 = uint32(unsafe.Sizeof(win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE{}))
-	var emptyBuf [1]win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE // need at least 1 addressable null ptr.
+	var size uint32 = uint32(unsafe.Sizeof(PDH_FMT_COUNTERVALUE_ITEM_DOUBLE{}))
+	var emptyBuf [1]PDH_FMT_COUNTERVALUE_ITEM_DOUBLE // need at least 1 addressable null ptr.
 
 	// For iterate over the known metrics and get the samples.
 	for _, metric := range gItemList {
 		// collect
-		ret := win.PdhCollectQueryData(metric.handle)
-		if ret == win.ERROR_SUCCESS {
-			ret = win.PdhGetFormattedCounterArrayDouble(metric.counterHandle, &bufSize,
+		ret := PdhCollectQueryData(metric.handle)
+		if ret == ERROR_SUCCESS {
+			ret = PdhGetFormattedCounterArrayDouble(metric.counterHandle, &bufSize,
 				&bufCount, &emptyBuf[0]) // uses null ptr here according to MSDN.
-			if ret == win.PDH_MORE_DATA {
-				filledBuf := make([]win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE, bufCount*size)
+			if ret == PDH_MORE_DATA {
+				filledBuf := make([]PDH_FMT_COUNTERVALUE_ITEM_DOUBLE, bufCount*size)
 				if len(filledBuf) == 0 {
 					continue
 				}
-				ret = win.PdhGetFormattedCounterArrayDouble(metric.counterHandle,
+				ret = PdhGetFormattedCounterArrayDouble(metric.counterHandle,
 					&bufSize, &bufCount, &filledBuf[0])
 				for i := 0; i < int(bufCount); i++ {
 					c := filledBuf[i]
-					var s string = win.UTF16PtrToString(c.SzName)
+					var s string = UTF16PtrToString(c.SzName)
 
 					var add bool

From 48ae105a11fb2bdd2e05d853c601dc48eb701664 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Harasimowicz?=
Date: Tue, 20 Dec 2016 14:03:31 +0100
Subject: [PATCH 0036/1302] Fixing consul with multiple health checks per
 service (#1994)

* plugins/input/consul: moved check_id from regular fields to tags.

When a service has more than one check, sending data for both would overwrite
each other, resulting in only one check being written (the last one). Adding
check_id as a tag ensures we will get info for all unique checks per service.

* plugins/inputs/consul: updated tests
---
 plugins/inputs/consul/README.md      | 6 +++---
 plugins/inputs/consul/consul.go      | 2 +-
 plugins/inputs/consul/consul_test.go | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/plugins/inputs/consul/README.md b/plugins/inputs/consul/README.md
index a2685e2bf..01a39cbf7 100644
--- a/plugins/inputs/consul/README.md
+++ b/plugins/inputs/consul/README.md
@@ -29,9 +29,9 @@ to query the data.
 It will not report the [telemetry](https://www.consul.io/docs/agent/telemetry.html) data.
 
 Tags:
 - node: on which node check/service is registered on
 - service_name: name of the service (this is the service name not the service ID)
+- check_id
 
 Fields:
-- check_id
 - check_name
 - service_id
 - status
@@ -41,6 +41,6 @@ Fields:
 ```
 $ telegraf --config ./telegraf.conf -input-filter consul -test
 * Plugin: consul, Collection 1
-> consul_health_checks,host=wolfpit,node=consul-server-node check_id="serfHealth",check_name="Serf Health Status",service_id="",status="passing" 1464698464486439902
-> consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com check_id="service:www-example-com.test01",check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical" 1464698464486519036
+> consul_health_checks,host=wolfpit,node=consul-server-node,check_id="serfHealth" check_name="Serf Health Status",service_id="",status="passing" 1464698464486439902
+> consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com,check_id="service:www-example-com.test01" check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical" 1464698464486519036
 ```
diff --git a/plugins/inputs/consul/consul.go b/plugins/inputs/consul/consul.go
index eaeae73c1..4c28f4d12 100644
--- a/plugins/inputs/consul/consul.go
+++ b/plugins/inputs/consul/consul.go
@@ -95,13 +95,13 @@ func (c *Consul) GatherHealthCheck(acc telegraf.Accumulator, checks []*api.Healt
 		record := make(map[string]interface{})
 		tags := make(map[string]string)
 
-		record["check_id"] = check.CheckID
 		record["check_name"] = check.Name
 		record["service_id"] = check.ServiceID
 		record["status"] = check.Status
 
 		tags["node"] = check.Node
 		tags["service_name"] = check.ServiceName
+		tags["check_id"] = check.CheckID
 
 		acc.AddFields("consul_health_checks", record, tags)
 	}
diff --git a/plugins/inputs/consul/consul_test.go b/plugins/inputs/consul/consul_test.go
index 772ccba91..f970d4449 100644
--- a/plugins/inputs/consul/consul_test.go
+++ b/plugins/inputs/consul/consul_test.go
@@ -22,7 +22,6 @@ var sampleChecks = []*api.HealthCheck{
 func TestGatherHealtCheck(t *testing.T) {
 	expectedFields := map[string]interface{}{
-		"check_id":   "foo.health123",
 		"check_name": "foo.health",
 		"status":     "passing",
 		"service_id": "foo.123",
@@ -31,6 +30,7 @@
 	expectedTags := map[string]string{
 		"node":         "localhost",
 		"service_name": "foo",
+		"check_id":     "foo.health123",
 	}
 
 	var acc testutil.Accumulator

From 8df325a68c9b771a64770969e99500ad751c3384 Mon Sep 17 00:00:00 2001
From: Cameron Sparr
Date: Tue, 20 Dec 2016 13:04:51 +0000
Subject: [PATCH 0037/1302] changelog update

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e69a1004e..751ee1eaa 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -48,6 +48,7 @@ in their config file.
 - [#1987](https://github.com/influxdata/telegraf/issues/1987): fix docker input plugin tags when registry has port.
 - [#2089](https://github.com/influxdata/telegraf/issues/2089): Fix tail input when reading from a pipe.
 - [#1449](https://github.com/influxdata/telegraf/issues/1449): MongoDB plugin always shows 0 replication lag.
+- [#1825](https://github.com/influxdata/telegraf/issues/1825): Consul plugin: add check_id as a tag in metrics to avoid overwrites.
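A short aside on why moving check_id into the tag set fixes the overwrite: within one
gather, points that share a measurement name and an identical tag set collapse into the
same series, so per-check values stored only in fields clobber one another. The sketch
below is a standalone illustration, not telegraf code; `seriesKey` is a hypothetical
helper that mimics how a series is identified.

```go
package main

import (
	"fmt"
	"sort"
)

// seriesKey mimics how a measurement plus its sorted tag set identifies a
// unique series; points with the same key overwrite each other.
func seriesKey(measurement string, tags map[string]string) string {
	keys := make([]string, 0, len(tags))
	for k := range tags {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	key := measurement
	for _, k := range keys {
		key += "," + k + "=" + tags[k]
	}
	return key
}

func main() {
	// With check_id only in fields, two checks on one service share a key,
	// so the second point overwrites the first.
	base := map[string]string{"node": "n1", "service_name": "web"}
	fmt.Println(seriesKey("consul_health_checks", base))

	// With check_id as a tag, each check produces its own series.
	for _, id := range []string{"service:web.1", "service:web.2"} {
		tags := map[string]string{"node": "n1", "service_name": "web", "check_id": id}
		fmt.Println(seriesKey("consul_health_checks", tags))
	}
}
```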
 ## v1.1.2 [2016-12-12]

From 986614654571ea62c791c0b5042b292cd3deb92d Mon Sep 17 00:00:00 2001
From: Cameron Sparr
Date: Tue, 20 Dec 2016 13:21:51 +0000
Subject: [PATCH 0038/1302] Support negative statsd counters

closes #1898
---
 CHANGELOG.md                         | 3 ++-
 plugins/inputs/statsd/statsd.go      | 4 ++--
 plugins/inputs/statsd/statsd_test.go | 8 ++++++--
 3 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 751ee1eaa..b228a891d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -29,7 +29,8 @@ in their config file.
 - [#2078](https://github.com/influxdata/telegraf/pull/2078): Ping input: add standard deviation field.
 - [#2121](https://github.com/influxdata/telegraf/pull/2121): Add GC pause metric to InfluxDB input plugin.
 - [#2006](https://github.com/influxdata/telegraf/pull/2006): Added response_timeout property to prometheus input plugin.
-- [#1763](https://github.com/influxdata/telegraf/issues/1763): Pulling github.com/lxn/win's pdh wrapper into telegraf
+- [#1763](https://github.com/influxdata/telegraf/issues/1763): Pulling github.com/lxn/win's pdh wrapper into telegraf.
+- [#1898](https://github.com/influxdata/telegraf/issues/1898): Support negative statsd counters.
 
 ### Bugfixes
 
diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go
index 75dfc915e..1b0189a9e 100644
--- a/plugins/inputs/statsd/statsd.go
+++ b/plugins/inputs/statsd/statsd.go
@@ -426,8 +426,8 @@ func (s *Statsd) parseStatsdLine(line string) error {
 
 		// Parse the value
 		if strings.HasPrefix(pipesplit[0], "-") || strings.HasPrefix(pipesplit[0], "+") {
-			if m.mtype != "g" {
-				log.Printf("E! Error: +- values are only supported for gauges: %s\n", line)
+			if m.mtype != "g" && m.mtype != "c" {
+				log.Printf("E! Error: +- values are only supported for gauges & counters: %s\n", line)
 				return errors.New("Error Parsing statsd line")
 			}
 			m.additive = true
diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go
index 9fbaf5372..ff3002d2c 100644
--- a/plugins/inputs/statsd/statsd_test.go
+++ b/plugins/inputs/statsd/statsd_test.go
@@ -197,6 +197,8 @@ func TestParse_Counters(t *testing.T) {
 		"sample.rate:1|c|@0.1",
 		"sample.rate:1|c",
 		"scientific.notation:4.696E+5|c",
+		"negative.test:100|c",
+		"negative.test:-5|c",
 	}
 
 	for _, line := range valid_lines {
@@ -230,6 +232,10 @@
 			"sample_rate",
 			11,
 		},
+		{
+			"negative_test",
+			95,
+		},
 	}
 
 	for _, test := range validations {
@@ -299,11 +305,9 @@ func TestParse_InvalidLines(t *testing.T) {
 		"i.dont.have.a.pipe:45g",
 		"i.dont.have.a.colon45|c",
 		"invalid.metric.type:45|e",
-		"invalid.plus.minus.non.gauge:+10|c",
 		"invalid.plus.minus.non.gauge:+10|s",
 		"invalid.plus.minus.non.gauge:+10|ms",
 		"invalid.plus.minus.non.gauge:+10|h",
-		"invalid.plus.minus.non.gauge:-10|c",
 		"invalid.value:foobar|c",
 		"invalid.value:d11|c",
 		"invalid.value:1d1|c",

From a90afd95c6ff204f6ee6f4544a34f379fceec8a0 Mon Sep 17 00:00:00 2001
From: Cameron Sparr
Date: Tue, 20 Dec 2016 15:57:32 +0000
Subject: [PATCH 0039/1302] Fix & unit test logparser CLF pattern with IPv6

deals partially with #1973

see also https://github.com/vjeantet/grok/issues/17
---
 CHANGELOG.md                                  |  1 +
 plugins/inputs/logparser/grok/grok_test.go    | 40 +++++++++++++++++++
 .../inputs/logparser/grok/influx_patterns.go  |  2 +-
 3 files changed, 42 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b228a891d..a444b19ca 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -50,6 +50,7 @@ in their config file.
 - [#2089](https://github.com/influxdata/telegraf/issues/2089): Fix tail input when reading from a pipe.
 - [#1449](https://github.com/influxdata/telegraf/issues/1449): MongoDB plugin always shows 0 replication lag.
 - [#1825](https://github.com/influxdata/telegraf/issues/1825): Consul plugin: add check_id as a tag in metrics to avoid overwrites.
+- [#1973](https://github.com/influxdata/telegraf/issues/1973): Partial fix: logparser CLF pattern with IPv6 addresses.
 
 ## v1.1.2 [2016-12-12]
 
diff --git a/plugins/inputs/logparser/grok/grok_test.go b/plugins/inputs/logparser/grok/grok_test.go
index 105cc048c..1344896b8 100644
--- a/plugins/inputs/logparser/grok/grok_test.go
+++ b/plugins/inputs/logparser/grok/grok_test.go
@@ -82,6 +82,46 @@ func TestMeasurementName(t *testing.T) {
 	assert.Equal(t, "my_web_log", m.Name())
 }
 
+func TestCLF_IPv6(t *testing.T) {
+	p := &Parser{
+		Measurement: "my_web_log",
+		Patterns:    []string{"%{COMMON_LOG_FORMAT}"},
+	}
+	assert.NoError(t, p.Compile())
+
+	m, err := p.ParseLine(`2001:0db8:85a3:0000:0000:8a2e:0370:7334 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)
+	require.NotNil(t, m)
+	assert.NoError(t, err)
+	assert.Equal(t,
+		map[string]interface{}{
+			"resp_bytes":   int64(2326),
+			"auth":         "frank",
+			"client_ip":    "2001:0db8:85a3:0000:0000:8a2e:0370:7334",
+			"http_version": float64(1.0),
+			"ident":        "user-identifier",
+			"request":      "/apache_pb.gif",
+		},
+		m.Fields())
+	assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
+	assert.Equal(t, "my_web_log", m.Name())
+
+	m, err = p.ParseLine(`::1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)
+	require.NotNil(t, m)
+	assert.NoError(t, err)
+	assert.Equal(t,
+		map[string]interface{}{
+			"resp_bytes":   int64(2326),
+			"auth":         "frank",
+			"client_ip":    "::1",
+			"http_version": float64(1.0),
+			"ident":        "user-identifier",
+			"request":      "/apache_pb.gif",
+		},
+		m.Fields())
+	assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
+	assert.Equal(t, "my_web_log", m.Name())
+}
+
 func TestCustomInfluxdbHttpd(t *testing.T) {
 	p := &Parser{
 		Patterns: []string{`\[httpd\] %{COMBINED_LOG_FORMAT} %{UUID:uuid:drop} %{NUMBER:response_time_us:int}`},
diff --git a/plugins/inputs/logparser/grok/influx_patterns.go b/plugins/inputs/logparser/grok/influx_patterns.go
index 052791140..1b115dadd 100644
--- a/plugins/inputs/logparser/grok/influx_patterns.go
+++ b/plugins/inputs/logparser/grok/influx_patterns.go
@@ -56,7 +56,7 @@ EXAMPLE_LOG \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE}
 NGUSERNAME [a-zA-Z0-9\.\@\-\+_%]+
 NGUSER %{NGUSERNAME}
 # Wider-ranging client IP matching
-CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1)
+CLIENT (?:%{IPV6}|%{IPV4}|%{HOSTNAME}|%{HOSTPORT})
 
 ##
 ## COMMON LOG PATTERNS

From 1392e73125a3d08869ab16379f0ebb4bc12ca365 Mon Sep 17 00:00:00 2001
From: Matt O'Hara
Date: Tue, 20 Dec 2016 10:30:03 -0600
Subject: [PATCH 0040/1302] Add clusterstats to elasticsearch plugin (#1979)

* add clusterstats to elasticsearch input plugin

* add clusterstats to elasticsearch input plugin

* add clusterstats to elasticsearch input plugin

* add clusterstats to elasticsearch input plugin

* add clusterstats to elasticsearch input plugin

* responses to requested changes

* remove unnecessary recommendation
---
 etc/telegraf.conf                             |  11 +-
 plugins/inputs/elasticsearch/README.md        |  14 +-
 plugins/inputs/elasticsearch/elasticsearch.go | 167 ++++++++--
 .../elasticsearch/elasticsearch_test.go       | 142 +++++++--
 plugins/inputs/elasticsearch/testdata_test.go | 292 +++++++++++++++++-
 plugins/parsers/json/parser.go                |  33 +-
 6 files changed, 583 insertions(+), 76 deletions(-)

diff --git a/etc/telegraf.conf b/etc/telegraf.conf
index a7b903388..a6058434c 100644
--- a/etc/telegraf.conf
+++ b/etc/telegraf.conf
@@ -784,13 +784,18 @@
 #   ## Timeout for HTTP requests to the elastic search server(s)
 #   http_timeout = "5s"
 #
-#   ## set local to false when you want to read the indices stats from all nodes
-#   ## within the cluster
+#   ## When local is true (the default), the node will read only its own stats.
+#   ## Set local to false when you want to read the node stats from all nodes
+#   ## of the cluster.
 #   local = true
 #
-#   ## set cluster_health to true when you want to also obtain cluster level stats
+#   ## set cluster_health to true when you want to also obtain cluster health stats
 #   cluster_health = false
 #
+#   ## Set cluster_stats to true when you want to obtain cluster stats from the
+#   ## Master node.
+#   cluster_stats = false
+
 #   ## Optional SSL Config
 #   # ssl_ca = "/etc/telegraf/ca.pem"
 #   # ssl_cert = "/etc/telegraf/cert.pem"
diff --git a/plugins/inputs/elasticsearch/README.md b/plugins/inputs/elasticsearch/README.md
index 2cf6f4d77..9cf9b9b09 100644
--- a/plugins/inputs/elasticsearch/README.md
+++ b/plugins/inputs/elasticsearch/README.md
@@ -2,7 +2,8 @@
 
 The [elasticsearch](https://www.elastic.co/) plugin queries endpoints to obtain
 [node](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html)
-and optionally [cluster](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) stats.
+and optionally [cluster-health](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html)
+or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) metrics.
 
 ### Configuration:
 
@@ -14,13 +15,18 @@ and optionally [cluster](https://www.elastic.co/guide/en/elasticsearch/reference
   ## Timeout for HTTP requests to the elastic search server(s)
   http_timeout = "5s"
 
-  ## set local to false when you want to read the indices stats from all nodes
-  ## within the cluster
+  ## When local is true (the default), the node will read only its own stats.
+  ## Set local to false when you want to read the node stats from all nodes
+  ## of the cluster.
   local = true
 
-  ## set cluster_health to true when you want to also obtain cluster level stats
+  ## Set cluster_health to true when you want to also obtain cluster health stats
   cluster_health = false
 
+  ## Set cluster_stats to true when you want to obtain cluster stats from the
+  ## Master node.
+  cluster_stats = false
+
   ## Optional SSL Config
   # ssl_ca = "/etc/telegraf/ca.pem"
   # ssl_cert = "/etc/telegraf/cert.pem"
diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go
index cce3d94ff..5d5d64909 100644
--- a/plugins/inputs/elasticsearch/elasticsearch.go
+++ b/plugins/inputs/elasticsearch/elasticsearch.go
@@ -12,13 +12,15 @@ import (
 	"github.com/influxdata/telegraf/internal/errchan"
 	"github.com/influxdata/telegraf/plugins/inputs"
 	jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
+	"io/ioutil"
+	"strings"
 )
 
+// Nodestats are always generated, so simply define a constant for these endpoints
 const statsPath = "/_nodes/stats"
 const statsPathLocal = "/_nodes/_local/stats"
-const healthPath = "/_cluster/health"
 
-type node struct {
+type nodeStat struct {
 	Host       string            `json:"host"`
 	Name       string            `json:"name"`
 	Attributes map[string]string `json:"attributes"`
@@ -58,6 +60,20 @@ type indexHealth struct {
 	UnassignedShards    int    `json:"unassigned_shards"`
 }
 
+type clusterStats struct {
+	NodeName    string      `json:"node_name"`
+	ClusterName string      `json:"cluster_name"`
+	Status      string      `json:"status"`
+	Indices     interface{} `json:"indices"`
+	Nodes       interface{} `json:"nodes"`
+}
+
+type catMaster struct {
+	NodeID   string `json:"id"`
+	NodeIP   string `json:"ip"`
+	NodeName string `json:"node"`
+}
+
 const sampleConfig = `
   ## specify a list of one or more Elasticsearch servers
   # you can add username and password to your url to use basic authentication:
@@ -67,13 +83,18 @@ const sampleConfig = `
   ## Timeout for HTTP requests to the elastic search server(s)
   http_timeout = "5s"
 
-  ## set local to false when you want to read the indices stats from all nodes
-  ## within the cluster
+  ## When local is true (the default), the node will read only its own stats.
+  ## Set local to false when you want to read the node stats from all nodes
+  ## of the cluster.
   local = true
 
-  ## set cluster_health to true when you want to also obtain cluster level stats
+  ## Set cluster_health to true when you want to also obtain cluster health stats
   cluster_health = false
 
+  ## Set cluster_stats to true when you want to also obtain cluster stats from the
+  ## Master node.
+  cluster_stats = false
+
   ## Optional SSL Config
   # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
@@ -85,15 +106,18 @@ const sampleConfig = `
 
 // Elasticsearch is a plugin to read stats from one or many Elasticsearch
 // servers.
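 // When ClusterStats is enabled, each Gather first queries /_cat/master so
 // that only the node currently elected master reports cluster-level stats.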
 type Elasticsearch struct {
-	Local              bool
-	Servers            []string
-	HttpTimeout        internal.Duration
-	ClusterHealth      bool
-	SSLCA              string `toml:"ssl_ca"`   // Path to CA file
-	SSLCert            string `toml:"ssl_cert"` // Path to host cert file
-	SSLKey             string `toml:"ssl_key"`  // Path to cert key file
-	InsecureSkipVerify bool   // Use SSL but skip chain & host verification
-	client             *http.Client
+	Local                    bool
+	Servers                  []string
+	HttpTimeout              internal.Duration
+	ClusterHealth            bool
+	ClusterStats             bool
+	SSLCA                    string `toml:"ssl_ca"`   // Path to CA file
+	SSLCert                  string `toml:"ssl_cert"` // Path to host cert file
+	SSLKey                   string `toml:"ssl_key"`  // Path to cert key file
+	InsecureSkipVerify       bool   // Use SSL but skip chain & host verification
+	client                   *http.Client
+	catMasterResponseTokens  []string
+	isMaster                 bool
 }
 
 // NewElasticsearch return a new instance of Elasticsearch
@@ -138,12 +162,27 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
 			} else {
 				url = s + statsPath
 			}
+			e.isMaster = false
+
+			if e.ClusterStats {
+				// get cat/master information here so NodeStats can determine
+				// whether this node is the Master
+				e.setCatMaster(s + "/_cat/master")
+			}
+
+			// Always gather node states
 			if err := e.gatherNodeStats(url, acc); err != nil {
 				errChan.C <- err
 				return
 			}
+
 			if e.ClusterHealth {
-				e.gatherClusterStats(fmt.Sprintf("%s/_cluster/health?level=indices", s), acc)
+				url = s + "/_cluster/health?level=indices"
+				e.gatherClusterHealth(url, acc)
+			}
+
+			if e.ClusterStats && e.isMaster {
+				e.gatherClusterStats(s+"/_cluster/stats", acc)
 			}
 		}(serv, acc)
 	}
@@ -171,12 +210,13 @@ func (e *Elasticsearch) createHttpClient() (*http.Client, error) {
 
 func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) error {
 	nodeStats := &struct {
-		ClusterName string           `json:"cluster_name"`
-		Nodes       map[string]*node `json:"nodes"`
+		ClusterName string               `json:"cluster_name"`
+		Nodes       map[string]*nodeStat `json:"nodes"`
 	}{}
-	if err := e.gatherData(url, nodeStats); err != nil {
+	if err := e.gatherJsonData(url, nodeStats); err != nil {
 		return err
 	}
+
 	for id, n := range nodeStats.Nodes {
 		tags := map[string]string{
 			"node_id":      id,
@@ -185,6 +225,11 @@
 			"cluster_name": nodeStats.ClusterName,
 		}
 
+		if e.ClusterStats {
+			// check for master
+			e.isMaster = (id == e.catMasterResponseTokens[0])
+		}
+
 		for k, v := range n.Attributes {
 			tags["node_attribute_"+k] = v
 		}
@@ -204,6 +249,7 @@
 		now := time.Now()
 		for p, s := range stats {
 			f := jsonparser.JSONFlattener{}
+			// parse Json, ignoring strings and bools
 			err := f.FlattenJSON("", s)
 			if err != nil {
 				return err
@@ -214,31 +260,31 @@
 	return nil
 }
 
-func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator) error {
-	clusterStats := &clusterHealth{}
-	if err := e.gatherData(url, clusterStats); err != nil {
+func (e *Elasticsearch) gatherClusterHealth(url string, acc telegraf.Accumulator) error {
+	healthStats := &clusterHealth{}
+	if err := e.gatherJsonData(url, healthStats); err != nil {
 		return err
 	}
 	measurementTime := time.Now()
 	clusterFields := map[string]interface{}{
-		"status":                clusterStats.Status,
-		"timed_out":             clusterStats.TimedOut,
-		"number_of_nodes":       clusterStats.NumberOfNodes,
-		"number_of_data_nodes":  clusterStats.NumberOfDataNodes,
-		"active_primary_shards": clusterStats.ActivePrimaryShards,
-		"active_shards":         clusterStats.ActiveShards,
-		"relocating_shards":     clusterStats.RelocatingShards,
-		"initializing_shards":   clusterStats.InitializingShards,
-		"unassigned_shards":     clusterStats.UnassignedShards,
+		"status":                healthStats.Status,
+		"timed_out":             healthStats.TimedOut,
+		"number_of_nodes":       healthStats.NumberOfNodes,
+		"number_of_data_nodes":  healthStats.NumberOfDataNodes,
+		"active_primary_shards": healthStats.ActivePrimaryShards,
+		"active_shards":         healthStats.ActiveShards,
+		"relocating_shards":     healthStats.RelocatingShards,
+		"initializing_shards":   healthStats.InitializingShards,
+		"unassigned_shards":     healthStats.UnassignedShards,
 	}
 	acc.AddFields(
 		"elasticsearch_cluster_health",
 		clusterFields,
-		map[string]string{"name": clusterStats.ClusterName},
+		map[string]string{"name": healthStats.ClusterName},
 		measurementTime,
 	)
 
-	for name, health := range clusterStats.Indices {
+	for name, health := range healthStats.Indices {
 		indexFields := map[string]interface{}{
 			"status":            health.Status,
 			"number_of_shards":  health.NumberOfShards,
@@ -259,7 +305,60 @@
 	return nil
 }
 
-func (e *Elasticsearch) gatherData(url string, v interface{}) error {
+func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator) error {
+	clusterStats := &clusterStats{}
+	if err := e.gatherJsonData(url, clusterStats); err != nil {
+		return err
+	}
+	now := time.Now()
+	tags := map[string]string{
+		"node_name":    clusterStats.NodeName,
+		"cluster_name": clusterStats.ClusterName,
+		"status":       clusterStats.Status,
+	}
+
+	stats := map[string]interface{}{
+		"nodes":   clusterStats.Nodes,
+		"indices": clusterStats.Indices,
+	}
+
+	for p, s := range stats {
+		f := jsonparser.JSONFlattener{}
+		// parse json, including bools and strings
+		err := f.FullFlattenJSON("", s, true, true)
+		if err != nil {
+			return err
+		}
+		acc.AddFields("elasticsearch_clusterstats_"+p, f.Fields, tags, now)
+	}
+
+	return nil
+}
+
+func (e *Elasticsearch) setCatMaster(url string) error {
+	r, err := e.client.Get(url)
+	if err != nil {
+		return err
+	}
+	defer r.Body.Close()
+	if r.StatusCode != http.StatusOK {
+		// NOTE: we are not going to read/discard r.Body under the assumption we'd prefer
+		// to let the underlying transport close the connection and re-establish a new one for
+		// future calls.
+		return fmt.Errorf("status-code %d, expected %d", r.StatusCode, http.StatusOK)
+	}
+	response, err := ioutil.ReadAll(r.Body)
+
+	if err != nil {
+		return err
+	}
+
+	e.catMasterResponseTokens = strings.Split(string(response), " ")
+
+	return nil
+}
+
+func (e *Elasticsearch) gatherJsonData(url string, v interface{}) error {
 	r, err := e.client.Get(url)
 	if err != nil {
 		return err
@@ -272,9 +371,11 @@
 		return fmt.Errorf("elasticsearch: API responded with status-code %d, expected %d",
 			r.StatusCode, http.StatusOK)
 	}
+
 	if err = json.NewDecoder(r.Body).Decode(v); err != nil {
 		return err
 	}
+
 	return nil
 }
 
diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go
index 760ac921b..59caa4306 100644
--- a/plugins/inputs/elasticsearch/elasticsearch_test.go
+++ b/plugins/inputs/elasticsearch/elasticsearch_test.go
@@ -8,6 +8,8 @@ import (
 
 	"github.com/influxdata/telegraf/testutil"
 
+	"fmt"
+
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -37,16 +39,13 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) {
 func (t *transportMock) CancelRequest(_ *http.Request) {
 }
 
-func TestElasticsearch(t *testing.T) {
-	es := newElasticsearchWithClient()
-	es.Servers = []string{"http://example.com:9200"}
-	es.client.Transport = newTransportMock(http.StatusOK, statsResponse)
-
-	var acc testutil.Accumulator
-	if err := es.Gather(&acc); err != nil {
-		t.Fatal(err)
+func checkIsMaster(es *Elasticsearch, expected bool, t *testing.T) {
+	if es.isMaster != expected {
+		msg := fmt.Sprintf("IsMaster set incorrectly")
+		assert.Fail(t, msg)
 	}
-
+}
+func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) {
 	tags := map[string]string{
 		"cluster_name":          "es-testcluster",
 		"node_attribute_master": "true",
@@ -55,25 +54,55 @@
 		"node_host":             "test",
 	}
 
-	acc.AssertContainsTaggedFields(t, "elasticsearch_indices", indicesExpected, tags)
-	acc.AssertContainsTaggedFields(t, "elasticsearch_os", osExpected, tags)
-	acc.AssertContainsTaggedFields(t, "elasticsearch_process", processExpected, tags)
-	acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", jvmExpected, tags)
-	acc.AssertContainsTaggedFields(t, "elasticsearch_thread_pool", threadPoolExpected, tags)
-	acc.AssertContainsTaggedFields(t, "elasticsearch_fs", fsExpected, tags)
-	acc.AssertContainsTaggedFields(t, "elasticsearch_transport", transportExpected, tags)
-	acc.AssertContainsTaggedFields(t, "elasticsearch_http", httpExpected, tags)
-	acc.AssertContainsTaggedFields(t, "elasticsearch_breakers", breakersExpected, tags)
+	acc.AssertContainsTaggedFields(t, "elasticsearch_indices", nodestatsIndicesExpected, tags)
+	acc.AssertContainsTaggedFields(t, "elasticsearch_os", nodestatsOsExpected, tags)
+	acc.AssertContainsTaggedFields(t, "elasticsearch_process", nodestatsProcessExpected, tags)
+	acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", nodestatsJvmExpected, tags)
+	acc.AssertContainsTaggedFields(t, "elasticsearch_thread_pool", nodestatsThreadPoolExpected, tags)
+	acc.AssertContainsTaggedFields(t, "elasticsearch_fs", nodestatsFsExpected, tags)
+	acc.AssertContainsTaggedFields(t, "elasticsearch_transport", nodestatsTransportExpected, tags)
+	acc.AssertContainsTaggedFields(t, "elasticsearch_http", nodestatsHttpExpected, tags)
+	acc.AssertContainsTaggedFields(t, "elasticsearch_breakers", nodestatsBreakersExpected, tags)
 }
 
-func TestGatherClusterStats(t *testing.T) {
+func TestGather(t *testing.T) {
+	es := newElasticsearchWithClient()
+	es.Servers = []string{"http://example.com:9200"}
+	es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)
+
+	var acc testutil.Accumulator
+	if err := es.Gather(&acc); err != nil {
+		t.Fatal(err)
+	}
+
+	checkIsMaster(es, false, t)
+	checkNodeStatsResult(t, &acc)
+}
+
+func TestGatherNodeStats(t *testing.T) {
+	es := newElasticsearchWithClient()
+	es.Servers = []string{"http://example.com:9200"}
+	es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)
+
+	var acc testutil.Accumulator
+	if err := es.gatherNodeStats("junk", &acc); err != nil {
+		t.Fatal(err)
+	}
+
+	checkIsMaster(es, false, t)
+	checkNodeStatsResult(t, &acc)
+}
+
+func TestGatherClusterHealth(t *testing.T) {
 	es := newElasticsearchWithClient()
 	es.Servers = []string{"http://example.com:9200"}
 	es.ClusterHealth = true
-	es.client.Transport = newTransportMock(http.StatusOK, clusterResponse)
+	es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse)
 
 	var acc testutil.Accumulator
-	require.NoError(t, es.Gather(&acc))
+	require.NoError(t, es.gatherClusterHealth("junk", &acc))
+
+	checkIsMaster(es, false, t)
 
 	acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
 		clusterHealthExpected,
@@ -88,6 +117,77 @@ func TestGatherClusterStats(t *testing.T) {
 		map[string]string{"index": "v2"})
 }
 
+func TestGatherClusterStatsMaster(t *testing.T) {
+	// This needs multiple steps to replicate the multiple calls internally.
+	es := newElasticsearchWithClient()
+	es.ClusterStats = true
+	es.Servers = []string{"http://example.com:9200"}
+
+	// first get catMaster
+	es.client.Transport = newTransportMock(http.StatusOK, IsMasterResult)
+	require.NoError(t, es.setCatMaster("junk"))
+
+	IsMasterResultTokens := strings.Split(string(IsMasterResult), " ")
+	if es.catMasterResponseTokens[0] != IsMasterResultTokens[0] {
+		msg := fmt.Sprintf("catmaster is incorrect")
+		assert.Fail(t, msg)
+	}
+
+	// now get node status, which determines whether we're master
+	var acc testutil.Accumulator
+	es.Local = true
+	es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)
+	if err := es.gatherNodeStats("junk", &acc); err != nil {
+		t.Fatal(err)
+	}
+
+	checkIsMaster(es, true, t)
+	checkNodeStatsResult(t, &acc)
+
+	// now test the clusterstats method
+	es.client.Transport = newTransportMock(http.StatusOK, clusterStatsResponse)
+	require.NoError(t, es.gatherClusterStats("junk", &acc))
+
+	tags := map[string]string{
+		"cluster_name": "es-testcluster",
+		"node_name":    "test.host.com",
+		"status":       "red",
+	}
+
+	acc.AssertContainsTaggedFields(t, "elasticsearch_clusterstats_nodes", clusterstatsNodesExpected, tags)
+	acc.AssertContainsTaggedFields(t, "elasticsearch_clusterstats_indices", clusterstatsIndicesExpected, tags)
+}
+
+func TestGatherClusterStatsNonMaster(t *testing.T) {
+	// This needs multiple steps to replicate the multiple calls internally.
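+	// Steps: (1) seed catMasterResponseTokens from a mocked _cat/master
+	// response naming a different node, (2) gather node stats so isMaster is
+	// recomputed, (3) verify the flag stays false so cluster stats are skipped.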
+	es := newElasticsearchWithClient()
+	es.ClusterStats = true
+	es.Servers = []string{"http://example.com:9200"}
+
+	// first get catMaster
+	es.client.Transport = newTransportMock(http.StatusOK, IsNotMasterResult)
+	require.NoError(t, es.setCatMaster("junk"))
+
+	IsNotMasterResultTokens := strings.Split(string(IsNotMasterResult), " ")
+	if es.catMasterResponseTokens[0] != IsNotMasterResultTokens[0] {
+		msg := fmt.Sprintf("catmaster is incorrect")
+		assert.Fail(t, msg)
+	}
+
+	// now get node status, which determines whether we're master
+	var acc testutil.Accumulator
+	es.Local = true
+	es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)
+	if err := es.gatherNodeStats("junk", &acc); err != nil {
+		t.Fatal(err)
+	}
+
+	// ensure flag is clear so Cluster Stats would not be done
+	checkIsMaster(es, false, t)
+	checkNodeStatsResult(t, &acc)
+
+}
+
 func newElasticsearchWithClient() *Elasticsearch {
 	es := NewElasticsearch()
 	es.client = &http.Client{}
diff --git a/plugins/inputs/elasticsearch/testdata_test.go b/plugins/inputs/elasticsearch/testdata_test.go
index bca1f9e45..19ebb3bfb 100644
--- a/plugins/inputs/elasticsearch/testdata_test.go
+++ b/plugins/inputs/elasticsearch/testdata_test.go
@@ -1,6 +1,6 @@
 package elasticsearch
 
-const clusterResponse = `
+const clusterHealthResponse = `
 {
    "cluster_name": "elasticsearch_telegraf",
    "status": "green",
@@ -71,7 +71,7 @@ var v2IndexExpected = map[string]interface{}{
 	"unassigned_shards":   20,
 }
 
-const statsResponse = `
+const nodeStatsResponse = `
 {
   "cluster_name": "es-testcluster",
   "nodes": {
@@ -489,7 +489,7 @@ const statsResponse = `
 }
 `
 
-var indicesExpected = map[string]interface{}{
+var nodestatsIndicesExpected = map[string]interface{}{
 	"id_cache_memory_size_in_bytes": float64(0),
 	"completion_size_in_bytes":      float64(0),
 	"suggest_total":                 float64(0),
@@ -561,7 +561,7 @@ var indicesExpected = map[string]interface{}{
 	"segments_fixed_bit_set_memory_in_bytes": float64(0),
 }
 
-var osExpected = map[string]interface{}{
+var nodestatsOsExpected = map[string]interface{}{
 	"load_average_0": float64(0.01),
 	"load_average_1": float64(0.04),
 	"load_average_2": float64(0.05),
@@ -576,7 +576,7 @@ var osExpected = map[string]interface{}{
 	"mem_used_in_bytes": float64(1621868544),
 }
 
-var processExpected = map[string]interface{}{
+var nodestatsProcessExpected = map[string]interface{}{
 	"mem_total_virtual_in_bytes": float64(4747890688),
 	"timestamp":                  float64(1436460392945),
 	"open_file_descriptors":      float64(160),
@@ -586,7 +586,7 @@ var processExpected = map[string]interface{}{
 	"cpu_user_in_millis": float64(13610),
 }
 
-var jvmExpected = map[string]interface{}{
+var nodestatsJvmExpected = map[string]interface{}{
 	"timestamp":                  float64(1436460392945),
 	"uptime_in_millis":           float64(202245),
 	"mem_non_heap_used_in_bytes": float64(39634576),
@@ -621,7 +621,7 @@ var jvmExpected = map[string]interface{}{
 	"buffer_pools_mapped_total_capacity_in_bytes": float64(0),
 }
 
-var threadPoolExpected = map[string]interface{}{
+var nodestatsThreadPoolExpected = map[string]interface{}{
 	"merge_threads": float64(6),
 	"merge_queue":   float64(4),
 	"merge_active":  float64(5),
@@ -726,7 +726,7 @@ var threadPoolExpected = map[string]interface{}{
 	"flush_completed": float64(3),
 }
 
-var fsExpected = map[string]interface{}{
+var nodestatsFsExpected = map[string]interface{}{
 	"data_0_total_in_bytes":     float64(19507089408),
 	"data_0_free_in_bytes":      float64(16909316096),
 	"data_0_available_in_bytes": float64(15894814720),
@@ -736,7 +736,7 @@ var fsExpected = map[string]interface{}{
 	"total_total_in_bytes": float64(19507089408),
 }
 
-var transportExpected = map[string]interface{}{
+var nodestatsTransportExpected = map[string]interface{}{
 	"server_open":      float64(13),
 	"rx_count":         float64(6),
 	"rx_size_in_bytes": float64(1380),
@@ -744,12 +744,12 @@ var transportExpected = map[string]interface{}{
 	"tx_size_in_bytes": float64(1380),
 }
 
-var httpExpected = map[string]interface{}{
+var nodestatsHttpExpected = map[string]interface{}{
 	"current_open": float64(3),
 	"total_opened": float64(3),
 }
 
-var breakersExpected = map[string]interface{}{
+var nodestatsBreakersExpected = map[string]interface{}{
 	"fielddata_estimated_size_in_bytes": float64(0),
 	"fielddata_overhead":                float64(1.03),
 	"fielddata_tripped":                 float64(0),
@@ -763,3 +763,273 @@ var breakersExpected = map[string]interface{}{
 	"parent_limit_size_in_bytes":     float64(727213670),
 	"parent_estimated_size_in_bytes": float64(0),
 }
+
+const clusterStatsResponse = `
+{
+   "host":"ip-10-0-1-214",
+   "log_type":"metrics",
+   "timestamp":1475767451229,
+   "log_level":"INFO",
+   "node_name":"test.host.com",
+   "cluster_name":"es-testcluster",
+   "status":"red",
+   "indices":{
+      "count":1,
+      "shards":{
+         "total":4,
+         "primaries":4,
+         "replication":0.0,
+         "index":{
+            "shards":{
+               "min":4,
+               "max":4,
+               "avg":4.0
+            },
+            "primaries":{
+               "min":4,
+               "max":4,
+               "avg":4.0
+            },
+            "replication":{
+               "min":0.0,
+               "max":0.0,
+               "avg":0.0
+            }
+         }
+      },
+      "docs":{
+         "count":4,
+         "deleted":0
+      },
+      "store":{
+         "size_in_bytes":17084,
+         "throttle_time_in_millis":0
+      },
+      "fielddata":{
+         "memory_size_in_bytes":0,
+         "evictions":0
+      },
+      "query_cache":{
+         "memory_size_in_bytes":0,
+         "total_count":0,
+         "hit_count":0,
+         "miss_count":0,
+         "cache_size":0,
+         "cache_count":0,
+         "evictions":0
+      },
+      "completion":{
+         "size_in_bytes":0
+      },
+      "segments":{
+         "count":4,
+         "memory_in_bytes":11828,
+         "terms_memory_in_bytes":8932,
+         "stored_fields_memory_in_bytes":1248,
+         "term_vectors_memory_in_bytes":0,
+         "norms_memory_in_bytes":1280,
+         "doc_values_memory_in_bytes":368,
+         "index_writer_memory_in_bytes":0,
+         "index_writer_max_memory_in_bytes":2048000,
+         "version_map_memory_in_bytes":0,
+         "fixed_bit_set_memory_in_bytes":0
+      },
+      "percolate":{
+         "total":0,
+         "time_in_millis":0,
+         "current":0,
+         "memory_size_in_bytes":-1,
+         "memory_size":"-1b",
+         "queries":0
+      }
+   },
+   "nodes":{
+      "count":{
+         "total":1,
+         "master_only":0,
+         "data_only":0,
+         "master_data":1,
+         "client":0
+      },
+      "versions":[
+         {
+            "version": "2.3.3"
+         }
+      ],
+      "os":{
+         "available_processors":1,
+         "allocated_processors":1,
+         "mem":{
+            "total_in_bytes":593301504
+         },
+         "names":[
+            {
+               "name":"Linux",
+               "count":1
+            }
+         ]
+      },
+      "process":{
+         "cpu":{
+            "percent":0
+         },
+         "open_file_descriptors":{
+            "min":145,
+            "max":145,
+            "avg":145
+         }
+      },
+      "jvm":{
+         "max_uptime_in_millis":11580527,
+         "versions":[
+            {
+               "version":"1.8.0_101",
+               "vm_name":"OpenJDK 64-Bit Server VM",
+               "vm_version":"25.101-b13",
+               "vm_vendor":"Oracle Corporation",
+               "count":1
+            }
+         ],
+         "mem":{
+            "heap_used_in_bytes":70550288,
+            "heap_max_in_bytes":1065025536
+         },
+         "threads":30
+      },
+      "fs":{
+         "total_in_bytes":8318783488,
+         "free_in_bytes":6447439872,
+         "available_in_bytes":6344785920
+      },
+      "plugins":[
+         {
+            "name":"cloud-aws",
+            "version":"2.3.3",
+            "description":"The Amazon Web Service (AWS) Cloud plugin allows to use AWS API for the unicast discovery mechanism and add S3 repositories.",
+            "jvm":true,
+            "classname":"org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin",
+            "isolated":true,
+            "site":false
+         },
+         {
+            "name":"kopf",
+            "version":"2.0.1",
+            "description":"kopf - simple web administration tool for Elasticsearch",
+            "url":"/_plugin/kopf/",
+            "jvm":false,
+            "site":true
+         },
+         {
+            "name":"tr-metrics",
+            "version":"7bd5b4b",
+            "description":"Logs cluster and node stats for performance monitoring.",
+            "jvm":true,
+            "classname":"com.trgr.elasticsearch.plugin.metrics.MetricsPlugin",
+            "isolated":true,
+            "site":false
+         }
+      ]
+   }
+}
+`
+
+var clusterstatsIndicesExpected = map[string]interface{}{
+	"completion_size_in_bytes":                   float64(0),
+	"count":                                      float64(1),
+	"docs_count":                                 float64(4),
+	"docs_deleted":                               float64(0),
+	"fielddata_evictions":                        float64(0),
+	"fielddata_memory_size_in_bytes":             float64(0),
+	"percolate_current":                          float64(0),
+	"percolate_memory_size_in_bytes":             float64(-1),
+	"percolate_queries":                          float64(0),
+	"percolate_time_in_millis":                   float64(0),
+	"percolate_total":                            float64(0),
+	"percolate_memory_size":                      "-1b",
+	"query_cache_cache_count":                    float64(0),
+	"query_cache_cache_size":                     float64(0),
+	"query_cache_evictions":                      float64(0),
+	"query_cache_hit_count":                      float64(0),
+	"query_cache_memory_size_in_bytes":           float64(0),
+	"query_cache_miss_count":                     float64(0),
+	"query_cache_total_count":                    float64(0),
+	"segments_count":                             float64(4),
+	"segments_doc_values_memory_in_bytes":        float64(368),
+	"segments_fixed_bit_set_memory_in_bytes":     float64(0),
+	"segments_index_writer_max_memory_in_bytes":  float64(2.048e+06),
+	"segments_index_writer_memory_in_bytes":      float64(0),
+	"segments_memory_in_bytes":                   float64(11828),
+	"segments_norms_memory_in_bytes":             float64(1280),
+	"segments_stored_fields_memory_in_bytes":     float64(1248),
+	"segments_term_vectors_memory_in_bytes":      float64(0),
+	"segments_terms_memory_in_bytes":             float64(8932),
+	"segments_version_map_memory_in_bytes":       float64(0),
+	"shards_index_primaries_avg":                 float64(4),
+	"shards_index_primaries_max":                 float64(4),
+	"shards_index_primaries_min":                 float64(4),
+	"shards_index_replication_avg":               float64(0),
+	"shards_index_replication_max":               float64(0),
+	"shards_index_replication_min":               float64(0),
+	"shards_index_shards_avg":                    float64(4),
+	"shards_index_shards_max":                    float64(4),
+	"shards_index_shards_min":                    float64(4),
+	"shards_primaries":                           float64(4),
+	"shards_replication":                         float64(0),
+	"shards_total":                               float64(4),
+	"store_size_in_bytes":                        float64(17084),
+	"store_throttle_time_in_millis":              float64(0),
+}
+
+var clusterstatsNodesExpected = map[string]interface{}{
+	"count_client":                       float64(0),
+	"count_data_only":                    float64(0),
+	"count_master_data":                  float64(1),
+	"count_master_only":                  float64(0),
+	"count_total":                        float64(1),
+	"fs_available_in_bytes":              float64(6.34478592e+09),
+	"fs_free_in_bytes":                   float64(6.447439872e+09),
+	"fs_total_in_bytes":                  float64(8.318783488e+09),
+	"jvm_max_uptime_in_millis":           float64(1.1580527e+07),
+	"jvm_mem_heap_max_in_bytes":          float64(1.065025536e+09),
+	"jvm_mem_heap_used_in_bytes":         float64(7.0550288e+07),
+	"jvm_threads":                        float64(30),
+	"jvm_versions_0_count":               float64(1),
+	"jvm_versions_0_version":             "1.8.0_101",
+	"jvm_versions_0_vm_name":             "OpenJDK 64-Bit Server VM",
+	"jvm_versions_0_vm_vendor":           "Oracle Corporation",
+	"jvm_versions_0_vm_version":          "25.101-b13",
+	"os_allocated_processors":            float64(1),
+	"os_available_processors":            float64(1),
+	"os_mem_total_in_bytes":              float64(5.93301504e+08),
+	"os_names_0_count":                   float64(1),
+	"os_names_0_name":                    "Linux",
+	"process_cpu_percent":                float64(0),
+	"process_open_file_descriptors_avg":  float64(145),
+	"process_open_file_descriptors_max":  float64(145),
+	"process_open_file_descriptors_min":  float64(145),
+	"versions_0_version":                 "2.3.3",
+	"plugins_0_classname":                "org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin",
+	"plugins_0_description":              "The Amazon Web Service (AWS) Cloud plugin allows to use AWS API for the unicast discovery mechanism and add S3 repositories.",
+	"plugins_0_isolated":                 true,
+	"plugins_0_jvm":                      true,
+	"plugins_0_name":                     "cloud-aws",
+	"plugins_0_site":                     false,
+	"plugins_0_version":                  "2.3.3",
+	"plugins_1_description":              "kopf - simple web administration tool for Elasticsearch",
+	"plugins_1_jvm":                      false,
+	"plugins_1_name":                     "kopf",
+	"plugins_1_site":                     true,
+	"plugins_1_url":                      "/_plugin/kopf/",
+	"plugins_1_version":                  "2.0.1",
+	"plugins_2_classname":                "com.trgr.elasticsearch.plugin.metrics.MetricsPlugin",
+	"plugins_2_description":              "Logs cluster and node stats for performance monitoring.",
+	"plugins_2_isolated":                 true,
+	"plugins_2_jvm":                      true,
+	"plugins_2_name":                     "tr-metrics",
+	"plugins_2_site":                     false,
+	"plugins_2_version":                  "7bd5b4b",
+}
+
+const IsMasterResult = "SDFsfSDFsdfFSDSDfSFDSDF 10.206.124.66 10.206.124.66 test.host.com "
+
+const IsNotMasterResult = "junk 10.206.124.66 10.206.124.66 test.junk.com "
diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go
index edd5afc54..a8743558d 100644
--- a/plugins/parsers/json/parser.go
+++ b/plugins/parsers/json/parser.go
@@ -103,10 +103,22 @@ type JSONFlattener struct {
 	Fields map[string]interface{}
 }
 
-// FlattenJSON flattens nested maps/interfaces into a fields map
+// FlattenJSON flattens nested maps/interfaces into a fields map (ignoring bools and string)
 func (f *JSONFlattener) FlattenJSON(
+	fieldname string,
+	v interface{}) error {
+	if f.Fields == nil {
+		f.Fields = make(map[string]interface{})
+	}
+	return f.FullFlattenJSON(fieldname, v, false, false)
+}
+
+// FullFlattenJSON flattens nested maps/interfaces into a fields map (including bools and string)
+func (f *JSONFlattener) FullFlattenJSON(
 	fieldname string,
 	v interface{},
+	convertString bool,
+	convertBool bool,
 ) error {
 	if f.Fields == nil {
 		f.Fields = make(map[string]interface{})
 	}
@@ -115,7 +127,7 @@ func (f *JSONFlattener) FlattenJSON(
 	switch t := v.(type) {
 	case map[string]interface{}:
 		for k, v := range t {
-			err := f.FlattenJSON(fieldname+"_"+k+"_", v)
+			err := f.FullFlattenJSON(fieldname+"_"+k+"_", v, convertString, convertBool)
 			if err != nil {
 				return err
 			}
@@ -123,15 +135,28 @@
 	case []interface{}:
 		for i, v := range t {
 			k := strconv.Itoa(i)
-			err := f.FlattenJSON(fieldname+"_"+k+"_", v)
+			err := f.FullFlattenJSON(fieldname+"_"+k+"_", v, convertString, convertBool)
 			if err != nil {
 				return nil
 			}
 		}
 	case float64:
 		f.Fields[fieldname] = t
-	case bool, string, nil:
+	case string:
+		if convertString {
+			f.Fields[fieldname] = v.(string)
+		} else {
+			return nil
+		}
+	case bool:
+		if convertBool {
+			f.Fields[fieldname] = v.(bool)
+		} else {
+			return nil
+		}
+	case nil:
 		// ignored types
+		fmt.Println("json parser ignoring " + fieldname)
 		return nil
 	default:
 		return fmt.Errorf("JSON Flattener: got unexpected type %T with value %v (%s)",

From 0ae1e0611c5fc43cb859e2557a50e1b65b76b0df Mon Sep 17 00:00:00 2001
From: Cameron Sparr
Date: Tue, 20 Dec 2016 16:30:49 +0000
Subject: [PATCH 0041/1302] changelog update

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a444b19ca..b85e95195 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -31,6 +31,7 @@ in their config file.
 - [#2006](https://github.com/influxdata/telegraf/pull/2006): Added response_timeout property to prometheus input plugin.
 - [#1763](https://github.com/influxdata/telegraf/issues/1763): Pulling github.com/lxn/win's pdh wrapper into telegraf.
 - [#1898](https://github.com/influxdata/telegraf/issues/1898): Support negative statsd counters.
+- [#1921](https://github.com/influxdata/telegraf/issues/1921): Elasticsearch cluster stats support.
 
 ### Bugfixes
 
From 200237a51559de7f36453e33af736ef23e7af129 Mon Sep 17 00:00:00 2001
From: Cameron Sparr
Date: Tue, 20 Dec 2016 17:48:06 +0000
Subject: [PATCH 0042/1302] Do not create a global statsd "previous instance"

this basically reverts #887

at some point we might want to do some special handling of reloading
plugins and keeping their state intact, but that will need to be done at
a higher level, and in a way that is thread-safe for multiple input
plugins of the same type. Unfortunately this is a rather large feature
that will not have a quick fix available for it.

fixes #1975
fixes #2102
---
 CHANGELOG.md                    | 10 ++++++++++
 plugins/inputs/statsd/statsd.go | 18 ++++--------------
 2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b85e95195..ed25cf7f7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,15 @@
 will change te default behavior for users who were not specifying these parameters
 in their config file.
 
+- The StatsD plugin will also no longer save it's state on a service reload.
+Essentially we have reverted PR [#887](https://github.com/influxdata/telegraf/pull/887).
+The reason for this is that saving the state in a global variable is not
+thread-safe (see [#1975](https://github.com/influxdata/telegraf/issues/1975) & [#2102](https://github.com/influxdata/telegraf/issues/2102)),
+and this creates issues if users want to define multiple instances
+of the statsd plugin. Saving state on reload may be considered in the future,
+but this would need to be implemented at a higher level and applied to all
+plugins, not just statsd.
+
 ### Features
 
 - [#2123](https://github.com/influxdata/telegraf/pull/2123): Fix improper calculation of CPU percentages
diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go
index 1b0189a9e..d2c627b8a 100644
--- a/plugins/inputs/statsd/statsd.go
+++ b/plugins/inputs/statsd/statsd.go
@@ -32,8 +32,6 @@ var dropwarn = "E! Error: statsd message queue full. " +
 	"We have dropped %d messages so far. " +
 	"You may want to increase allowed_pending_messages in the config\n"
 
-var prevInstance *Statsd
-
 type Statsd struct {
 	// Address & Port to serve from
 	ServiceAddress string
@@ -244,17 +242,10 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error {
 	s.done = make(chan struct{})
 	s.in = make(chan []byte, s.AllowedPendingMessages)
 
-	if prevInstance == nil {
-		s.gauges = make(map[string]cachedgauge)
-		s.counters = make(map[string]cachedcounter)
-		s.sets = make(map[string]cachedset)
-		s.timings = make(map[string]cachedtimings)
-	} else {
-		s.gauges = prevInstance.gauges
-		s.counters = prevInstance.counters
-		s.sets = prevInstance.sets
-		s.timings = prevInstance.timings
-	}
+	s.gauges = make(map[string]cachedgauge)
+	s.counters = make(map[string]cachedcounter)
+	s.sets = make(map[string]cachedset)
+	s.timings = make(map[string]cachedtimings)
 
 	if s.ConvertNames {
 		log.Printf("I! WARNING statsd: convert_names config option is deprecated," +
@@ -271,7 +262,6 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error {
 	// Start the line parser
 	go s.parser()
 	log.Printf("I! Started the statsd service on %s\n", s.ServiceAddress)
-	prevInstance = s
 	return nil
 }
 
From 8a982ca68fd1a08451f9c1362e243f10ca642f25 Mon Sep 17 00:00:00 2001
From: Mark Wolfe
Date: Wed, 21 Dec 2016 05:49:28 +1100
Subject: [PATCH 0043/1302] Moved to using the inbuilt serializer. (#1942)

* Moved to using the inbuilt serializer.

* Remove Atomic variable as it is not required.

* Adjusted metric type in line with latest changes.
---
 plugins/outputs/kinesis/kinesis.go      | 46 +++++++++++++------------
 plugins/outputs/kinesis/kinesis_test.go | 39 ---------------------
 2 files changed, 24 insertions(+), 61 deletions(-)
 delete mode 100644 plugins/outputs/kinesis/kinesis_test.go

diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go
index a30ab8801..8cbdea682 100644
--- a/plugins/outputs/kinesis/kinesis.go
+++ b/plugins/outputs/kinesis/kinesis.go
@@ -1,10 +1,8 @@
 package kinesis
 
 import (
-	"fmt"
 	"log"
 	"os"
-	"sync/atomic"
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
@@ -13,6 +11,7 @@ import (
 	"github.com/influxdata/telegraf"
 	internalaws "github.com/influxdata/telegraf/internal/config/aws"
 	"github.com/influxdata/telegraf/plugins/outputs"
+	"github.com/influxdata/telegraf/plugins/serializers"
 )
 
 type KinesisOutput struct {
@@ -26,9 +25,10 @@ type KinesisOutput struct {
 
 	StreamName   string `toml:"streamname"`
 	PartitionKey string `toml:"partitionkey"`
-	Format       string `toml:"format"`
 	Debug        bool   `toml:"debug"`
 	svc          *kinesis.Kinesis
+
+	serializer serializers.Serializer
 }
 
 var sampleConfig = `
@@ -54,9 +54,13 @@ var sampleConfig = `
   streamname = "StreamName"
   ## PartitionKey as used for sharding data.
  partitionkey = "PartitionKey"
-  ## format of the Data payload in the kinesis PutRecord, supported
-  ## String and Custom.
-  format = "string"
+
+  ## Data format to output.
+  ## Each data format has it's own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "influx"
+
  ## debug will show upstream aws messages.
  debug = false
 `
@@ -125,16 +129,8 @@ func (k *KinesisOutput) Close() error {
 	return nil
 }
 
-func FormatMetric(k *KinesisOutput, point telegraf.Metric) (string, error) {
-	if k.Format == "string" {
-		return point.String(), nil
-	} else {
-		m := fmt.Sprintf("%+v,%+v,%+v",
-			point.Name(),
-			point.Tags(),
-			point.String())
-		return m, nil
-	}
+func (k *KinesisOutput) SetSerializer(serializer serializers.Serializer) {
+	k.serializer = serializer
 }
 
 func writekinesis(k *KinesisOutput, r []*kinesis.PutRecordsRequestEntry) time.Duration {
@@ -161,7 +157,7 @@
 
 func (k *KinesisOutput) Write(metrics []telegraf.Metric) error {
-	var sz uint32 = 0
+	var sz uint32
 
 	if len(metrics) == 0 {
 		return nil
@@ -169,23 +165,29 @@
 
 	r := []*kinesis.PutRecordsRequestEntry{}
 
-	for _, p := range metrics {
-		atomic.AddUint32(&sz, 1)
+	for _, metric := range metrics {
+		sz++
+
+		values, err := k.serializer.Serialize(metric)
+		if err != nil {
+			return err
+		}
 
-		metric, _ := FormatMetric(k, p)
 		d := kinesis.PutRecordsRequestEntry{
-			Data:         []byte(metric),
+			Data:         values,
 			PartitionKey: aws.String(k.PartitionKey),
 		}
+
 		r = append(r, &d)
 
 		if sz == 500 {
 			// Max Messages Per PutRecordRequest is 500
 			elapsed := writekinesis(k, r)
 			log.Printf("E! Wrote a %+v point batch to Kinesis in %+v.\n", sz, elapsed)
-			atomic.StoreUint32(&sz, 0)
+			sz = 0
 			r = nil
 		}
+
 	}
 
 	writekinesis(k, r)
diff --git a/plugins/outputs/kinesis/kinesis_test.go b/plugins/outputs/kinesis/kinesis_test.go
deleted file mode 100644
index de365fa99..000000000
--- a/plugins/outputs/kinesis/kinesis_test.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package kinesis
-
-import (
-	"github.com/influxdata/telegraf/testutil"
-	"github.com/stretchr/testify/require"
-	"testing"
-)
-
-func TestFormatMetric(t *testing.T) {
-	if testing.Short() {
-		t.Skip("Skipping integration test in short mode")
-	}
-
-	k := &KinesisOutput{
-		Format: "string",
-	}
-
-	p := testutil.MockMetrics()[0]
-
-	valid_string := "test1,tag1=value1 value=1 1257894000000000000\n"
-	func_string, err := FormatMetric(k, p)
-
-	if func_string != valid_string {
-		t.Error("Expected ", valid_string)
-	}
-	require.NoError(t, err)
-
-	k = &KinesisOutput{
-		Format: "custom",
-	}
-
-	valid_custom := "test1,map[tag1:value1],test1,tag1=value1 value=1 1257894000000000000\n"
-	func_custom, err := FormatMetric(k, p)
-
-	if func_custom != valid_custom {
-		t.Error("Expected ", valid_custom)
-	}
-	require.NoError(t, err)
-}

From 7fc57812a711247c4abcb0d160bddfdc5a354ba8 Mon Sep 17 00:00:00 2001
From: Cameron Sparr
Date: Tue, 20 Dec 2016 18:50:32 +0000
Subject: [PATCH 0044/1302] changelog update

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index ed25cf7f7..07e589ce3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -41,6 +41,7 @@ plugins, not just statsd.
 - [#1898](https://github.com/influxdata/telegraf/issues/1898): Support negative statsd counters.
 - [#1921](https://github.com/influxdata/telegraf/issues/1921): Elasticsearch cluster stats support.
+- [#1942](https://github.com/influxdata/telegraf/pull/1942): Change Amazon Kinesis output plugin to use the built-in serializer plugins.
 
 ### Bugfixes
 
From a9f03a72f5aadf198f8c4ef8a99513b0801c4050 Mon Sep 17 00:00:00 2001
From: Cameron Sparr
Date: Tue, 20 Dec 2016 19:32:04 +0000
Subject: [PATCH 0045/1302] Mask username/password from error messages

closes #1980
---
 CHANGELOG.md                                  |  1 +
 plugins/inputs/elasticsearch/elasticsearch.go | 19 ++++++++++++++++---
 2 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 07e589ce3..7ad357f66 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -42,6 +42,7 @@ plugins, not just statsd.
 - [#1898](https://github.com/influxdata/telegraf/issues/1898): Support negative statsd counters.
 - [#1921](https://github.com/influxdata/telegraf/issues/1921): Elasticsearch cluster stats support.
 - [#1942](https://github.com/influxdata/telegraf/pull/1942): Change Amazon Kinesis output plugin to use the built-in serializer plugins.
+- [#1980](https://github.com/influxdata/telegraf/issues/1980): Hide username/password from elasticsearch error log messages.
 
 ### Bugfixes
 
diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go
index 5d5d64909..370e3fbdd 100644
--- a/plugins/inputs/elasticsearch/elasticsearch.go
+++ b/plugins/inputs/elasticsearch/elasticsearch.go
@@ -4,6 +4,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
+	"regexp"
 	"sync"
 	"time"
 
@@ -16,6 +17,9 @@ import (
 	"strings"
 )
 
+// mask for masking username/password from error messages
+var mask = regexp.MustCompile(`https?:\/\/\S+:\S+@`)
+
 // Nodestats are always generated, so simply define a constant for these endpoints
 const statsPath = "/_nodes/stats"
 const statsPathLocal = "/_nodes/_local/stats"
@@ -149,7 +153,7 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
 		e.client = client
 	}
 
-	errChan := errchan.New(len(e.Servers))
+	errChan := errchan.New(len(e.Servers) * 3)
 	var wg sync.WaitGroup
 	wg.Add(len(e.Servers))
 
@@ -172,17 +176,26 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
 
 			// Always gather node states
 			if err := e.gatherNodeStats(url, acc); err != nil {
+				err = fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))
 				errChan.C <- err
 				return
 			}
 
 			if e.ClusterHealth {
 				url = s + "/_cluster/health?level=indices"
-				e.gatherClusterHealth(url, acc)
+				if err := e.gatherClusterHealth(url, acc); err != nil {
+					err = fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))
+					errChan.C <- err
+					return
+				}
 			}
 
 			if e.ClusterStats && e.isMaster {
-				e.gatherClusterStats(s+"/_cluster/stats", acc)
+				if err := e.gatherClusterStats(s+"/_cluster/stats", acc); err != nil {
+					err = fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))
+					errChan.C <- err
+					return
+				}
 			}
 		}(serv, acc)
 	}

From d2787e8ef53b95c95603f9f1a339c0eb9e32df3e Mon Sep 17 00:00:00 2001
From: Mark Wolfe
Date: Wed, 21 Dec 2016 09:56:02 +1100
Subject: [PATCH 0046/1302] Fix for loop over value array range issue. (#2187)

---
 plugins/inputs/system/ps.go | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go
index a885e2d59..b0e021e40 100644
--- a/plugins/inputs/system/ps.go
+++ b/plugins/inputs/system/ps.go
@@ -73,7 +73,10 @@ func (s *systemPS) DiskUsage(
 	var usage []*disk.UsageStat
 	var partitions []*disk.PartitionStat
 
-	for _, p := range parts {
+	for i := range parts {
+
+		p := parts[i]
+
 		if len(mountPointFilter) > 0 {
 			// If the mount point is not a member of the filter set,
 			// don't gather info on it.
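The loop rewrite in the patch above sidesteps a classic Go pitfall: under the Go
semantics of the time (pre-1.22), the `range` loop variable is a single variable reused
on every iteration, so any pointer taken from it — presumably `&p` further down in
`DiskUsage`, which is not shown in this hunk — ends up aliasing the last element.
Below is a standalone sketch of the bug and the fix (illustrative only, not telegraf
code):

```go
package main

import "fmt"

type part struct{ name string }

func main() {
	parts := []part{{"sda1"}, {"sda2"}, {"sda3"}}

	// Buggy (pre-Go 1.22): p is one variable reused each iteration, so every
	// stored pointer aliases it and all three entries end up as "sda3".
	var bad []*part
	for _, p := range parts {
		bad = append(bad, &p)
	}

	// Fixed (the pattern used in the patch): index into the slice and copy
	// into a variable scoped to this iteration before taking its address.
	var good []*part
	for i := range parts {
		p := parts[i]
		good = append(good, &p)
	}

	fmt.Println(bad[0].name, bad[1].name, bad[2].name)    // sda3 sda3 sda3
	fmt.Println(good[0].name, good[1].name, good[2].name) // sda1 sda2 sda3
}
```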
From bf5f2659a15e5bfae673ae66f25edff0d4c9a49c Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 20 Dec 2016 23:21:40 +0000 Subject: [PATCH 0047/1302] Do not try Uint parsing in redis plugin this is just a waste of cpu cycles, since telegraf converts all uints to int64 anyways. --- plugins/inputs/redis/redis.go | 16 ++---- plugins/inputs/redis/redis_test.go | 84 +++++++++++++++--------------- 2 files changed, 47 insertions(+), 53 deletions(-) diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index 98a6bc659..2dd947a2a 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -158,7 +158,7 @@ func gatherInfoOutput( tags map[string]string, ) error { var section string - var keyspace_hits, keyspace_misses uint64 = 0, 0 + var keyspace_hits, keyspace_misses int64 scanner := bufio.NewScanner(rdr) fields := make(map[string]interface{}) @@ -210,8 +210,8 @@ func gatherInfoOutput( val := strings.TrimSpace(parts[1]) - // Try parsing as a uint - if ival, err := strconv.ParseUint(val, 10, 64); err == nil { + // Try parsing as int + if ival, err := strconv.ParseInt(val, 10, 64); err == nil { switch name { case "keyspace_hits": keyspace_hits = ival @@ -219,18 +219,12 @@ func gatherInfoOutput( keyspace_misses = ival case "rdb_last_save_time": // influxdb can't calculate this, so we have to do it - fields["rdb_last_save_time_elapsed"] = uint64(time.Now().Unix()) - ival + fields["rdb_last_save_time_elapsed"] = time.Now().Unix() - ival } fields[metric] = ival continue } - // Try parsing as an int - if ival, err := strconv.ParseInt(val, 10, 64); err == nil { - fields[metric] = ival - continue - } - // Try parsing as a float if fval, err := strconv.ParseFloat(val, 64); err == nil { fields[metric] = fval @@ -275,7 +269,7 @@ func gatherKeyspaceLine( dbparts := strings.Split(line, ",") for _, dbp := range dbparts { kv := strings.Split(dbp, "=") - ival, err := strconv.ParseUint(kv[1], 10, 64) + ival, err := strconv.ParseInt(kv[1], 10, 64) if err == nil { fields[kv[0]] = ival } diff --git a/plugins/inputs/redis/redis_test.go b/plugins/inputs/redis/redis_test.go index cf62da0bd..0c3781500 100644 --- a/plugins/inputs/redis/redis_test.go +++ b/plugins/inputs/redis/redis_test.go @@ -39,53 +39,53 @@ func TestRedis_ParseMetrics(t *testing.T) { tags = map[string]string{"host": "redis.net", "replication_role": "master"} fields := map[string]interface{}{ - "uptime": uint64(238), - "lru_clock": uint64(2364819), - "clients": uint64(1), - "client_longest_output_list": uint64(0), - "client_biggest_input_buf": uint64(0), - "blocked_clients": uint64(0), - "used_memory": uint64(1003936), - "used_memory_rss": uint64(811008), - "used_memory_peak": uint64(1003936), - "used_memory_lua": uint64(33792), + "uptime": int64(238), + "lru_clock": int64(2364819), + "clients": int64(1), + "client_longest_output_list": int64(0), + "client_biggest_input_buf": int64(0), + "blocked_clients": int64(0), + "used_memory": int64(1003936), + "used_memory_rss": int64(811008), + "used_memory_peak": int64(1003936), + "used_memory_lua": int64(33792), "mem_fragmentation_ratio": float64(0.81), - "loading": uint64(0), - "rdb_changes_since_last_save": uint64(0), - "rdb_bgsave_in_progress": uint64(0), - "rdb_last_save_time": uint64(1428427941), + "loading": int64(0), + "rdb_changes_since_last_save": int64(0), + "rdb_bgsave_in_progress": int64(0), + "rdb_last_save_time": int64(1428427941), "rdb_last_bgsave_status": "ok", "rdb_last_bgsave_time_sec": int64(-1), "rdb_current_bgsave_time_sec": int64(-1), - 
"aof_enabled": uint64(0), - "aof_rewrite_in_progress": uint64(0), - "aof_rewrite_scheduled": uint64(0), + "aof_enabled": int64(0), + "aof_rewrite_in_progress": int64(0), + "aof_rewrite_scheduled": int64(0), "aof_last_rewrite_time_sec": int64(-1), "aof_current_rewrite_time_sec": int64(-1), "aof_last_bgrewrite_status": "ok", "aof_last_write_status": "ok", - "total_connections_received": uint64(2), - "total_commands_processed": uint64(1), - "instantaneous_ops_per_sec": uint64(0), + "total_connections_received": int64(2), + "total_commands_processed": int64(1), + "instantaneous_ops_per_sec": int64(0), "instantaneous_input_kbps": float64(876.16), "instantaneous_output_kbps": float64(3010.23), - "rejected_connections": uint64(0), - "sync_full": uint64(0), - "sync_partial_ok": uint64(0), - "sync_partial_err": uint64(0), - "expired_keys": uint64(0), - "evicted_keys": uint64(0), - "keyspace_hits": uint64(1), - "keyspace_misses": uint64(1), - "pubsub_channels": uint64(0), - "pubsub_patterns": uint64(0), - "latest_fork_usec": uint64(0), - "connected_slaves": uint64(0), - "master_repl_offset": uint64(0), - "repl_backlog_active": uint64(0), - "repl_backlog_size": uint64(1048576), - "repl_backlog_first_byte_offset": uint64(0), - "repl_backlog_histlen": uint64(0), + "rejected_connections": int64(0), + "sync_full": int64(0), + "sync_partial_ok": int64(0), + "sync_partial_err": int64(0), + "expired_keys": int64(0), + "evicted_keys": int64(0), + "keyspace_hits": int64(1), + "keyspace_misses": int64(1), + "pubsub_channels": int64(0), + "pubsub_patterns": int64(0), + "latest_fork_usec": int64(0), + "connected_slaves": int64(0), + "master_repl_offset": int64(0), + "repl_backlog_active": int64(0), + "repl_backlog_size": int64(1048576), + "repl_backlog_first_byte_offset": int64(0), + "repl_backlog_histlen": int64(0), "used_cpu_sys": float64(0.14), "used_cpu_user": float64(0.05), "used_cpu_sys_children": float64(0.00), @@ -102,15 +102,15 @@ func TestRedis_ParseMetrics(t *testing.T) { } } assert.InDelta(t, - uint64(time.Now().Unix())-fields["rdb_last_save_time"].(uint64), - fields["rdb_last_save_time_elapsed"].(uint64), + time.Now().Unix()-fields["rdb_last_save_time"].(int64), + fields["rdb_last_save_time_elapsed"].(int64), 2) // allow for 2 seconds worth of offset keyspaceTags := map[string]string{"host": "redis.net", "replication_role": "master", "database": "db0"} keyspaceFields := map[string]interface{}{ - "avg_ttl": uint64(0), - "expires": uint64(0), - "keys": uint64(2), + "avg_ttl": int64(0), + "expires": int64(0), + "keys": int64(2), } acc.AssertContainsTaggedFields(t, "redis", fields, tags) acc.AssertContainsTaggedFields(t, "redis_keyspace", keyspaceFields, keyspaceTags) From b762546fa7aa17be693f392cdf719fbebaccf663 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 21 Dec 2016 10:51:07 +0000 Subject: [PATCH 0048/1302] docker: check type when totalling blkio & net metrics closes #2027 --- CHANGELOG.md | 1 + plugins/inputs/docker/docker.go | 30 ++++++++++++++++++++++++++---- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ad357f66..dd0f848a2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -64,6 +64,7 @@ plugins, not just statsd. - [#1825](https://github.com/influxdata/telegraf/issues/1825): Consul plugin: add check_id as a tag in metrics to avoid overwrites. - [#1973](https://github.com/influxdata/telegraf/issues/1973): Partial fix: logparser CLF pattern with IPv6 addresses. 
- [#1975](https://github.com/influxdata/telegraf/issues/1975) & [#2102](https://github.com/influxdata/telegraf/issues/2102): Fix thread-safety when using multiple instances of the statsd input plugin. +- [#2027](https://github.com/influxdata/telegraf/issues/2027): docker input: interface conversion panic fix. ## v1.1.2 [2016-12-12] diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 7fc48689f..82a3791b6 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -368,11 +368,22 @@ func gatherContainerStats( if field == "container_id" { continue } + + var uintV uint64 + switch v := value.(type) { + case uint64: + uintV = v + case int64: + uintV = uint64(v) + default: + continue + } + _, ok := totalNetworkStatMap[field] if ok { - totalNetworkStatMap[field] = totalNetworkStatMap[field].(uint64) + value.(uint64) + totalNetworkStatMap[field] = totalNetworkStatMap[field].(uint64) + uintV } else { - totalNetworkStatMap[field] = value + totalNetworkStatMap[field] = uintV } } } @@ -491,11 +502,22 @@ func gatherBlockIOMetrics( if field == "container_id" { continue } + + var uintV uint64 + switch v := value.(type) { + case uint64: + uintV = v + case int64: + uintV = uint64(v) + default: + continue + } + _, ok := totalStatMap[field] if ok { - totalStatMap[field] = totalStatMap[field].(uint64) + value.(uint64) + totalStatMap[field] = totalStatMap[field].(uint64) + uintV } else { - totalStatMap[field] = value + totalStatMap[field] = uintV } } } From 37bc9cf79514184f065519bbacf972d0b686aed7 Mon Sep 17 00:00:00 2001 From: Dominik Labuda Date: Wed, 21 Dec 2016 13:41:58 +0100 Subject: [PATCH 0049/1302] [plugins] jolokia input plugin: configurable http timeouts (#2098) --- CHANGELOG.md | 1 + plugins/inputs/jolokia/README.md | 11 +++++++++- plugins/inputs/jolokia/jolokia.go | 34 ++++++++++++++++++++++++++----- 3 files changed, 40 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dd0f848a2..dd044a941 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -43,6 +43,7 @@ plugins, not just statsd. - [#1921](https://github.com/influxdata/telegraf/issues/1921): Elasticsearch cluster stats support. - [#1942](https://github.com/influxdata/telegraf/pull/1942): Change Amazon Kinesis output plugin to use the built-in serializer plugins. - [#1980](https://github.com/influxdata/telegraf/issues/1980): Hide username/password from elasticsearch error log messages. +- [#2097](https://github.com/influxdata/telegraf/issues/2097): Configurable HTTP timeouts in Jolokia plugin ### Bugfixes diff --git a/plugins/inputs/jolokia/README.md b/plugins/inputs/jolokia/README.md index d25ab6f46..9d33c8a2b 100644 --- a/plugins/inputs/jolokia/README.md +++ b/plugins/inputs/jolokia/README.md @@ -18,7 +18,16 @@ # [inputs.jolokia.proxy] # host = "127.0.0.1" # port = "8080" - + + ## Optional http timeouts + ## + ## response_header_timeout, if non-zero, specifies the amount of time to wait + ## for a server's response headers after fully writing the request. + # response_header_timeout = "3s" + ## + ## client_timeout specifies a time limit for requests made by this client. + ## Includes connection time, any redirects, and reading the response body. 
+ # client_timeout = "4s" ## List of servers exposing jolokia read service [[inputs.jolokia.servers]] diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go index 812e5e66b..32e6a9f57 100644 --- a/plugins/inputs/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -11,9 +11,14 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) +// Default http timeouts +var DefaultResponseHeaderTimeout = internal.Duration{Duration: 3 * time.Second} +var DefaultClientTimeout = internal.Duration{Duration: 4 * time.Second} + type Server struct { Name string Host string @@ -48,6 +53,9 @@ type Jolokia struct { Servers []Server Metrics []Metric Proxy Server + + ResponseHeaderTimeout internal.Duration `toml:"response_header_timeout"` + ClientTimeout internal.Duration `toml:"client_timeout"` } const sampleConfig = ` @@ -66,6 +74,15 @@ const sampleConfig = ` # host = "127.0.0.1" # port = "8080" + ## Optional http timeouts + ## + ## response_header_timeout, if non-zero, specifies the amount of time to wait + ## for a server's response headers after fully writing the request. + # response_header_timeout = "3s" + ## + ## client_timeout specifies a time limit for requests made by this client. + ## Includes connection time, any redirects, and reading the response body. + # client_timeout = "4s" ## List of servers exposing jolokia read service [[inputs.jolokia.servers]] @@ -232,6 +249,15 @@ func extractValues(measurement string, value interface{}, fields map[string]inte } func (j *Jolokia) Gather(acc telegraf.Accumulator) error { + + if j.jClient == nil { + tr := &http.Transport{ResponseHeaderTimeout: j.ResponseHeaderTimeout.Duration} + j.jClient = &JolokiaClientImpl{&http.Client{ + Transport: tr, + Timeout: j.ClientTimeout.Duration, + }} + } + servers := j.Servers metrics := j.Metrics tags := make(map[string]string) @@ -272,11 +298,9 @@ func (j *Jolokia) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("jolokia", func() telegraf.Input { - tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)} - client := &http.Client{ - Transport: tr, - Timeout: time.Duration(4 * time.Second), + return &Jolokia{ + ResponseHeaderTimeout: DefaultResponseHeaderTimeout, + ClientTimeout: DefaultClientTimeout, } - return &Jolokia{jClient: &JolokiaClientImpl{client: client}} }) } From fd1feff7b4cb48cbde3adc25c0a44a6d7bea947f Mon Sep 17 00:00:00 2001 From: YKlausz Date: Wed, 21 Dec 2016 18:23:54 +0100 Subject: [PATCH 0050/1302] Remove print call in cassandra plugin (#2192) --- plugins/inputs/cassandra/cassandra.go | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/inputs/cassandra/cassandra.go b/plugins/inputs/cassandra/cassandra.go index dc4bb2b72..710a2b661 100644 --- a/plugins/inputs/cassandra/cassandra.go +++ b/plugins/inputs/cassandra/cassandra.go @@ -289,7 +289,6 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error { requestUrl.User = url.UserPassword(serverTokens["user"], serverTokens["passwd"]) } - fmt.Printf("host %s url %s\n", serverTokens["host"], requestUrl) out, err := c.getAttr(requestUrl) if out["status"] != 200.0 { From 4b08d127e0c87235d560f6633ebd79f4a0457235 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 6 Jan 2017 13:11:24 +0100 Subject: [PATCH 0051/1302] mongodb: dont print unecessary & inaccurate auth failure closes #2209 --- plugins/inputs/mongodb/mongodb.go | 1 - 1 file changed, 1 deletion(-) diff --git 
a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index a4bdabd96..0bf822a4c 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -130,7 +130,6 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error { sess, err := mgo.DialWithInfo(dialInfo) if err != nil { - fmt.Printf("error dialing over ssl, %s\n", err.Error()) return fmt.Errorf("Unable to connect to MongoDB, %s\n", err.Error()) } server.Session = sess From 7279018cfee7827fb67c4487c41fc3c35f255977 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 9 Jan 2017 12:28:13 +0000 Subject: [PATCH 0052/1302] readme fixup & test output fixup --- internal/models/running_input.go | 2 +- plugins/inputs/system/SYSTEM_README.md | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/internal/models/running_input.go b/internal/models/running_input.go index 4279a7f62..e9b757bbf 100644 --- a/internal/models/running_input.go +++ b/internal/models/running_input.go @@ -75,7 +75,7 @@ func (r *RunningInput) MakeMetric( ) if r.trace && m != nil { - fmt.Println("> " + m.String()) + fmt.Print("> " + m.String()) } r.MetricsGathered.Incr(1) diff --git a/plugins/inputs/system/SYSTEM_README.md b/plugins/inputs/system/SYSTEM_README.md index fc873c7e8..2fcde4e6b 100644 --- a/plugins/inputs/system/SYSTEM_README.md +++ b/plugins/inputs/system/SYSTEM_README.md @@ -19,6 +19,7 @@ to the unix `uptime` command. - load15 (float) - load5 (float) - n_users (integer) + - n_cpus (integer) - uptime (integer, seconds) - uptime_format (string) @@ -31,5 +32,7 @@ None ``` $ telegraf -config ~/ws/telegraf.conf -input-filter system -test * Plugin: system, Collection 1 -> system load1=2.05,load15=2.38,load5=2.03,n_users=4i,uptime=239043i,uptime_format="2 days, 18:24" 1457546165399253452 +* Plugin: inputs.system, Collection 1 +> system,host=tyrion load1=3.72,load5=2.4,load15=2.1,n_users=3i,n_cpus=4i 1483964144000000000 +> system,host=tyrion uptime=1249632i,uptime_format="14 days, 11:07" 1483964144000000000 ``` From 5f6766f6e18ec8f5d3e639fd70364d0ed5574aca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Vizcaino?= Date: Mon, 9 Jan 2017 13:51:15 +0100 Subject: [PATCH 0053/1302] ceph: sample config should reflect actual defaults (#2228) --- plugins/inputs/ceph/README.md | 2 +- plugins/inputs/ceph/ceph.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/ceph/README.md b/plugins/inputs/ceph/README.md index 49ae09e73..b3bba1e50 100644 --- a/plugins/inputs/ceph/README.md +++ b/plugins/inputs/ceph/README.md @@ -82,7 +82,7 @@ the cluster. 
The currently supported commands are: ## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config ## to be specified - gather_cluster_stats = true + gather_cluster_stats = false ``` ### Measurements & Fields: diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go index 9f0a6ac78..e43c3d7d3 100644 --- a/plugins/inputs/ceph/ceph.go +++ b/plugins/inputs/ceph/ceph.go @@ -68,7 +68,7 @@ var sampleConfig = ` gather_admin_socket_stats = true ## Whether to gather statistics via ceph commands - gather_cluster_stats = true + gather_cluster_stats = false ` func (c *Ceph) SampleConfig() string { From a658e6c509510b39d2fc924c64bf97863265dc94 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Mon, 9 Jan 2017 08:03:33 -0500 Subject: [PATCH 0054/1302] ensure proper context on snmp error messages (#2220) --- CHANGELOG.md | 1 + plugins/inputs/snmp/snmp.go | 22 +++++++++++----------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dd044a941..74bed1278 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -66,6 +66,7 @@ plugins, not just statsd. - [#1973](https://github.com/influxdata/telegraf/issues/1973): Partial fix: logparser CLF pattern with IPv6 addresses. - [#1975](https://github.com/influxdata/telegraf/issues/1975) & [#2102](https://github.com/influxdata/telegraf/issues/2102): Fix thread-safety when using multiple instances of the statsd input plugin. - [#2027](https://github.com/influxdata/telegraf/issues/2027): docker input: interface conversion panic fix. +- [#1814](https://github.com/influxdata/telegraf/issues/1814): snmp: ensure proper context is present on error messages ## v1.1.2 [2016-12-12] diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 5426bf28c..6f515e227 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -146,13 +146,13 @@ func (s *Snmp) init() error { for i := range s.Tables { if err := s.Tables[i].init(); err != nil { - return err + return Errorf(err, "initializing table %s", s.Tables[i].Name) } } for i := range s.Fields { if err := s.Fields[i].init(); err != nil { - return err + return Errorf(err, "initializing field %s", s.Fields[i].Name) } } @@ -192,7 +192,7 @@ func (t *Table) init() error { // initialize all the nested fields for i := range t.Fields { if err := t.Fields[i].init(); err != nil { - return err + return Errorf(err, "initializing field %s", t.Fields[i].Name) } } @@ -210,7 +210,7 @@ func (t *Table) initBuild() error { _, _, oidText, fields, err := snmpTable(t.Oid) if err != nil { - return Errorf(err, "initializing table %s", t.Oid) + return err } if t.Name == "" { t.Name = oidText @@ -252,7 +252,7 @@ func (f *Field) init() error { _, oidNum, oidText, conversion, err := snmpTranslate(f.Oid) if err != nil { - return Errorf(err, "translating %s", f.Oid) + return Errorf(err, "translating") } f.Oid = oidNum if f.Name == "" { @@ -358,7 +358,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { // Now is the real tables. for _, t := range s.Tables { if err := s.gatherTable(acc, gs, t, topTags, true); err != nil { - acc.AddError(Errorf(err, "agent %s", agent)) + acc.AddError(Errorf(err, "agent %s: gathering table %s", agent, t.Name)) } } } @@ -406,7 +406,7 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { } if len(f.Oid) == 0 { - return nil, fmt.Errorf("cannot have empty OID") + return nil, fmt.Errorf("cannot have empty OID on field %s", f.Name) } var oid string if f.Oid[0] == '.' 
{ @@ -426,12 +426,12 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { // empty string. This results in all the non-table fields sharing the same // index, and being added on the same row. if pkt, err := gs.Get([]string{oid}); err != nil { - return nil, Errorf(err, "performing get") + return nil, Errorf(err, "performing get on field %s", f.Name) } else if pkt != nil && len(pkt.Variables) > 0 && pkt.Variables[0].Type != gosnmp.NoSuchObject && pkt.Variables[0].Type != gosnmp.NoSuchInstance { ent := pkt.Variables[0] fv, err := fieldConvert(f.Conversion, ent.Value) if err != nil { - return nil, Errorf(err, "converting %q", ent.Value) + return nil, Errorf(err, "converting %q (OID %s) for field %s", ent.Value, ent.Name, f.Name) } if fvs, ok := fv.(string); !ok || fvs != "" { ifv[""] = fv @@ -454,7 +454,7 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { fv, err := fieldConvert(f.Conversion, ent.Value) if err != nil { - return Errorf(err, "converting %q", ent.Value) + return Errorf(err, "converting %q (OID %s) for field %s", ent.Value, ent.Name, f.Name) } if fvs, ok := fv.(string); !ok || fvs != "" { ifv[idx] = fv @@ -463,7 +463,7 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { }) if err != nil { if _, ok := err.(NestedError); !ok { - return nil, Errorf(err, "performing bulk walk") + return nil, Errorf(err, "performing bulk walk for field %s", f.Name) } } } From 2aa2c796e5867c5652144c341d259e2b2c61746f Mon Sep 17 00:00:00 2001 From: Kurt Mackey Date: Mon, 9 Jan 2017 08:48:32 -0600 Subject: [PATCH 0055/1302] Fix for broken librato output (#2225) * Fix for broken librato output These errors are delightful, but I'd rather avoid them: ``` Error parsing /etc/telegraf/telegraf.conf, line 2: field corresponding to `api_user' is not defined in `*librato.Librato' ``` * Fixed bad format from last commit --- plugins/outputs/librato/librato.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index 3c4cb6d2a..ed020d54f 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -17,8 +17,8 @@ import ( // Librato structure for configuration and client type Librato struct { - APIUser string - APIToken string + APIUser string `toml:"api_user"` + APIToken string `toml:"api_token"` Debug bool SourceTag string // Deprecated, keeping for backward-compatibility Timeout internal.Duration From 81f95e7a298b0a938bb8522f2f8b4c0b34ea7cb2 Mon Sep 17 00:00:00 2001 From: Emil Haugbergsmyr Date: Wed, 11 Jan 2017 17:24:09 +0100 Subject: [PATCH 0056/1302] Fixes change in Kafka consumer input plugin (#2222) * Fixes change to the error api in the kafka project. * Updated test to reflect the change. * Update kafka to match master branch. 
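For context, a minimal sketch of the receiver-loop shape this change affects; the function below is illustrative rather than the plugin's exact code, but the channel types are the ones being updated (the consumer-group library now delivers plain `error` values):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

// receiver drains messages and errors until done is closed. Before this
// change errs was a <-chan *sarama.ConsumerError; it is now <-chan error.
func receiver(in <-chan *sarama.ConsumerMessage, errs <-chan error, done <-chan struct{}) {
	for {
		select {
		case <-done:
			return
		case err := <-errs:
			log.Printf("E! Kafka consumer error: %v", err)
		case msg := <-in:
			_ = msg // parse msg.Value as line protocol and hand it to the accumulator
		}
	}
}

func main() {}
```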
--- Godeps | 2 +- plugins/inputs/kafka_consumer/kafka_consumer.go | 2 +- plugins/inputs/kafka_consumer/kafka_consumer_test.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Godeps b/Godeps index 6a0a17df1..885213c96 100644 --- a/Godeps +++ b/Godeps @@ -52,7 +52,7 @@ github.com/soniah/gosnmp 3fe3beb30fa9700988893c56a63b1df8e1b68c26 github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744 github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2 -github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866 +github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8 github.com/yuin/gopher-lua bf3808abd44b1e55143a2d7f08571aaa80db1808 github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363 diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 52117759d..f4176edd3 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -33,7 +33,7 @@ type Kafka struct { // channel for all incoming kafka messages in <-chan *sarama.ConsumerMessage // channel for all kafka consumer errors - errs <-chan *sarama.ConsumerError + errs <-chan error done chan struct{} // keep the accumulator internally: diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index 1777aa89e..c4936974f 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -27,7 +27,7 @@ func newTestKafka() (*Kafka, chan *sarama.ConsumerMessage) { Offset: "oldest", in: in, doNotCommitMsgs: true, - errs: make(chan *sarama.ConsumerError, 1000), + errs: make(chan error, 1000), done: make(chan struct{}), } return &k, in From 31a4f03031dcec9db7da6d8d0cddd09122bb11d5 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 11 Jan 2017 17:50:01 +0000 Subject: [PATCH 0057/1302] mongodb: Remove superfluous ReplSet log message closes #2248 --- plugins/inputs/mongodb/mongodb_server.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index e843c70f0..2bab8242b 100644 --- a/plugins/inputs/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -40,15 +40,14 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error return err } result_repl := &ReplSetStatus{} - err = s.Session.DB("admin").Run(bson.D{ + // ignore error because it simply indicates that the db is not a member + // in a replica set, which is fine. + _ = s.Session.DB("admin").Run(bson.D{ { Name: "replSetGetStatus", Value: 1, }, }, result_repl) - if err != nil { - log.Println("E! 
Not gathering replica set status, member not in replica set (" + err.Error() + ")") - } jumbo_chunks, _ := s.Session.DB("config").C("chunks").Find(bson.M{"jumbo": true}).Count() From 947e1909ff96a700c5eed44d9f8c2f9a742b0379 Mon Sep 17 00:00:00 2001 From: Mohammad Ali Alfarra Date: Thu, 12 Jan 2017 16:47:01 +0800 Subject: [PATCH 0058/1302] Document basic auth for haproxy (#2258) * Document basic auth for haproxy * Typo in haproxy readme --- plugins/inputs/haproxy/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/inputs/haproxy/README.md b/plugins/inputs/haproxy/README.md index 52be25aa7..7f2bfa1dd 100644 --- a/plugins/inputs/haproxy/README.md +++ b/plugins/inputs/haproxy/README.md @@ -12,6 +12,8 @@ Server addresses need to explicitly start with 'http' if you wish to use HAproxy status page. Otherwise, address will be assumed to be an UNIX socket and protocol (if present) will be discarded. +For basic authentication you need to add username and password in the URL: `http://user:password@1.2.3.4/haproxy?stats`. + Following examples will all resolve to the same socket: ``` socket:/var/run/haproxy.sock From b7d29ca0e94cb9a89af16f313aefd5d19d5fc002 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Thu, 12 Jan 2017 06:08:22 -0500 Subject: [PATCH 0059/1302] allow changing jolokia delimiter (#2255) --- CHANGELOG.md | 1 + plugins/inputs/jolokia/jolokia.go | 27 +++++++++++++++++--------- plugins/inputs/jolokia/jolokia_test.go | 7 ++++--- 3 files changed, 23 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 74bed1278..8919669d1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ plugins, not just statsd. - [#1942](https://github.com/influxdata/telegraf/pull/1942): Change Amazon Kinesis output plugin to use the built-in serializer plugins. - [#1980](https://github.com/influxdata/telegraf/issues/1980): Hide username/password from elasticsearch error log messages. - [#2097](https://github.com/influxdata/telegraf/issues/2097): Configurable HTTP timeouts in Jolokia plugin +- [#2255](https://github.com/influxdata/telegraf/pull/2255): Allow changing jolokia attribute delimiter ### Bugfixes diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go index 32e6a9f57..7f371c935 100644 --- a/plugins/inputs/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -47,12 +47,13 @@ func (c JolokiaClientImpl) MakeRequest(req *http.Request) (*http.Response, error } type Jolokia struct { - jClient JolokiaClient - Context string - Mode string - Servers []Server - Metrics []Metric - Proxy Server + jClient JolokiaClient + Context string + Mode string + Servers []Server + Metrics []Metric + Proxy Server + Delimiter string ResponseHeaderTimeout internal.Duration `toml:"response_header_timeout"` ClientTimeout internal.Duration `toml:"client_timeout"` @@ -84,6 +85,13 @@ const sampleConfig = ` ## Includes connection time, any redirects, and reading the response body. # client_timeout = "4s" + ## Attribute delimiter + ## + ## When multiple attributes are returned for a single + ## [inputs.jolokia.metrics], the field name is a concatenation of the metric + ## name, and the attribute name, separated by the given delimiter. 
+ # delimiter = "_" + ## List of servers exposing jolokia read service [[inputs.jolokia.servers]] name = "as-server-01" @@ -238,10 +246,10 @@ func (j *Jolokia) prepareRequest(server Server, metric Metric) (*http.Request, e return req, nil } -func extractValues(measurement string, value interface{}, fields map[string]interface{}) { +func (j *Jolokia) extractValues(measurement string, value interface{}, fields map[string]interface{}) { if mapValues, ok := value.(map[string]interface{}); ok { for k2, v2 := range mapValues { - extractValues(measurement+"_"+k2, v2, fields) + j.extractValues(measurement+j.Delimiter+k2, v2, fields) } } else { fields[measurement] = value @@ -282,7 +290,7 @@ func (j *Jolokia) Gather(acc telegraf.Accumulator) error { fmt.Printf("Error handling response: %s\n", err) } else { if values, ok := out["value"]; ok { - extractValues(measurement, values, fields) + j.extractValues(measurement, values, fields) } else { fmt.Printf("Missing key 'value' in output response\n") } @@ -301,6 +309,7 @@ func init() { return &Jolokia{ ResponseHeaderTimeout: DefaultResponseHeaderTimeout, ClientTimeout: DefaultClientTimeout, + Delimiter: "_", } }) } diff --git a/plugins/inputs/jolokia/jolokia_test.go b/plugins/inputs/jolokia/jolokia_test.go index ccde619b5..3c4fc2561 100644 --- a/plugins/inputs/jolokia/jolokia_test.go +++ b/plugins/inputs/jolokia/jolokia_test.go @@ -104,9 +104,10 @@ func (c jolokiaClientStub) MakeRequest(req *http.Request) (*http.Response, error // *HttpJson: Pointer to an HttpJson object that uses the generated mock HTTP client func genJolokiaClientStub(response string, statusCode int, servers []Server, metrics []Metric) *Jolokia { return &Jolokia{ - jClient: jolokiaClientStub{responseBody: response, statusCode: statusCode}, - Servers: servers, - Metrics: metrics, + jClient: jolokiaClientStub{responseBody: response, statusCode: statusCode}, + Servers: servers, + Metrics: metrics, + Delimiter: "_", } } From 411853fc74306c2fae685b750ecfbdc9754b6425 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 12 Jan 2017 11:14:12 +0000 Subject: [PATCH 0060/1302] update etc/telegraf.conf --- etc/telegraf.conf | 130 +++++++++++++++++++++++++++++++++++++--------- 1 file changed, 106 insertions(+), 24 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index a6058434c..3d0cdfd3a 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -140,8 +140,6 @@ # # retention_policy = "default" # ## InfluxDB database # # database = "telegraf" -# ## InfluxDB precision -# # precision = "s" # # ## Optional SSL Config # # ssl_ca = "/etc/telegraf/ca.pem" @@ -190,6 +188,11 @@ # # timeout = "5s" +# # Send metrics to nowhere at all +# [[outputs.discard]] +# # no configuration + + # # Send telegraf metrics to file(s) # [[outputs.file]] # ## Files to write to, "stdout" is a specially handled file. @@ -219,7 +222,7 @@ # # Send telegraf metrics to graylog(s) # [[outputs.graylog]] -# ## Udp endpoint for your graylog instance. +# ## UDP endpoint for your graylog instance. # servers = ["127.0.0.1:12201", "192.168.1.1:12201"] @@ -312,9 +315,13 @@ # streamname = "StreamName" # ## PartitionKey as used for sharding data. # partitionkey = "PartitionKey" -# ## format of the Data payload in the kinesis PutRecord, supported -# ## String and Custom. -# format = "string" +# +# ## Data format to output. 
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+#
# ## debug will show upstream aws messages.
# debug = false
@@ -351,6 +358,9 @@
# # username = "telegraf"
# # password = "metricsmetricsmetricsmetrics"
#
+# ## client ID, if not set a random ID is generated
+# # client_id = ""
+#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
@@ -428,6 +438,9 @@
# [[outputs.prometheus_client]]
# ## Address to listen on
# # listen = ":9126"
+#
+# ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration
+# # expiration_interval = "60s"
#
# # Configuration for the Riemann server to send metrics to
@@ -538,6 +551,19 @@
# ## An array of Apache status URI to gather stats.
# ## Default is "http://localhost/server-status?auto".
# urls = ["http://localhost/server-status?auto"]
+# ## user credentials for basic HTTP authentication
+# username = "myuser"
+# password = "mypassword"
+#
+# ## Timeout to the complete connection and response time in seconds
+# response_timeout = "25s" ## defaults to 5 seconds
+#
+# ## Optional SSL Config
+# # ssl_ca = "/etc/telegraf/ca.pem"
+# # ssl_cert = "/etc/telegraf/cert.pem"
+# # ssl_key = "/etc/telegraf/key.pem"
+# ## Use SSL but skip chain & host verification
+# # insecure_skip_verify = false
#
# # Read metrics of bcache from stats_total and dirty_data
@@ -640,6 +666,13 @@
# #profile = ""
# #shared_credential_file = ""
#
+# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
+# # metrics are made available to the 1 minute period. Some are collected at
+# # 3 minute and 5 minutes intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
+# # Note that if a period is configured that is smaller than the minimum for a
+# # particular metric, that metric will not be returned by the Cloudwatch API
+# # and will not be collected by Telegraf.
+# #
# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
# period = "5m"
#
@@ -789,13 +822,13 @@
# ## of the cluster.
# local = true
#
-# ## set cluster_health to true when you want to also obtain cluster health stats
+# ## Set cluster_health to true when you want to also obtain cluster health stats
# cluster_health = false
#
-# ## Set cluster_stats to true when you want to obtain cluster stats from the
+# ## Set cluster_stats to true when you want to also obtain cluster stats from the
# ## Master node.
# cluster_stats = false
-
+#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
@@ -980,6 +1013,12 @@
# timeout = "5s"


+# # Collect statistics about itself
+# [[inputs.internal]]
+# ## If true, collect telegraf memory stats.
+# # collect_memstats = true
+

# # Read metrics from one or many bare metal servers
# [[inputs.ipmi_sensor]]
# ## specify servers via a url matching:
@@ -993,8 +1032,9 @@
# # Read JMX metrics through Jolokia
# [[inputs.jolokia]]
# ## This is the context root used to compose the jolokia url
+# ## NOTE that Jolokia requires a trailing slash at the end of the context root
# ## NOTE that your jolokia security policy must allow for POST requests.
-# context = "/jolokia" +# context = "/jolokia/" # # ## This specifies the mode used # # mode = "proxy" @@ -1006,6 +1046,15 @@ # # host = "127.0.0.1" # # port = "8080" # +# ## Optional http timeouts +# ## +# ## response_header_timeout, if non-zero, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request. +# # response_header_timeout = "3s" +# ## +# ## client_timeout specifies a time limit for requests made by this client. +# ## Includes connection time, any redirects, and reading the response body. +# # client_timeout = "4s" # # ## List of servers exposing jolokia read service # [[inputs.jolokia.servers]] @@ -1144,8 +1193,8 @@ # ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]] # ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name # ## e.g. -# ## db_user:passwd@tcp(127.0.0.1:3306)/?tls=false -# ## db_user@tcp(127.0.0.1:3306)/?tls=false +# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"] +# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"] # # # ## If no servers are specified, then localhost is used as the host. # servers = ["tcp(127.0.0.1:3306)/"] @@ -1206,18 +1255,24 @@ # # TCP or UDP 'ping' given url and collect response time in seconds # [[inputs.net_response]] # ## Protocol, must be "tcp" or "udp" +# ## NOTE: because the "udp" protocol does not respond to requests, it requires +# ## a send/expect string pair (see below). # protocol = "tcp" # ## Server address (default localhost) -# address = "github.com:80" +# address = "localhost:80" # ## Set timeout # timeout = "1s" # -# ## Optional string sent to the server -# # send = "ssh" -# ## Optional expected string in answer -# # expect = "ssh" # ## Set read timeout (only used if expecting a response) # read_timeout = "1s" +# +# ## The following options are required for UDP checks. For TCP, they are +# ## optional. The plugin will send the given string to the server and then +# ## expect to receive the given 'expect' string back. +# ## string sent to the server +# # send = "ssh" +# ## expected string in answer +# # expect = "ssh" # # Read TCP metrics such as established, time wait and sockets counts. @@ -1419,6 +1474,8 @@ # prefix = "" # ## comment this out if you want raw cpu_time stats # fielddrop = ["cpu_time_*"] +# ## This is optional; moves pid into a tag instead of a field +# pid_tag = false # # Read metrics from one or many prometheus clients @@ -1429,6 +1486,9 @@ # ## Use bearer token for authorization # # bearer_token = /path/to/bearer/token # +# ## Specify timeout duration for slower prometheus clients (default is 3s) +# # response_timeout = "3s" +# # ## Optional SSL Config # # ssl_ca = /path/to/cafile # # ssl_cert = /path/to/certfile @@ -1457,6 +1517,16 @@ # ## Use SSL but skip chain & host verification # # insecure_skip_verify = false # +# ## Optional request timeouts +# ## +# ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request. +# # header_timeout = "3s" +# ## +# ## client_timeout specifies a time limit for requests made by this client. +# ## Includes connection time, any redirects, and reading the response body. +# # client_timeout = "4s" +# # ## A list of nodes to pull metrics about. If not specified, metrics for # ## all nodes are gathered. 
# # nodes = ["rabbit@node1", "rabbit@node2"] @@ -1879,14 +1949,19 @@ # [[inputs.statsd]] # ## Address and port to host UDP listener on # service_address = ":8125" -# ## Delete gauges every interval (default=false) -# delete_gauges = false -# ## Delete counters every interval (default=false) -# delete_counters = false -# ## Delete sets every interval (default=false) -# delete_sets = false -# ## Delete timings & histograms every interval (default=true) +# +# ## The following configuration options control when telegraf clears it's cache +# ## of previous values. If set to false, then telegraf will only clear it's +# ## cache when the daemon is restarted. +# ## Reset gauges every interval (default=true) +# delete_gauges = true +# ## Reset counters every interval (default=true) +# delete_counters = true +# ## Reset sets every interval (default=true) +# delete_sets = true +# ## Reset timings & histograms every interval (default=true) # delete_timings = true +# # ## Percentiles to calculate for timing & histogram stats # percentiles = [90] # @@ -1927,6 +2002,8 @@ # files = ["/var/mymetrics.out"] # ## Read file from beginning. # from_beginning = false +# ## Whether file is a named pipe +# pipe = false # # ## Data format to consume. # ## Each data format has it's own unique set of configuration options, read @@ -1963,6 +2040,10 @@ # ## UDP listener will start dropping packets. # # allowed_pending_messages = 10000 # +# ## Set the buffer size of the UDP connection outside of OS default (in bytes) +# ## If set to 0, take OS default +# udp_buffer_size = 16777216 +# # ## Data format to consume. # ## Each data format has it's own unique set of configuration options, read # ## more about them here: @@ -1986,3 +2067,4 @@ # # [inputs.webhooks.rollbar] # path = "/rollbar" + From e812a2efc6880084c177074d14d4f2a6f649d895 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 13 Jan 2017 11:43:50 +0000 Subject: [PATCH 0061/1302] Accept an HTTP request body without newline at end (#2266) I don't like this behavior, but it's what InfluxDB accepts, so the telegraf listener should be consistent with that. I accidentally reverted this behavior when I refactored the telegraf metric representation earlier in this release cycle. 
--- plugins/inputs/http_listener/http_listener.go | 3 +++ .../http_listener/http_listener_test.go | 24 +++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/plugins/inputs/http_listener/http_listener.go b/plugins/inputs/http_listener/http_listener.go index 0f426f809..05551a966 100644 --- a/plugins/inputs/http_listener/http_listener.go +++ b/plugins/inputs/http_listener/http_listener.go @@ -300,6 +300,9 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { } func (h *HTTPListener) parse(b []byte, t time.Time) error { + if !bytes.HasSuffix(b, []byte("\n")) { + b = append(b, '\n') + } metrics, err := h.parser.ParseWithDefaultTime(b, t) for _, m := range metrics { diff --git a/plugins/inputs/http_listener/http_listener_test.go b/plugins/inputs/http_listener/http_listener_test.go index 84cf209ff..b5f858fde 100644 --- a/plugins/inputs/http_listener/http_listener_test.go +++ b/plugins/inputs/http_listener/http_listener_test.go @@ -16,6 +16,8 @@ import ( const ( testMsg = "cpu_load_short,host=server01 value=12.0 1422568543702900257\n" + testMsgNoNewline = "cpu_load_short,host=server01 value=12.0 1422568543702900257" + testMsgs = `cpu_load_short,host=server02 value=12.0 1422568543702900257 cpu_load_short,host=server03 value=12.0 1422568543702900257 cpu_load_short,host=server04 value=12.0 1422568543702900257 @@ -81,6 +83,28 @@ func TestWriteHTTP(t *testing.T) { ) } +// http listener should add a newline at the end of the buffer if it's not there +func TestWriteHTTPNoNewline(t *testing.T) { + listener := newTestHTTPListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + time.Sleep(time.Millisecond * 25) + + // post single message to listener + resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(testMsgNoNewline))) + require.NoError(t, err) + require.EqualValues(t, 204, resp.StatusCode) + + time.Sleep(time.Millisecond * 15) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01"}, + ) +} + func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) { listener := &HTTPListener{ ServiceAddress: ":8296", From 95bad9e55b635862a10af7b15976bda99301f0a4 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 13 Jan 2017 11:35:36 +0000 Subject: [PATCH 0062/1302] OpenTSDB filter types for HTTP AND telnet --- plugins/outputs/opentsdb/opentsdb.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/plugins/outputs/opentsdb/opentsdb.go b/plugins/outputs/opentsdb/opentsdb.go index ce797e10f..84ff99058 100644 --- a/plugins/outputs/opentsdb/opentsdb.go +++ b/plugins/outputs/opentsdb/opentsdb.go @@ -157,6 +157,15 @@ func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error { tags := ToLineFormat(cleanTags(m.Tags())) for fieldName, value := range m.Fields() { + switch value.(type) { + case int64: + case uint64: + case float64: + default: + log.Printf("D! OpenTSDB does not support metric value: [%s] of type [%T].\n", value, value) + continue + } + metricValue, buildError := buildValue(value) if buildError != nil { log.Printf("E! 
OpenTSDB: %s\n", buildError.Error()) From 734988d732819761e84fc665a9aebf3effd4d1a9 Mon Sep 17 00:00:00 2001 From: Kebus1 Date: Fri, 13 Jan 2017 14:47:47 +0100 Subject: [PATCH 0063/1302] Fixed Bug 2077 SQL Server (#2212) --- plugins/inputs/sqlserver/sqlserver.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index e428d3098..5afbb067e 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -402,8 +402,8 @@ IF OBJECT_ID('tempdb..#baseline') IS NOT NULL DROP TABLE #baseline; SELECT DB_NAME(mf.database_id) AS database_name , - mf.size as database_size_8k_pages, - mf.max_size as database_max_size_8k_pages, + CAST(mf.size AS BIGINT) as database_size_8k_pages, + CAST(mf.max_size AS BIGINT) as database_max_size_8k_pages, size_on_disk_bytes , type_desc as datafile_type, GETDATE() AS baselineDate From b60b360f13cec961a18294350dae333112058ab1 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 13 Jan 2017 13:50:07 +0000 Subject: [PATCH 0064/1302] Changelog update --- CHANGELOG.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8919669d1..eb42e511d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,14 @@ -## v1.2 [unreleased] +## v1.3 [unreleased] + +### Release Notes + +### Features + +### Bugfixes + +- [#2077](https://github.com/influxdata/telegraf/issues/2077): SQL Server Input - Arithmetic overflow error converting numeric to data type int. + +## v1.2 [2017-01-00] ### Release Notes From b89c45b858728a8f7330ec65cd064575035402e7 Mon Sep 17 00:00:00 2001 From: Viet Hung Nguyen Date: Fri, 13 Jan 2017 21:19:57 +0700 Subject: [PATCH 0065/1302] Ignore devfs on OSX (#2232) --- plugins/inputs/system/disk.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/system/disk.go b/plugins/inputs/system/disk.go index f308a243b..548a9ce23 100644 --- a/plugins/inputs/system/disk.go +++ b/plugins/inputs/system/disk.go @@ -29,7 +29,7 @@ var diskSampleConfig = ` ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually ## present on /run, /var/run, /dev/shm or /dev). 
- ignore_fs = ["tmpfs", "devtmpfs"] + ignore_fs = ["tmpfs", "devtmpfs", "devfs"] ` func (_ *DiskStats) SampleConfig() string { From 0c9da0985a5af267fad2beb0483b8ab9b47ee059 Mon Sep 17 00:00:00 2001 From: acezellponce Date: Fri, 13 Jan 2017 06:25:25 -0800 Subject: [PATCH 0066/1302] Added userstats to mysql input plugin (#2137) * Added GatherUserStatistics, row Uptime in gatherGlobalStatuses, and version fields & tags * Updated README file * pulling in latest from master * ran go fmt to fix formatting * fix unreachable code * few fixes * cleaning up and applying suggestions from sparrc --- plugins/inputs/mysql/README.md | 30 ++++++ plugins/inputs/mysql/mysql.go | 177 ++++++++++++++++++++++++++++++++- 2 files changed, 206 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/mysql/README.md b/plugins/inputs/mysql/README.md index 34bb07bef..a55ddb8ff 100644 --- a/plugins/inputs/mysql/README.md +++ b/plugins/inputs/mysql/README.md @@ -7,6 +7,7 @@ This plugin gathers the statistic data from MySQL server * Slave statuses * Binlog size * Process list +* User Statistics * Info schema auto increment columns * Table I/O waits * Index I/O waits @@ -44,6 +45,9 @@ This plugin gathers the statistic data from MySQL server ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST gather_process_list = true # + ## gather thread state counts from INFORMATION_SCHEMA.USER_STATISTICS + gather_user_statistics = true + # ## gather auto_increment columns and max values from information schema gather_info_schema_auto_inc = true # @@ -89,6 +93,30 @@ Requires to be turned on in configuration. * binary_files_count(int, number) * Process list - connection metrics from processlist for each user. It has the following tags * connections(int, number) +* User Statistics - connection metrics from user statistics for each user. It has the following fields + * access_denied + * binlog_bytes_written + * busy_time + * bytes_received + * bytes_sent + * commit_transactions + * concurrent_connections + * connected_time + * cpu_time + * denied_connections + * empty_queries + * hostlost_connections + * other_commands + * rollback_transactions + * rows_fetched + * rows_updated + * select_commands + * server + * table_rows_read + * total_connections + * total_ssl_connections + * update_commands + * user * Perf Table IO waits - total count and time of I/O waits event for each table and process. It has following fields: * table_io_waits_total_fetch(float, number) @@ -158,6 +186,8 @@ The unit of fields varies by the tags. 
* server (the host name from which the metrics are gathered) * Process list measurement has following tags * user (username for whom the metrics are gathered) +* User Statistics measurement has following tags + * user (username for whom the metrics are gathered) * Perf table IO waits measurement has following tags * schema * name (object name for event or process) diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 54f296586..c0a31aeed 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -23,6 +23,7 @@ type Mysql struct { PerfEventsStatementsTimeLimit int64 `toml:"perf_events_statemetns_time_limit"` TableSchemaDatabases []string `toml:"table_schema_databases"` GatherProcessList bool `toml:"gather_process_list"` + GatherUserStatistics bool `toml:"gather_user_statistics"` GatherInfoSchemaAutoInc bool `toml:"gather_info_schema_auto_inc"` GatherSlaveStatus bool `toml:"gather_slave_status"` GatherBinaryLogs bool `toml:"gather_binary_logs"` @@ -60,6 +61,9 @@ var sampleConfig = ` ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST gather_process_list = true # + ## gather thread state counts from INFORMATION_SCHEMA.USER_STATISTICS + gather_user_statistics = true + # ## gather auto_increment columns and max values from information schema gather_info_schema_auto_inc = true # @@ -415,6 +419,10 @@ const ( WHERE ID != connection_id() GROUP BY command,state ORDER BY null` + infoSchemaUserStatisticsQuery = ` + SELECT *,count(*) + FROM information_schema.user_statistics + GROUP BY user` infoSchemaAutoIncQuery = ` SELECT table_schema, table_name, column_name, auto_increment, CAST(pow(2, case data_type @@ -530,7 +538,6 @@ const ( table_name FROM information_schema.tables WHERE table_schema = 'performance_schema' AND table_name = ? 
- ` ) @@ -582,6 +589,13 @@ func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error { } } + if m.GatherUserStatistics { + err = m.GatherUserStatisticsStatuses(db, serv, acc) + if err != nil { + return err + } + } + if m.GatherSlaveStatus { err = m.gatherSlaveStatuses(db, serv, acc) if err != nil { @@ -669,6 +683,11 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, serv string, acc telegraf.Accu return err } key = strings.ToLower(key) + // parse mysql version and put into field and tag + if strings.Contains(key, "version") { + fields[key] = string(val) + tags[key] = string(val) + } // parse value, if it is numeric then save, otherwise ignore if floatVal, ok := parseValue(val); ok { fields[key] = floatVal @@ -854,6 +873,12 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum return err } fields["syncs"] = i + case "Uptime": + i, err := strconv.ParseInt(string(val.([]byte)), 10, 64) + if err != nil { + return err + } + fields["uptime"] = i } } // Send any remaining fields @@ -884,6 +909,74 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum } } + // gather connection metrics from user_statistics for each user + if m.GatherUserStatistics { + conn_rows, err := db.Query("select user, total_connections, concurrent_connections, connected_time, busy_time, cpu_time, bytes_received, bytes_sent, binlog_bytes_written, rows_fetched, rows_updated, table_rows_read, select_commands, update_commands, other_commands, commit_transactions, rollback_transactions, denied_connections, lost_connections, access_denied, empty_queries, total_ssl_connections FROM INFORMATION_SCHEMA.USER_STATISTICS GROUP BY user") + + for conn_rows.Next() { + var user string + var total_connections int64 + var concurrent_connections int64 + var connected_time int64 + var busy_time int64 + var cpu_time int64 + var bytes_received int64 + var bytes_sent int64 + var binlog_bytes_written int64 + var rows_fetched int64 + var rows_updated int64 + var table_rows_read int64 + var select_commands int64 + var update_commands int64 + var other_commands int64 + var commit_transactions int64 + var rollback_transactions int64 + var denied_connections int64 + var lost_connections int64 + var access_denied int64 + var empty_queries int64 + var total_ssl_connections int64 + + err = conn_rows.Scan(&user, &total_connections, &concurrent_connections, + &connected_time, &busy_time, &cpu_time, &bytes_received, &bytes_sent, &binlog_bytes_written, + &rows_fetched, &rows_updated, &table_rows_read, &select_commands, &update_commands, &other_commands, + &commit_transactions, &rollback_transactions, &denied_connections, &lost_connections, &access_denied, + &empty_queries, &total_ssl_connections, + ) + + if err != nil { + return err + } + + tags := map[string]string{"server": servtag, "user": user} + fields := map[string]interface{}{ + "total_connections": total_connections, + "concurrent_connections": concurrent_connections, + "connected_time": connected_time, + "busy_time": busy_time, + "cpu_time": cpu_time, + "bytes_received": bytes_received, + "bytes_sent": bytes_sent, + "binlog_bytes_written": binlog_bytes_written, + "rows_fetched": rows_fetched, + "rows_updated": rows_updated, + "table_rows_read": table_rows_read, + "select_commands": select_commands, + "update_commands": update_commands, + "other_commands": other_commands, + "commit_transactions": commit_transactions, + "rollback_transactions": rollback_transactions, + "denied_connections": denied_connections, + 
"lost_connections": lost_connections, + "access_denied": access_denied, + "empty_queries": empty_queries, + "total_ssl_connections": total_ssl_connections, + } + + acc.AddFields("mysql_user_stats", fields, tags) + } + } + return nil } @@ -932,6 +1025,88 @@ func (m *Mysql) GatherProcessListStatuses(db *sql.DB, serv string, acc telegraf. return nil } +// GatherUserStatistics can be used to collect metrics on each running command +// and its state with its running count +func (m *Mysql) GatherUserStatisticsStatuses(db *sql.DB, serv string, acc telegraf.Accumulator) error { + // run query + rows, err := db.Query(infoSchemaUserStatisticsQuery) + if err != nil { + return err + } + defer rows.Close() + var ( + user string + total_connections int64 + concurrent_connections int64 + connected_time int64 + busy_time int64 + cpu_time int64 + bytes_received int64 + bytes_sent int64 + binlog_bytes_written int64 + rows_fetched int64 + rows_updated int64 + table_rows_read int64 + select_commands int64 + update_commands int64 + other_commands int64 + commit_transactions int64 + rollback_transactions int64 + denied_connections int64 + lost_connections int64 + access_denied int64 + empty_queries int64 + total_ssl_connections int64 + count uint32 + ) + + var servtag string + servtag, err = parseDSN(serv) + if err != nil { + servtag = "localhost" + } + + for rows.Next() { + err = rows.Scan(&user, &total_connections, &concurrent_connections, + &connected_time, &busy_time, &cpu_time, &bytes_received, &bytes_sent, &binlog_bytes_written, + &rows_fetched, &rows_updated, &table_rows_read, &select_commands, &update_commands, &other_commands, + &commit_transactions, &rollback_transactions, &denied_connections, &lost_connections, &access_denied, + &empty_queries, &total_ssl_connections, &count, + ) + if err != nil { + return err + } + + tags := map[string]string{"server": servtag, "user": user} + fields := map[string]interface{}{ + + "total_connections": total_connections, + "concurrent_connections": concurrent_connections, + "connected_time": connected_time, + "busy_time": busy_time, + "cpu_time": cpu_time, + "bytes_received": bytes_received, + "bytes_sent": bytes_sent, + "binlog_bytes_written": binlog_bytes_written, + "rows_fetched": rows_fetched, + "rows_updated": rows_updated, + "table_rows_read": table_rows_read, + "select_commands": select_commands, + "update_commands": update_commands, + "other_commands": other_commands, + "commit_transactions": commit_transactions, + "rollback_transactions": rollback_transactions, + "denied_connections": denied_connections, + "lost_connections": lost_connections, + "access_denied": access_denied, + "empty_queries": empty_queries, + "total_ssl_connections": total_ssl_connections, + } + acc.AddFields("mysql_user_stats", fields, tags) + } + return nil +} + // gatherPerfTableIOWaits can be used to get total count and time // of I/O wait event for each table and process func (m *Mysql) gatherPerfTableIOWaits(db *sql.DB, serv string, acc telegraf.Accumulator) error { From 9262712f0ab6c8c44d13738efcf594c7d4290b8b Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 13 Jan 2017 14:27:20 +0000 Subject: [PATCH 0067/1302] Changelog update and go fmt --- CHANGELOG.md | 2 ++ plugins/inputs/mysql/mysql.go | 46 +++++++++++++++++------------------ 2 files changed, 25 insertions(+), 23 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index eb42e511d..990d417e5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,8 @@ ### Features +- 
[#2137](https://github.com/influxdata/telegraf/pull/2137): Added userstats to mysql input plugin. + ### Bugfixes - [#2077](https://github.com/influxdata/telegraf/issues/2077): SQL Server Input - Arithmetic overflow error converting numeric to data type int. diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index c0a31aeed..ca169f0d4 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -683,7 +683,7 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, serv string, acc telegraf.Accu return err } key = strings.ToLower(key) - // parse mysql version and put into field and tag + // parse mysql version and put into field and tag if strings.Contains(key, "version") { fields[key] = string(val) tags[key] = string(val) @@ -950,28 +950,28 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum tags := map[string]string{"server": servtag, "user": user} fields := map[string]interface{}{ - "total_connections": total_connections, - "concurrent_connections": concurrent_connections, - "connected_time": connected_time, - "busy_time": busy_time, - "cpu_time": cpu_time, - "bytes_received": bytes_received, - "bytes_sent": bytes_sent, - "binlog_bytes_written": binlog_bytes_written, - "rows_fetched": rows_fetched, - "rows_updated": rows_updated, - "table_rows_read": table_rows_read, - "select_commands": select_commands, - "update_commands": update_commands, - "other_commands": other_commands, - "commit_transactions": commit_transactions, - "rollback_transactions": rollback_transactions, - "denied_connections": denied_connections, - "lost_connections": lost_connections, - "access_denied": access_denied, - "empty_queries": empty_queries, - "total_ssl_connections": total_ssl_connections, - } + "total_connections": total_connections, + "concurrent_connections": concurrent_connections, + "connected_time": connected_time, + "busy_time": busy_time, + "cpu_time": cpu_time, + "bytes_received": bytes_received, + "bytes_sent": bytes_sent, + "binlog_bytes_written": binlog_bytes_written, + "rows_fetched": rows_fetched, + "rows_updated": rows_updated, + "table_rows_read": table_rows_read, + "select_commands": select_commands, + "update_commands": update_commands, + "other_commands": other_commands, + "commit_transactions": commit_transactions, + "rollback_transactions": rollback_transactions, + "denied_connections": denied_connections, + "lost_connections": lost_connections, + "access_denied": access_denied, + "empty_queries": empty_queries, + "total_ssl_connections": total_ssl_connections, + } acc.AddFields("mysql_user_stats", fields, tags) } From 9b2f6499e7b0c41aa97ed5d2f137e14e644b243a Mon Sep 17 00:00:00 2001 From: Pierre Fersing Date: Fri, 13 Jan 2017 15:28:56 +0100 Subject: [PATCH 0068/1302] Added more InnoDB metric to MySQL plugin (#2179) --- plugins/inputs/mysql/README.md | 5 ++++ plugins/inputs/mysql/mysql.go | 55 ++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/plugins/inputs/mysql/README.md b/plugins/inputs/mysql/README.md index a55ddb8ff..f941207c9 100644 --- a/plugins/inputs/mysql/README.md +++ b/plugins/inputs/mysql/README.md @@ -9,6 +9,7 @@ This plugin gathers the statistic data from MySQL server * Process list * User Statistics * Info schema auto increment columns +* InnoDB metrics * Table I/O waits * Index I/O waits * Perf Schema table lock waits @@ -51,6 +52,9 @@ This plugin gathers the statistic data from MySQL server ## gather auto_increment columns and max values from information schema 
gather_info_schema_auto_inc = true # + ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS + gather_innodb_metrics = true + # ## gather metrics from SHOW SLAVE STATUS command output gather_slave_status = true # @@ -141,6 +145,7 @@ and process. It has following fields: for them. It has following fields: * auto_increment_column(int, number) * auto_increment_column_max(int, number) +* InnoDB metrics - all metrics of information_schema.INNODB_METRICS with a status "enabled" * Perf table lock waits - gathers total number and time for SQL and external lock waits events for each table and operation. It has following fields. The unit of fields varies by the tags. diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index ca169f0d4..956b3f9b0 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -25,6 +25,7 @@ type Mysql struct { GatherProcessList bool `toml:"gather_process_list"` GatherUserStatistics bool `toml:"gather_user_statistics"` GatherInfoSchemaAutoInc bool `toml:"gather_info_schema_auto_inc"` + GatherInnoDBMetrics bool `toml:"gather_innodb_metrics"` GatherSlaveStatus bool `toml:"gather_slave_status"` GatherBinaryLogs bool `toml:"gather_binary_logs"` GatherTableIOWaits bool `toml:"gather_table_io_waits"` @@ -67,6 +68,9 @@ var sampleConfig = ` ## gather auto_increment columns and max values from information schema gather_info_schema_auto_inc = true # + ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS + gather_innodb_metrics = true + # ## gather metrics from SHOW SLAVE STATUS command output gather_slave_status = true # @@ -435,6 +439,11 @@ const ( FROM information_schema.tables t JOIN information_schema.columns c USING (table_schema,table_name) WHERE c.extra = 'auto_increment' AND t.auto_increment IS NOT NULL + ` + innoDBMetricsQuery = ` + SELECT NAME, COUNT + FROM information_schema.INNODB_METRICS + WHERE status='enabled' ` perfTableIOWaitsQuery = ` SELECT OBJECT_SCHEMA, OBJECT_NAME, COUNT_FETCH, COUNT_INSERT, COUNT_UPDATE, COUNT_DELETE, @@ -610,6 +619,13 @@ func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error { } } + if m.GatherInnoDBMetrics { + err = m.gatherInnoDBMetrics(db, serv, acc) + if err != nil { + return err + } + } + if m.GatherTableIOWaits { err = m.gatherPerfTableIOWaits(db, serv, acc) if err != nil { @@ -1244,6 +1260,45 @@ func (m *Mysql) gatherInfoSchemaAutoIncStatuses(db *sql.DB, serv string, acc tel return nil } +// gatherInnoDBMetrics can be used to fetch enabled metrics from +// information_schema.INNODB_METRICS +func (m *Mysql) gatherInnoDBMetrics(db *sql.DB, serv string, acc telegraf.Accumulator) error { + // run query + rows, err := db.Query(innoDBMetricsQuery) + if err != nil { + return err + } + defer rows.Close() + + var key string + var val sql.RawBytes + + // parse DSN and save server tag + servtag := getDSNTag(serv) + tags := map[string]string{"server": servtag} + fields := make(map[string]interface{}) + for rows.Next() { + if err := rows.Scan(&key, &val); err != nil { + return err + } + key = strings.ToLower(key) + // parse value, if it is numeric then save, otherwise ignore + if floatVal, ok := parseValue(val); ok { + fields[key] = floatVal + } + // Send 20 fields at a time + if len(fields) >= 20 { + acc.AddFields("mysql_innodb", fields, tags) + fields = make(map[string]interface{}) + } + } + // Send any remaining fields + if len(fields) > 0 { + acc.AddFields("mysql_innodb", fields, tags) + } + return nil +} + // gatherPerfTableLockWaits can be used to get // the total number 
and time for SQL and external lock wait events // for each table and operation From e0c6262e0bf5372109b7e525ee640d15e13f0263 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 13 Jan 2017 14:34:07 +0000 Subject: [PATCH 0069/1302] mysql build fixup and changelog update --- CHANGELOG.md | 1 + plugins/inputs/mysql/mysql.go | 7 +------ 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 990d417e5..c0c96476a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ ### Features - [#2137](https://github.com/influxdata/telegraf/pull/2137): Added userstats to mysql input plugin. +- [#2179](https://github.com/influxdata/telegraf/pull/2179): Added more InnoDB metric to MySQL plugin. ### Bugfixes diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 956b3f9b0..adc21880b 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -1076,12 +1076,7 @@ func (m *Mysql) GatherUserStatisticsStatuses(db *sql.DB, serv string, acc telegr count uint32 ) - var servtag string - servtag, err = parseDSN(serv) - if err != nil { - servtag = "localhost" - } - + servtag := getDSNTag(serv) for rows.Next() { err = rows.Scan(&user, &total_connections, &concurrent_connections, &connected_time, &busy_time, &cpu_time, &bytes_received, &bytes_sent, &binlog_bytes_written, From 3de6bfbcb809c15c18adb2dc6b4f9a1ad3f68c54 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 13 Jan 2017 17:02:10 +0000 Subject: [PATCH 0070/1302] Direct people to downloads page for installation --- README.md | 60 ++++++---------------------------- plugins/outputs/amqp/README.md | 34 +++++++++++++++++++ 2 files changed, 44 insertions(+), 50 deletions(-) diff --git a/README.md b/README.md index b11412065..b758609d3 100644 --- a/README.md +++ b/README.md @@ -25,60 +25,20 @@ new plugins. ## Installation: -### Linux deb and rpm Packages: +You can either download the binaries directly from the +[downloads](https://www.influxdata.com/downloads) page. -Latest: -* https://dl.influxdata.com/telegraf/releases/telegraf_1.1.1_amd64.deb -* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1.x86_64.rpm - -Latest (arm): -* https://dl.influxdata.com/telegraf/releases/telegraf_1.1.1_armhf.deb -* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1.armhf.rpm - -##### Package Instructions: - -* Telegraf binary is installed in `/usr/bin/telegraf` -* Telegraf daemon configuration file is in `/etc/telegraf/telegraf.conf` -* On sysv systems, the telegraf daemon can be controlled via -`service telegraf [action]` -* On systemd systems (such as Ubuntu 15+), the telegraf daemon can be -controlled via `systemctl [action] telegraf` - -### yum/apt Repositories: - -There is a yum/apt repo available for the whole InfluxData stack, see -[here](https://docs.influxdata.com/influxdb/latest/introduction/installation/#installation) -for instructions on setting up the repo. Once it is configured, you will be able -to use this repo to install & update telegraf. 
- -### Linux tarballs: - -Latest: -* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_linux_amd64.tar.gz -* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_linux_i386.tar.gz -* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_linux_armhf.tar.gz +A few alternate installs are available here as well: ### FreeBSD tarball: Latest: -* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_freebsd_amd64.tar.gz +* https://dl.influxdata.com/telegraf/releases/telegraf-VERSION_freebsd_amd64.tar.gz ### Ansible Role: Ansible role: https://github.com/rossmcdonald/telegraf -### OSX via Homebrew: - -``` -brew update -brew install telegraf -``` - -### Windows Binaries (EXPERIMENTAL) - -Latest: -* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_windows_amd64.zip - ### From Source: Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm), @@ -99,31 +59,31 @@ See usage with: telegraf --help ``` -### Generate a telegraf config file: +#### Generate a telegraf config file: ``` telegraf config > telegraf.conf ``` -### Generate config with only cpu input & influxdb output plugins defined +#### Generate config with only cpu input & influxdb output plugins defined ``` telegraf --input-filter cpu --output-filter influxdb config ``` -### Run a single telegraf collection, outputing metrics to stdout +#### Run a single telegraf collection, outputing metrics to stdout ``` telegraf --config telegraf.conf -test ``` -### Run telegraf with all plugins defined in config file +#### Run telegraf with all plugins defined in config file ``` telegraf --config telegraf.conf ``` -### Run telegraf, enabling the cpu & memory input, and influxdb output plugins +#### Run telegraf, enabling the cpu & memory input, and influxdb output plugins ``` telegraf --config telegraf.conf -input-filter cpu:mem -output-filter influxdb @@ -242,7 +202,7 @@ Telegraf can also collect metrics via the following service plugins: * [influxdb](./plugins/outputs/influxdb) * [amon](./plugins/outputs/amon) -* [amqp](./plugins/outputs/amqp) +* [amqp](./plugins/outputs/amqp) (rabbitmq) * [aws kinesis](./plugins/outputs/kinesis) * [aws cloudwatch](./plugins/outputs/cloudwatch) * [datadog](./plugins/outputs/datadog) diff --git a/plugins/outputs/amqp/README.md b/plugins/outputs/amqp/README.md index 2fdedfbf1..d49c507b8 100644 --- a/plugins/outputs/amqp/README.md +++ b/plugins/outputs/amqp/README.md @@ -7,3 +7,37 @@ If RoutingTag is empty, then empty routing key will be used. Metrics are grouped in batches by RoutingTag. This plugin doesn't bind exchange to a queue, so it should be done by consumer. + +### Configuration: + +``` +# Configuration for the AMQP server to send metrics to +[[outputs.amqp]] + ## AMQP url + url = "amqp://localhost:5672/influxdb" + ## AMQP exchange + exchange = "telegraf" + ## Auth method. PLAIN and EXTERNAL are supported + # auth_method = "PLAIN" + ## Telegraf tag to use as a routing key + ## ie, if this tag exists, it's value will be used as the routing key + routing_tag = "host" + + ## InfluxDB retention policy + # retention_policy = "default" + ## InfluxDB database + # database = "telegraf" + + ## Optional SSL Config + # ssl_ca = "/etc/telegraf/ca.pem" + # ssl_cert = "/etc/telegraf/cert.pem" + # ssl_key = "/etc/telegraf/key.pem" + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false + + ## Data format to output. 
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "influx"
+```

From 20bf90ee52d7be1d2f0864b05b0d75ed2da2f5c2 Mon Sep 17 00:00:00 2001
From: Claudius Zingerli
Date: Sun, 22 Jan 2017 00:08:17 +0100
Subject: [PATCH 0071/1302] Add minimal documentation to the diskio plugin
 (#2296)

* Add documentation to diskio plugin

* Update spelling, fix iops_in_progress unit
---
 plugins/inputs/system/DISK_README.md | 54 ++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)

diff --git a/plugins/inputs/system/DISK_README.md b/plugins/inputs/system/DISK_README.md
index c510615ee..b79ca07b3 100644
--- a/plugins/inputs/system/DISK_README.md
+++ b/plugins/inputs/system/DISK_README.md
@@ -51,3 +51,57 @@ In this case, the host's root volume should be mounted into the container and th
 > disk,fstype=autofs,path=/net free=0i,inodes_free=0i,inodes_total=0i,inodes_used=0i,total=0i,used=0i,used_percent=0 1453832006274157077
 > disk,fstype=autofs,path=/home free=0i,inodes_free=0i,inodes_total=0i,inodes_used=0i,total=0i,used=0i,used_percent=0 1453832006274169688
 ```
+
+
+# DiskIO Input Plugin
+
+The diskio input plugin gathers metrics about disk traffic and timing.
+
+### Configuration:
+
+```
+# Read metrics about disk IO by device
+[[inputs.diskio]]
+  ## By default, telegraf will gather stats for all devices including
+  ## disk partitions.
+  ## Setting devices will restrict the stats to the specified devices.
+  # devices = ["sda", "sdb"]
+  ## Uncomment the following line if you need disk serial numbers.
+  # skip_serial_number = false
+```
+
+Data collection is based on github.com/shirou/gopsutil. This package handles
+platform dependencies and converts all timing information to milliseconds.
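+
+A minimal sketch of reading these same counters directly with gopsutil (this
+assumes the no-argument `disk.IOCounters()` API of the gopsutil version
+vendored at the time; the plugin itself adds device filtering and serial
+number handling on top of it):
+
+```
+package main
+
+import (
+	"fmt"
+
+	"github.com/shirou/gopsutil/disk"
+)
+
+func main() {
+	// IOCounters returns a map keyed by device name, e.g. "sda".
+	counters, err := disk.IOCounters()
+	if err != nil {
+		panic(err)
+	}
+	for name, stat := range counters {
+		// ReadTime, WriteTime and IoTime are already in milliseconds.
+		fmt.Printf("%s: reads=%d writes=%d read_time=%dms io_time=%dms\n",
+			name, stat.ReadCount, stat.WriteCount, stat.ReadTime, stat.IoTime)
+	}
+}
+```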
+ + +### Measurements & Fields: + +- diskio + - reads (integer, counter) + - writes (integer, counter) + - read_bytes (integer, bytes) + - write_bytes (integer, bytes) + - read_time (integer, milliseconds) + - write_time (integer, milliseconds) + - io_time (integer, milliseconds) + - iops_in_progress (integer, counter) (since #2037, not yet in STABLE) + +### Tags: + +- All measurements have the following tags: + - name (device name) +- If configured to use serial numbers (default: disabled): + - serial (device serial number) + +### Example Output: + +``` +% telegraf -config ~/.telegraf/telegraf.conf -input-filter diskio -test +* Plugin: inputs.diskio, Collection 1 +> diskio,name=mmcblk1p2 io_time=244i,read_bytes=966656i,read_time=276i,reads=128i,write_bytes=0i,write_time=0i,writes=0i 1484916036000000000 +> diskio,name=mmcblk1boot1 io_time=264i,read_bytes=90112i,read_time=264i,reads=22i,write_bytes=0i,write_time=0i,writes=0i 1484916036000000000 +> diskio,name=mmcblk1boot0 io_time=212i,read_bytes=90112i,read_time=212i,reads=22i,write_bytes=0i,write_time=0i,writes=0i 1484916036000000000 +> diskio,name=mmcblk0 io_time=1855380i,read_bytes=135861248i,read_time=58484i,reads=4081i,write_bytes=364068864i,write_time=7128792i,writes=18019i 1484916036000000000 +> diskio,name=mmcblk0p1 io_time=1855256i,read_bytes=134915072i,read_time=58256i,reads=3958i,write_bytes=364068864i,write_time=7128792i,writes=18019i 1484916036000000000 +> diskio,name=mmcblk1 io_time=384i,read_bytes=2633728i,read_time=728i,reads=323i,write_bytes=0i,write_time=0i,writes=0i 1484916036000000000 +> diskio,name=mmcblk1p1 io_time=216i,read_bytes=860160i,read_time=288i,reads=106i,write_bytes=0i,write_time=0i,writes=0i 1484916036000000000 +``` From c15504c509752b688f853fce97eeff2e01e33cd5 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 23 Jan 2017 11:19:51 -0800 Subject: [PATCH 0072/1302] opentsdb: add tcp:// prefix if not present closes #2299 --- CHANGELOG.md | 1 + plugins/outputs/opentsdb/opentsdb.go | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c0c96476a..a3c26132a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -81,6 +81,7 @@ plugins, not just statsd. - [#1975](https://github.com/influxdata/telegraf/issues/1975) & [#2102](https://github.com/influxdata/telegraf/issues/2102): Fix thread-safety when using multiple instances of the statsd input plugin. - [#2027](https://github.com/influxdata/telegraf/issues/2027): docker input: interface conversion panic fix. - [#1814](https://github.com/influxdata/telegraf/issues/1814): snmp: ensure proper context is present on error messages +- [#2299](https://github.com/influxdata/telegraf/issues/2299): opentsdb: add tcp:// prefix if no scheme provided. 
## v1.1.2 [2016-12-12] diff --git a/plugins/outputs/opentsdb/opentsdb.go b/plugins/outputs/opentsdb/opentsdb.go index 84ff99058..ac4d1224e 100644 --- a/plugins/outputs/opentsdb/opentsdb.go +++ b/plugins/outputs/opentsdb/opentsdb.go @@ -59,6 +59,9 @@ func ToLineFormat(tags map[string]string) string { } func (o *OpenTSDB) Connect() error { + if !strings.HasPrefix(o.Host, "http") && !strings.HasPrefix(o.Host, "tcp") { + o.Host = "tcp://" + o.Host + } // Test Connection to OpenTSDB Server u, err := url.Parse(o.Host) if err != nil { @@ -68,11 +71,11 @@ func (o *OpenTSDB) Connect() error { uri := fmt.Sprintf("%s:%d", u.Host, o.Port) tcpAddr, err := net.ResolveTCPAddr("tcp", uri) if err != nil { - return fmt.Errorf("OpenTSDB: TCP address cannot be resolved") + return fmt.Errorf("OpenTSDB TCP address cannot be resolved: %s", err) } connection, err := net.DialTCP("tcp", nil, tcpAddr) if err != nil { - return fmt.Errorf("OpenTSDB: Telnet connect fail") + return fmt.Errorf("OpenTSDB Telnet connect fail: %s", err) } defer connection.Close() return nil From 22340ad98462d2c9092ea93a510db920963d422b Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 23 Jan 2017 13:50:52 -0800 Subject: [PATCH 0073/1302] Add newline to influx line-protocol if not present closes #2297 --- CHANGELOG.md | 3 ++- plugins/inputs/http_listener/http_listener.go | 3 --- plugins/parsers/influx/parser.go | 3 +++ plugins/parsers/influx/parser_test.go | 21 +++++++++++++++---- 4 files changed, 22 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a3c26132a..c9c85953f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -80,8 +80,9 @@ plugins, not just statsd. - [#1973](https://github.com/influxdata/telegraf/issues/1973): Partial fix: logparser CLF pattern with IPv6 addresses. - [#1975](https://github.com/influxdata/telegraf/issues/1975) & [#2102](https://github.com/influxdata/telegraf/issues/2102): Fix thread-safety when using multiple instances of the statsd input plugin. - [#2027](https://github.com/influxdata/telegraf/issues/2027): docker input: interface conversion panic fix. -- [#1814](https://github.com/influxdata/telegraf/issues/1814): snmp: ensure proper context is present on error messages +- [#1814](https://github.com/influxdata/telegraf/issues/1814): snmp: ensure proper context is present on error messages. - [#2299](https://github.com/influxdata/telegraf/issues/2299): opentsdb: add tcp:// prefix if no scheme provided. +- [#2297](https://github.com/influxdata/telegraf/issues/2297): influx parser: parse line-protocol without newlines. 
## v1.1.2 [2016-12-12] diff --git a/plugins/inputs/http_listener/http_listener.go b/plugins/inputs/http_listener/http_listener.go index 05551a966..0f426f809 100644 --- a/plugins/inputs/http_listener/http_listener.go +++ b/plugins/inputs/http_listener/http_listener.go @@ -300,9 +300,6 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { } func (h *HTTPListener) parse(b []byte, t time.Time) error { - if !bytes.HasSuffix(b, []byte("\n")) { - b = append(b, '\n') - } metrics, err := h.parser.ParseWithDefaultTime(b, t) for _, m := range metrics { diff --git a/plugins/parsers/influx/parser.go b/plugins/parsers/influx/parser.go index f04058552..c15c503f7 100644 --- a/plugins/parsers/influx/parser.go +++ b/plugins/parsers/influx/parser.go @@ -16,6 +16,9 @@ type InfluxParser struct { } func (p *InfluxParser) ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) { + if !bytes.HasSuffix(buf, []byte("\n")) { + buf = append(buf, '\n') + } // parse even if the buffer begins with a newline buf = bytes.TrimPrefix(buf, []byte("\n")) metrics, err := metric.ParseWithDefaultTime(buf, t) diff --git a/plugins/parsers/influx/parser_test.go b/plugins/parsers/influx/parser_test.go index 477cea36e..58531ff90 100644 --- a/plugins/parsers/influx/parser_test.go +++ b/plugins/parsers/influx/parser_test.go @@ -18,10 +18,11 @@ var ( ) const ( - validInflux = "cpu_load_short,cpu=cpu0 value=10 1257894000000000000\n" - validInfluxNewline = "\ncpu_load_short,cpu=cpu0 value=10 1257894000000000000\n" - invalidInflux = "I don't think this is line protocol\n" - invalidInflux2 = "{\"a\": 5, \"b\": {\"c\": 6}}\n" + validInflux = "cpu_load_short,cpu=cpu0 value=10 1257894000000000000\n" + validInfluxNewline = "\ncpu_load_short,cpu=cpu0 value=10 1257894000000000000\n" + validInfluxNoNewline = "cpu_load_short,cpu=cpu0 value=10 1257894000000000000" + invalidInflux = "I don't think this is line protocol\n" + invalidInflux2 = "{\"a\": 5, \"b\": {\"c\": 6}}\n" ) const influxMulti = ` @@ -69,6 +70,18 @@ func TestParseValidInflux(t *testing.T) { "cpu": "cpu0", }, metrics[0].Tags()) assert.Equal(t, exptime, metrics[0].Time().UnixNano()) + + metrics, err = parser.Parse([]byte(validInfluxNoNewline)) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + assert.Equal(t, "cpu_load_short", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "value": float64(10), + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{ + "cpu": "cpu0", + }, metrics[0].Tags()) + assert.Equal(t, exptime, metrics[0].Time().UnixNano()) } func TestParseLineValidInflux(t *testing.T) { From 6df3f0fdae91881167c27589e12cb99d0867f65f Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 17 Jan 2017 15:01:12 -0800 Subject: [PATCH 0074/1302] Run scheduled flushes in background doing this unblocks incoming metrics while waiting for a flush to take place. we have to create a semaphore so that we can 'skip' flushes that try to run while a flush is already running. closes #2262 --- CHANGELOG.md | 1 + agent/agent.go | 15 +++++++++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c9c85953f..fd37abcb8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ ### Bugfixes - [#2077](https://github.com/influxdata/telegraf/issues/2077): SQL Server Input - Arithmetic overflow error converting numeric to data type int. +- [#2262](https://github.com/influxdata/telegraf/issues/2262): Flush jitter can inhibit metric collection. 
## v1.2 [2017-01-00] diff --git a/agent/agent.go b/agent/agent.go index ab64154e0..a9e42643a 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -286,6 +286,7 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er }() ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration) + semaphore := make(chan struct{}, 1) for { select { case <-shutdown: @@ -295,8 +296,18 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er a.flush() return nil case <-ticker.C: - internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown) - a.flush() + go func() { + select { + case semaphore <- struct{}{}: + internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown) + a.flush() + <-semaphore + default: + // skipping this flush because one is already happening + log.Println("W! Skipping a scheduled flush because there is" + + " already a flush ongoing.") + } + }() case metric := <-metricC: // NOTE potential bottleneck here as we put each metric through the // processors serially. From 80411f99f06293f48352b8dcc6b804dc2ef859cb Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 23 Jan 2017 16:38:07 -0800 Subject: [PATCH 0075/1302] influxdb output: treat field type conflicts as a successful write If we write a batch of points and get a "field type conflict" error message in return, we should drop the entire batch of points because this indicates that one or more points have a type that doesnt match the database. These errors will never go away on their own, and InfluxDB will successfully write the points that dont have a conflict. closes #2245 --- CHANGELOG.md | 1 + plugins/outputs/influxdb/influxdb.go | 11 +++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fd37abcb8..d321e89b9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -84,6 +84,7 @@ plugins, not just statsd. - [#1814](https://github.com/influxdata/telegraf/issues/1814): snmp: ensure proper context is present on error messages. - [#2299](https://github.com/influxdata/telegraf/issues/2299): opentsdb: add tcp:// prefix if no scheme provided. - [#2297](https://github.com/influxdata/telegraf/issues/2297): influx parser: parse line-protocol without newlines. +- [#2245](https://github.com/influxdata/telegraf/issues/2245): influxdb output: fix field type conflict blocking output buffer. ## v1.1.2 [2016-12-12] diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 8c23b2c5a..999e1bc6f 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -200,8 +200,6 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { p := rand.Perm(len(i.conns)) for _, n := range p { if e := i.conns[n].Write(bp); e != nil { - // Log write failure - log.Printf("E! InfluxDB Output Error: %s", e) // If the database was not found, try to recreate it if strings.Contains(e.Error(), "database not found") { if errc := createDatabase(i.conns[n], i.Database); errc != nil { @@ -209,6 +207,15 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { i.Database) } } + if strings.Contains(e.Error(), "field type conflict") { + log.Printf("E! Field type conflict, dropping conflicted points: %s", e) + // setting err to nil, otherwise we will keep retrying and points + // w/ conflicting types will get stuck in the buffer forever. + err = nil + break + } + // Log write failure + log.Printf("E! 
InfluxDB Output Error: %s", e) } else { err = nil break From c882570983018c3d2aceeff1e3d2cd992c0d549f Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 23 Jan 2017 20:28:13 -0800 Subject: [PATCH 0076/1302] 32-bit binary for windows and freebsd closes #1346 closes #2218 --- scripts/build.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/build.py b/scripts/build.py index 0221cf3c4..57208bf7f 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -83,9 +83,9 @@ targets = { } supported_builds = { - "windows": [ "amd64" ], + "windows": [ "amd64", "i386" ], "linux": [ "amd64", "i386", "armhf", "armel", "arm64", "static_amd64" ], - "freebsd": [ "amd64" ] + "freebsd": [ "amd64", "i386" ] } supported_packages = { From b9ae3d6a57dbcf27aeaaeeea994e818a27f6a9be Mon Sep 17 00:00:00 2001 From: James Date: Tue, 24 Jan 2017 15:36:36 -0500 Subject: [PATCH 0077/1302] fix postgresql 'name', and 'oid' data types by switching to a driver (#1750) that handles them properly --- CHANGELOG.md | 3 + Godeps | 2 +- plugins/inputs/postgresql/README.md | 4 +- plugins/inputs/postgresql/connect.go | 99 +++++++++++++++++++ plugins/inputs/postgresql/postgresql.go | 14 +-- plugins/inputs/postgresql/postgresql_test.go | 31 +++++- .../postgresql_extensible.go | 23 +++-- .../postgresql_extensible_test.go | 25 +++++ testutil/accumulator.go | 38 ++++++- 9 files changed, 211 insertions(+), 28 deletions(-) create mode 100644 plugins/inputs/postgresql/connect.go diff --git a/CHANGELOG.md b/CHANGELOG.md index d321e89b9..68d43f2f9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -235,8 +235,11 @@ which can be installed via evaluated at every flush interval, rather than once at startup. This makes it consistent with the behavior of `collection_jitter`. +- postgresql plugins now handle oid and name typed columns seamlessly, previously they were ignored/skipped. + ### Features +- [#1617](https://github.com/influxdata/telegraf/pull/1617): postgresql_extensible now handles name and oid types correctly. - [#1413](https://github.com/influxdata/telegraf/issues/1413): Separate container_version from container_image tag. - [#1525](https://github.com/influxdata/telegraf/pull/1525): Support setting per-device and total metrics for Docker network and blockio. 
- [#1466](https://github.com/influxdata/telegraf/pull/1466): MongoDB input plugin: adding per DB stats from db.stats() diff --git a/Godeps b/Godeps index 885213c96..99606414e 100644 --- a/Godeps +++ b/Godeps @@ -33,7 +33,6 @@ github.com/kardianos/osext 29ae4ffbc9a6fe9fb2bc5029050ce6996ea1d3bc github.com/kardianos/service 5e335590050d6d00f3aa270217d288dda1c94d0a github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142 github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720 -github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36 github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453 github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504 @@ -63,3 +62,4 @@ gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715 gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886 gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4 +github.com/jackc/pgx bb73d8427902891bbad7b949b9c60b32949d935f diff --git a/plugins/inputs/postgresql/README.md b/plugins/inputs/postgresql/README.md index e5e9a8961..e309aa80f 100644 --- a/plugins/inputs/postgresql/README.md +++ b/plugins/inputs/postgresql/README.md @@ -4,8 +4,8 @@ This postgresql plugin provides metrics for your postgres database. It currently ``` pg version 9.2+ 9.1 8.3-9.0 8.1-8.2 7.4-8.0(unsupported) --- --- --- ------- ------- ------- -datid* x x x x -datname* x x x x +datid x x x x +datname x x x x numbackends x x x x x xact_commit x x x x x xact_rollback x x x x x diff --git a/plugins/inputs/postgresql/connect.go b/plugins/inputs/postgresql/connect.go new file mode 100644 index 000000000..77858cda2 --- /dev/null +++ b/plugins/inputs/postgresql/connect.go @@ -0,0 +1,99 @@ +package postgresql + +import ( + "database/sql" + "fmt" + "net" + "net/url" + "sort" + "strings" + + "github.com/jackc/pgx" + "github.com/jackc/pgx/stdlib" +) + +// pulled from lib/pq +// ParseURL no longer needs to be used by clients of this library since supplying a URL as a +// connection string to sql.Open() is now supported: +// +// sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full") +// +// It remains exported here for backwards-compatibility. +// +// ParseURL converts a url to a connection string for driver.Open. 
+// Example: +// +// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full" +// +// converts to: +// +// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full" +// +// A minimal example: +// +// "postgres://" +// +// This will be blank, causing driver.Open to use all of the defaults +func ParseURL(uri string) (string, error) { + u, err := url.Parse(uri) + if err != nil { + return "", err + } + + if u.Scheme != "postgres" && u.Scheme != "postgresql" { + return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme) + } + + var kvs []string + escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`) + accrue := func(k, v string) { + if v != "" { + kvs = append(kvs, k+"="+escaper.Replace(v)) + } + } + + if u.User != nil { + v := u.User.Username() + accrue("user", v) + + v, _ = u.User.Password() + accrue("password", v) + } + + if host, port, err := net.SplitHostPort(u.Host); err != nil { + accrue("host", u.Host) + } else { + accrue("host", host) + accrue("port", port) + } + + if u.Path != "" { + accrue("dbname", u.Path[1:]) + } + + q := u.Query() + for k := range q { + accrue(k, q.Get(k)) + } + + sort.Strings(kvs) // Makes testing easier (not a performance concern) + return strings.Join(kvs, " "), nil +} + +func Connect(address string) (*sql.DB, error) { + if strings.HasPrefix(address, "postgres://") || strings.HasPrefix(address, "postgresql://") { + return sql.Open("pgx", address) + } + + config, err := pgx.ParseDSN(address) + if err != nil { + return nil, err + } + + pool, err := pgx.NewConnPool(pgx.ConnPoolConfig{ConnConfig: config}) + if err != nil { + return nil, err + } + + return stdlib.OpenFromConnPool(pool) +} diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index 0e7cdb509..7019762ed 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -2,7 +2,6 @@ package postgresql import ( "bytes" - "database/sql" "fmt" "regexp" "sort" @@ -10,8 +9,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - - "github.com/lib/pq" ) type Postgresql struct { @@ -23,7 +20,7 @@ type Postgresql struct { sanitizedAddress string } -var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true} +var ignoredColumns = map[string]bool{"stats_reset": true} var sampleConfig = ` ## specify address via a url matching: @@ -71,7 +68,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { p.Address = localhost } - db, err := sql.Open("postgres", p.Address) + db, err := Connect(p.Address) if err != nil { return err } @@ -149,7 +146,7 @@ var passwordKVMatcher, _ = regexp.Compile("password=\\S+ ?") func (p *Postgresql) SanitizedAddress() (_ string, err error) { var canonicalizedAddress string if strings.HasPrefix(p.Address, "postgres://") || strings.HasPrefix(p.Address, "postgresql://") { - canonicalizedAddress, err = pq.ParseURL(p.Address) + canonicalizedAddress, err = ParseURL(p.Address) if err != nil { return p.sanitizedAddress, err } @@ -185,10 +182,7 @@ func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator) error { } if columnMap["datname"] != nil { // extract the database name from the column map - dbnameChars := (*columnMap["datname"]).([]uint8) - for i := 0; i < len(dbnameChars); i++ { - dbname.WriteString(string(dbnameChars[i])) - } + dbname.WriteString((*columnMap["datname"]).(string)) } else { dbname.WriteString("postgres") } diff --git a/plugins/inputs/postgresql/postgresql_test.go 
b/plugins/inputs/postgresql/postgresql_test.go index 64926f61e..a0690961d 100644 --- a/plugins/inputs/postgresql/postgresql_test.go +++ b/plugins/inputs/postgresql/postgresql_test.go @@ -28,6 +28,7 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { for _, col := range p.AllColumns { availableColumns[col] = true } + intMetrics := []string{ "xact_commit", "xact_rollback", @@ -42,7 +43,6 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { "temp_files", "temp_bytes", "deadlocks", - "numbackends", "buffers_alloc", "buffers_backend", "buffers_backend_fsync", @@ -53,9 +53,20 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { "maxwritten_clean", } + int32Metrics := []string{ + "numbackends", + } + floatMetrics := []string{ "blk_read_time", "blk_write_time", + "checkpoint_write_time", + "checkpoint_sync_time", + } + + stringMetrics := []string{ + "datname", + "datid", } metricsCounted := 0 @@ -68,6 +79,14 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { } } + for _, metric := range int32Metrics { + _, ok := availableColumns[metric] + if ok { + assert.True(t, acc.HasInt32Field("postgresql", metric)) + metricsCounted++ + } + } + for _, metric := range floatMetrics { _, ok := availableColumns[metric] if ok { @@ -76,8 +95,16 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { } } + for _, metric := range stringMetrics { + _, ok := availableColumns[metric] + if ok { + assert.True(t, acc.HasStringField("postgresql", metric)) + metricsCounted++ + } + } + assert.True(t, metricsCounted > 0) - //assert.Equal(t, len(availableColumns)-len(p.IgnoredColumns()), metricsCounted) + assert.Equal(t, len(availableColumns)-len(p.IgnoredColumns()), metricsCounted) } func TestPostgresqlTagsMetricsWithDatabaseName(t *testing.T) { diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index beb010fce..00729bf75 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -2,7 +2,6 @@ package postgresql_extensible import ( "bytes" - "database/sql" "fmt" "log" "regexp" @@ -10,8 +9,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - - "github.com/lib/pq" + "github.com/influxdata/telegraf/plugins/inputs/postgresql" ) type Postgresql struct { @@ -40,7 +38,7 @@ type query []struct { Measurement string } -var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true} +var ignoredColumns = map[string]bool{"stats_reset": true} var sampleConfig = ` ## specify address via a url matching: @@ -126,7 +124,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { p.Address = localhost } - db, err := sql.Open("postgres", p.Address) + db, err := postgresql.Connect(p.Address) if err != nil { return err } @@ -212,7 +210,7 @@ func (p *Postgresql) SanitizedAddress() (_ string, err error) { } var canonicalizedAddress string if strings.HasPrefix(p.Address, "postgres://") || strings.HasPrefix(p.Address, "postgresql://") { - canonicalizedAddress, err = pq.ParseURL(p.Address) + canonicalizedAddress, err = postgresql.ParseURL(p.Address) if err != nil { return p.sanitizedAddress, err } @@ -248,10 +246,7 @@ func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumula } if columnMap["datname"] != nil { // extract the database name from the column map - dbnameChars := (*columnMap["datname"]).([]uint8) - for i := 0; i < len(dbnameChars); i++ { - 
dbname.WriteString(string(dbnameChars[i])) - } + dbname.WriteString((*columnMap["datname"]).(string)) } else { dbname.WriteString("postgres") } @@ -275,19 +270,23 @@ COLUMN: if ignore || *val == nil { continue } + for _, tag := range p.AdditionalTags { if col != tag { continue } switch v := (*val).(type) { + case string: + tags[col] = v case []byte: tags[col] = string(v) - case int64: + case int64, int32, int: tags[col] = fmt.Sprintf("%d", v) + default: + log.Println("failed to add additional tag", col) } continue COLUMN } - if v, ok := (*val).([]byte); ok { fields[col] = string(v) } else { diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go index 7fd907102..f92284ee4 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go @@ -33,6 +33,7 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { for _, col := range p.AllColumns { availableColumns[col] = true } + intMetrics := []string{ "xact_commit", "xact_rollback", @@ -47,6 +48,9 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { "temp_files", "temp_bytes", "deadlocks", + } + + int32Metrics := []string{ "numbackends", } @@ -55,6 +59,11 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { "blk_write_time", } + stringMetrics := []string{ + "datname", + "datid", + } + metricsCounted := 0 for _, metric := range intMetrics { @@ -65,6 +74,14 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { } } + for _, metric := range int32Metrics { + _, ok := availableColumns[metric] + if ok { + assert.True(t, acc.HasInt32Field("postgresql", metric)) + metricsCounted++ + } + } + for _, metric := range floatMetrics { _, ok := availableColumns[metric] if ok { @@ -73,6 +90,14 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { } } + for _, metric := range stringMetrics { + _, ok := availableColumns[metric] + if ok { + assert.True(t, acc.HasStringField("postgresql", metric)) + metricsCounted++ + } + } + assert.True(t, metricsCounted > 0) assert.Equal(t, len(availableColumns)-len(p.IgnoredColumns()), metricsCounted) } diff --git a/testutil/accumulator.go b/testutil/accumulator.go index 2efee5572..4f131ec8f 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -221,7 +221,7 @@ func (a *Accumulator) AssertDoesNotContainMeasurement(t *testing.T, measurement } } -// HasIntValue returns true if the measurement has an Int value +// HasIntField returns true if the measurement has an Int value func (a *Accumulator) HasIntField(measurement string, field string) bool { a.Lock() defer a.Unlock() @@ -239,6 +239,42 @@ func (a *Accumulator) HasIntField(measurement string, field string) bool { return false } +// HasInt32Field returns true if the measurement has an Int value +func (a *Accumulator) HasInt32Field(measurement string, field string) bool { + a.Lock() + defer a.Unlock() + for _, p := range a.Metrics { + if p.Measurement == measurement { + for fieldname, value := range p.Fields { + if fieldname == field { + _, ok := value.(int32) + return ok + } + } + } + } + + return false +} + +// HasStringField returns true if the measurement has an String value +func (a *Accumulator) HasStringField(measurement string, field string) bool { + a.Lock() + defer a.Unlock() + for _, p := range a.Metrics { + if p.Measurement == measurement { + for fieldname, value := range p.Fields { + if fieldname == field { + _, ok := value.(string) + return ok + } + } + } + } + + return 
false +} + // HasUIntValue returns true if the measurement has a UInt value func (a *Accumulator) HasUIntField(measurement string, field string) bool { a.Lock() From be10b19760f9c9596d5d974741bf2e8570e0c5cc Mon Sep 17 00:00:00 2001 From: Pierre Fersing Date: Tue, 24 Jan 2017 21:38:10 +0100 Subject: [PATCH 0078/1302] Added more Windows metrics (#2290) Signed-off-by: Pierre Fersing --- etc/telegraf_windows.conf | 66 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 64 insertions(+), 2 deletions(-) diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index ca0357ef3..7380ab8a3 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -105,10 +105,11 @@ "% Privileged Time", "% User Time", "% Processor Time", + "% DPC Time", ] Measurement = "win_cpu" # Set to true to include _Total instance when querying for all (*). - #IncludeTotal=false + IncludeTotal=true [[inputs.win_perf_counters.object]] # Disk times and queues @@ -118,19 +119,51 @@ "% Idle Time", "% Disk Time","% Disk Read Time", "% Disk Write Time", - "% User Time", "Current Disk Queue Length", + "% Free Space", + "Free Megabytes", ] Measurement = "win_disk" # Set to true to include _Total instance when querying for all (*). #IncludeTotal=false + [[inputs.win_perf_counters.object]] + ObjectName = "PhysicalDisk" + Instances = ["*"] + Counters = [ + "Disk Read Bytes/sec", + "Disk Write Bytes/sec", + "Current Disk Queue Length", + "Disk Reads/sec", + "Disk Writes/sec", + "% Disk Time", + "% Disk Read Time", + "% Disk Write Time", + ] + Measurement = "win_diskio" + + [[inputs.win_perf_counters.object]] + ObjectName = "Network Interface" + Instances = ["*"] + Counters = [ + "Bytes Received/sec", + "Bytes Sent/sec", + "Packets Received/sec", + "Packets Sent/sec", + "Packets Received Discarded", + "Packets Outbound Discarded", + "Packets Received Errors", + "Packets Outbound Errors", + ] + Measurement = "win_net" + [[inputs.win_perf_counters.object]] ObjectName = "System" Counters = [ "Context Switches/sec", "System Calls/sec", "Processor Queue Length", + "System Up Time", ] Instances = ["------"] Measurement = "win_system" @@ -150,6 +183,10 @@ "Transition Faults/sec", "Pool Nonpaged Bytes", "Pool Paged Bytes", + "Standby Cache Reserve Bytes", + "Standby Cache Normal Priority Bytes", + "Standby Cache Core Bytes", + ] # Use 6 x - to remove the Instance bit from the query. Instances = ["------"] @@ -157,6 +194,31 @@ # Set to true to include _Total instance when querying for all (*). #IncludeTotal=false + [[inputs.win_perf_counters.object]] + # Example query where the Instance portion must be removed to get data back, + # such as from the Paging File object. 
+ ObjectName = "Paging File" + Counters = [ + "% Usage", + ] + Instances = ["_Total"] + Measurement = "win_swap" + + [[inputs.win_perf_counters.object]] + ObjectName = "Network Interface" + Instances = ["*"] + Counters = [ + "Bytes Sent/sec", + "Bytes Received/sec", + "Packets Sent/sec", + "Packets Received/sec", + "Packets Received Discarded", + "Packets Received Errors", + "Packets Outbound Discarded", + "Packets Outbound Errors", + ] + + # Windows system plugins using WMI (disabled by default, using # win_perf_counters over WMI is recommended) From a505123e60df6d260395fe2c18787ff723b2b783 Mon Sep 17 00:00:00 2001 From: Pierre Fersing Date: Tue, 24 Jan 2017 21:46:06 +0100 Subject: [PATCH 0079/1302] Improve win_perf_counters on non English systems (#2261) --- CHANGELOG.md | 2 +- .../win_perf_counters/win_perf_counters.go | 60 ++++++------------- 2 files changed, 20 insertions(+), 42 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 68d43f2f9..58912b2fe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -68,7 +68,7 @@ plugins, not just statsd. - [#1775](https://github.com/influxdata/telegraf/issues/1775): Cache & expire metrics for delivery to prometheus. - [#2146](https://github.com/influxdata/telegraf/issues/2146): Fix potential panic in aggregator plugin metric maker. - [#1843](https://github.com/influxdata/telegraf/pull/1843) & [#1668](https://github.com/influxdata/telegraf/issues/1668): Add optional ability to define PID as a tag. -- [#1730](https://github.com/influxdata/telegraf/issues/1730): Fix win_perf_counters not gathering non-English counters. +- [#1730](https://github.com/influxdata/telegraf/issues/1730) & [#2261](https://github.com/influxdata/telegraf/pull/2261): Fix win_perf_counters not gathering non-English counters. - [#2061](https://github.com/influxdata/telegraf/issues/2061): Fix panic when file stat info cannot be collected due to permissions or other issue(s). - [#2045](https://github.com/influxdata/telegraf/issues/2045): Graylog output should set short_message field. - [#1904](https://github.com/influxdata/telegraf/issues/1904): Hddtemp always put the value in the field temperature. 
diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go index 691f67a01..da59c3040 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters.go @@ -110,13 +110,23 @@ var sanitizedChars = strings.NewReplacer("/sec", "_persec", "/Sec", "_persec", " ", "_", "%", "Percent", `\`, "") func (m *Win_PerfCounters) AddItem(metrics *itemList, query string, objectName string, counter string, instance string, - measurement string, include_total bool) { + measurement string, include_total bool) error { var handle PDH_HQUERY var counterHandle PDH_HCOUNTER ret := PdhOpenQuery(0, 0, &handle) - ret = PdhAddCounter(handle, query, 0, &counterHandle) - _ = ret + if m.PreVistaSupport { + ret = PdhAddCounter(handle, query, 0, &counterHandle) + } else { + ret = PdhAddEnglishCounter(handle, query, 0, &counterHandle) + } + + // Call PdhCollectQueryData one time to check existance of the counter + ret = PdhCollectQueryData(handle) + if ret != ERROR_SUCCESS { + ret = PdhCloseQuery(handle) + return errors.New("Invalid query for Performance Counters") + } temp := &item{query, objectName, counter, instance, measurement, include_total, handle, counterHandle} @@ -127,39 +137,6 @@ func (m *Win_PerfCounters) AddItem(metrics *itemList, query string, objectName s metrics.items = make(map[int]*item) } metrics.items[index] = temp -} - -func (m *Win_PerfCounters) InvalidObject(exists uint32, query string, PerfObject perfobject, instance string, counter string) error { - if exists == 3221228472 { // PDH_CSTATUS_NO_OBJECT - if PerfObject.FailOnMissing { - err := errors.New("Performance object does not exist") - return err - } else { - fmt.Printf("Performance Object '%s' does not exist in query: %s\n", PerfObject.ObjectName, query) - } - } else if exists == 3221228473 { // PDH_CSTATUS_NO_COUNTER - - if PerfObject.FailOnMissing { - err := errors.New("Counter in Performance object does not exist") - return err - } else { - fmt.Printf("Counter '%s' does not exist in query: %s\n", counter, query) - } - } else if exists == 2147485649 { // PDH_CSTATUS_NO_INSTANCE - if PerfObject.FailOnMissing { - err := errors.New("Instance in Performance object does not exist") - return err - } else { - fmt.Printf("Instance '%s' does not exist in query: %s\n", instance, query) - - } - } else { - fmt.Printf("Invalid result: %v, query: %s\n", exists, query) - if PerfObject.FailOnMissing { - err := errors.New("Invalid query for Performance Counters") - return err - } - } return nil } @@ -188,17 +165,18 @@ func (m *Win_PerfCounters) ParseConfig(metrics *itemList) error { query = "\\" + objectname + "(" + instance + ")\\" + counter } - var exists uint32 = PdhValidatePath(query) + err := m.AddItem(metrics, query, objectname, counter, instance, + PerfObject.Measurement, PerfObject.IncludeTotal) - if exists == ERROR_SUCCESS { + if err == nil { if m.PrintValid { fmt.Printf("Valid: %s\n", query) } - m.AddItem(metrics, query, objectname, counter, instance, - PerfObject.Measurement, PerfObject.IncludeTotal) } else { if PerfObject.FailOnMissing || PerfObject.WarnOnMissing { - err := m.InvalidObject(exists, query, PerfObject, instance, counter) + fmt.Printf("Invalid query: %s\n", query) + } + if PerfObject.FailOnMissing { return err } } From d7a8bb22145a68c36a3e01a718393ec442481ece Mon Sep 17 00:00:00 2001 From: Will Pearson Date: Tue, 24 Jan 2017 20:50:29 +0000 Subject: [PATCH 0080/1302] Fix problem with graphite talking to closed 
connections (#2171)

We were having problems with telegraf talking to carbon-relay-ng using
the graphite output. When the carbon-relay-ng server restarted the
connection, the telegraf side would go into CLOSE_WAIT but telegraf
would continue to send statistics through the connection.

Reading around, it seems you need to do a read from the connection and
see an EOF error. We've implemented this and added a test that
replicates roughly the error we were having.

Pair: @whpearson @joshmyers
---
 plugins/outputs/graphite/graphite.go      | 29 ++++++++++++++-
 plugins/outputs/graphite/graphite_test.go | 43 +++++++++++++++++++----
 2 files changed, 65 insertions(+), 7 deletions(-)

diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go
index 24f8e08d0..8d4447cdd 100644
--- a/plugins/outputs/graphite/graphite.go
+++ b/plugins/outputs/graphite/graphite.go
@@ -2,6 +2,7 @@ package graphite
 
 import (
 	"errors"
+	"io"
 	"log"
 	"math/rand"
 	"net"
@@ -71,6 +72,31 @@ func (g *Graphite) Description() string {
 	return "Configuration for Graphite server to send metrics to"
 }
 
+// We need to check for EOF because we can write to a half-closed connection
+// without noticing anything is wrong: the connection stays in CLOSE_WAIT,
+// and we can happily write and flush without getting errors (in Go) while
+// getting RST tcp packets back (!). We detect that by reading an EOF.
+// Props to Tv, via the authors of carbon-relay-ng, for this trick.
+func checkEOF(conn net.Conn) {
+	b := make([]byte, 1024)
+	conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
+	num, err := conn.Read(b)
+	if err == io.EOF {
+		log.Printf("E! Conn %s is closed. closing conn explicitly", conn)
+		conn.Close()
+		return
+	}
+	// just in case I misunderstand something or the remote behaves badly
+	if num != 0 {
+		log.Printf("I! conn %s .conn.Read data? did not expect that. data: %s\n", conn, b[:num])
+	}
+	// Log non-timeout errors or close.
+	if e, ok := err.(net.Error); !(ok && e.Timeout()) {
+		log.Printf("E! conn %s checkEOF .conn.Read returned err != EOF, which is unexpected. closing conn. error: %s\n", conn, err)
+		conn.Close()
+	}
+}
+
 // Choose a random server in the cluster to write to until a successful write
 // occurs, logging each unsuccessful. If all servers fail, return error.
 func (g *Graphite) Write(metrics []telegraf.Metric) error {
@@ -91,13 +117,13 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error {
 
 	// This will get set to nil if a successful write occurs
 	err = errors.New("Could not write to any Graphite server in cluster\n")
-
 	// Send data to a random server
 	p := rand.Perm(len(g.conns))
 	for _, n := range p {
 		if g.Timeout > 0 {
 			g.conns[n].SetWriteDeadline(time.Now().Add(time.Duration(g.Timeout) * time.Second))
 		}
+		checkEOF(g.conns[n])
 		if _, e := g.conns[n].Write(batch); e != nil {
 			// Error
 			log.Println("E! Graphite Error: " + e.Error())
@@ -110,6 +136,7 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error {
 	}
 	// try to reconnect
 	if err != nil {
+		log.Println("E!
Reconnecting: ") g.Connect() } return err diff --git a/plugins/outputs/graphite/graphite_test.go b/plugins/outputs/graphite/graphite_test.go index c4f132725..4f1f2fef6 100644 --- a/plugins/outputs/graphite/graphite_test.go +++ b/plugins/outputs/graphite/graphite_test.go @@ -43,7 +43,8 @@ func TestGraphiteOK(t *testing.T) { var wg sync.WaitGroup // Start TCP server wg.Add(1) - go TCPServer(t, &wg) + t.Log("Starting server") + go TCPServer1(t, &wg) // Give the fake graphite TCP server some time to start: time.Sleep(time.Millisecond * 100) @@ -51,6 +52,7 @@ func TestGraphiteOK(t *testing.T) { g := Graphite{ Prefix: "my.prefix", } + // Init metrics m1, _ := metric.New( "mymeasurement", @@ -72,29 +74,58 @@ func TestGraphiteOK(t *testing.T) { ) // Prepare point list - metrics := []telegraf.Metric{m1, m2, m3} + metrics := []telegraf.Metric{m1} + metrics2 := []telegraf.Metric{m2, m3} err1 := g.Connect() require.NoError(t, err1) // Send Data + t.Log("Send first data") err2 := g.Write(metrics) require.NoError(t, err2) // Waiting TCPserver wg.Wait() + t.Log("Finished Waiting for first data") + var wg2 sync.WaitGroup + // Start TCP server + time.Sleep(time.Millisecond * 100) + wg2.Add(1) + go TCPServer2(t, &wg2) + time.Sleep(time.Millisecond * 100) + //Write but expect an error, but reconnect + g.Write(metrics2) + err3 := g.Write(metrics2) + t.Log("Finished writing second data, it should have failed") + //Actually write the new metrics + + require.NoError(t, err3) + t.Log("Finished writing third data") + wg2.Wait() g.Close() } -func TCPServer(t *testing.T, wg *sync.WaitGroup) { - tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") +func TCPServer1(t *testing.T, wg *sync.WaitGroup) { defer wg.Done() - conn, _ := tcpServer.Accept() + tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + conn, _ := (tcpServer).Accept() reader := bufio.NewReader(conn) tp := textproto.NewReader(reader) data1, _ := tp.ReadLine() assert.Equal(t, "my.prefix.192_168_0_1.mymeasurement.myfield 3.14 1289430000", data1) + conn.Close() + tcpServer.Close() +} + +func TCPServer2(t *testing.T, wg *sync.WaitGroup) { + defer wg.Done() + tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + conn2, _ := (tcpServer).Accept() + reader := bufio.NewReader(conn2) + tp := textproto.NewReader(reader) data2, _ := tp.ReadLine() assert.Equal(t, "my.prefix.192_168_0_1.mymeasurement 3.14 1289430000", data2) data3, _ := tp.ReadLine() assert.Equal(t, "my.prefix.192_168_0_1.my_measurement 3.14 1289430000", data3) - conn.Close() + conn2.Close() + tcpServer.Close() } From 822333690f60cfdbf7859317a36b39657dd27ad6 Mon Sep 17 00:00:00 2001 From: Kali Hernandez Date: Tue, 24 Jan 2017 23:54:19 +0100 Subject: [PATCH 0081/1302] Debian package: check for group before useradd (#2107) Fixes #2106 --- scripts/post-install.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/scripts/post-install.sh b/scripts/post-install.sh index 95045be1f..45a19d26c 100644 --- a/scripts/post-install.sh +++ b/scripts/post-install.sh @@ -26,7 +26,12 @@ function install_chkconfig { id telegraf &>/dev/null if [[ $? -ne 0 ]]; then - useradd -r -K USERGROUPS_ENAB=yes -M telegraf -s /bin/false -d /etc/telegraf + grep "^telegraf:" /etc/group &>/dev/null + if [[ $? 
-ne 0 ]]; then + useradd -r -K USERGROUPS_ENAB=yes -M telegraf -s /bin/false -d /etc/telegraf + else + useradd -r -K USERGROUPS_ENAB=yes -M telegraf -s /bin/false -d /etc/telegraf -g telegraf + fi fi test -d $LOG_DIR || mkdir -p $LOG_DIR From c4d4185fb5155d2d4298ef3e34500a5e5f4e341d Mon Sep 17 00:00:00 2001 From: Jonas Hahnfeld Date: Tue, 24 Jan 2017 23:57:43 +0100 Subject: [PATCH 0082/1302] snmp: Allow lines with empty or missing tags (#2172) The changes in #1848 resulted in lines being dropped if they had an empty tag. Let's allow all lines that have empty or missing tags! --- plugins/inputs/snmp/snmp.go | 27 +++++++++++---------------- plugins/inputs/snmp/snmp_test.go | 17 ++++++++++++++++- 2 files changed, 27 insertions(+), 17 deletions(-) diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 6f515e227..9296bc043 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -433,9 +433,7 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { if err != nil { return nil, Errorf(err, "converting %q (OID %s) for field %s", ent.Value, ent.Name, f.Name) } - if fvs, ok := fv.(string); !ok || fvs != "" { - ifv[""] = fv - } + ifv[""] = fv } } else { err := gs.Walk(oid, func(ent gosnmp.SnmpPDU) error { @@ -456,9 +454,7 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { if err != nil { return Errorf(err, "converting %q (OID %s) for field %s", ent.Value, ent.Name, f.Name) } - if fvs, ok := fv.(string); !ok || fvs != "" { - ifv[idx] = fv - } + ifv[idx] = fv return nil }) if err != nil { @@ -476,14 +472,17 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { rtr.Fields = map[string]interface{}{} rows[i] = rtr } - if f.IsTag { - if vs, ok := v.(string); ok { - rtr.Tags[f.Name] = vs + // don't add an empty string + if vs, ok := v.(string); !ok || vs != "" { + if f.IsTag { + if ok { + rtr.Tags[f.Name] = vs + } else { + rtr.Tags[f.Name] = fmt.Sprintf("%v", v) + } } else { - rtr.Tags[f.Name] = fmt.Sprintf("%v", v) + rtr.Fields[f.Name] = v } - } else { - rtr.Fields[f.Name] = v } } } @@ -494,10 +493,6 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { Rows: make([]RTableRow, 0, len(rows)), } for _, r := range rows { - if len(r.Tags) < tagCount { - // don't add rows which are missing tags, as without tags you can't filter - continue - } rt.Rows = append(rt.Rows, r) } return &rt, nil diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index ed01508f2..62b19fcea 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -457,9 +457,24 @@ func TestTableBuild_walk(t *testing.T) { "myfield4": 22, }, } - assert.Len(t, tb.Rows, 2) + rtr3 := RTableRow{ + Tags: map[string]string{}, + Fields: map[string]interface{}{ + "myfield2": 0, + "myfield3": float64(0.0), + }, + } + rtr4 := RTableRow{ + Tags: map[string]string{}, + Fields: map[string]interface{}{ + "myfield3": float64(9.999), + }, + } + assert.Len(t, tb.Rows, 4) assert.Contains(t, tb.Rows, rtr1) assert.Contains(t, tb.Rows, rtr2) + assert.Contains(t, tb.Rows, rtr3) + assert.Contains(t, tb.Rows, rtr4) } func TestTableBuild_noWalk(t *testing.T) { From 168270ea5faf4eadb84942fb4531adeb12ac482f Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 24 Jan 2017 15:27:44 -0800 Subject: [PATCH 0083/1302] ntpq: correct number of seconds in an hour closes #2256 --- plugins/inputs/ntpq/ntpq.go | 2 +- plugins/inputs/ntpq/ntpq_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff 
--git a/plugins/inputs/ntpq/ntpq.go b/plugins/inputs/ntpq/ntpq.go index 674cd7216..601d5b2df 100644 --- a/plugins/inputs/ntpq/ntpq.go +++ b/plugins/inputs/ntpq/ntpq.go @@ -136,7 +136,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { continue } // seconds in an hour - mFields[key] = int64(m) * 360 + mFields[key] = int64(m) * 3600 continue case strings.HasSuffix(when, "d"): m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "d")) diff --git a/plugins/inputs/ntpq/ntpq_test.go b/plugins/inputs/ntpq/ntpq_test.go index 4b6489949..68abab7be 100644 --- a/plugins/inputs/ntpq/ntpq_test.go +++ b/plugins/inputs/ntpq/ntpq_test.go @@ -171,7 +171,7 @@ func TestHoursNTPQ(t *testing.T) { assert.NoError(t, n.Gather(&acc)) fields := map[string]interface{}{ - "when": int64(720), + "when": int64(7200), "poll": int64(256), "reach": int64(37), "delay": float64(51.016), From 4a5d3136934bf6d25ea2c6fb0b8d0aedc13baa18 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Sun, 4 Dec 2016 20:18:13 +0000 Subject: [PATCH 0084/1302] Improve the InfluxDB through-put performance This changes the current use of the InfluxDB client to instead use a baked-in client that uses the fasthttp library. This allows for significantly smaller allocations, the re-use of http body buffers, and the re-use of the actual bytes of the line-protocol metric representations. --- Godeps | 2 + metric.go | 9 +- metric/metric.go | 42 ++ metric/reader.go | 155 ++++++ metric/reader_test.go | 487 ++++++++++++++++++ plugins/inputs/http_listener/http_listener.go | 3 + plugins/outputs/influxdb/client/client.go | 22 + plugins/outputs/influxdb/client/http.go | 258 ++++++++++ plugins/outputs/influxdb/client/http_test.go | 343 ++++++++++++ plugins/outputs/influxdb/client/udp.go | 99 ++++ plugins/outputs/influxdb/client/udp_test.go | 163 ++++++ plugins/outputs/influxdb/influxdb.go | 124 ++--- plugins/outputs/influxdb/influxdb_test.go | 113 +++- 13 files changed, 1735 insertions(+), 85 deletions(-) create mode 100644 metric/reader.go create mode 100644 metric/reader_test.go create mode 100644 plugins/outputs/influxdb/client/client.go create mode 100644 plugins/outputs/influxdb/client/http.go create mode 100644 plugins/outputs/influxdb/client/http_test.go create mode 100644 plugins/outputs/influxdb/client/udp.go create mode 100644 plugins/outputs/influxdb/client/udp_test.go diff --git a/Godeps b/Godeps index 99606414e..83b9e4561 100644 --- a/Godeps +++ b/Godeps @@ -50,6 +50,8 @@ github.com/shirou/gopsutil 1516eb9ddc5e61ba58874047a98f8b44b5e585e8 github.com/soniah/gosnmp 3fe3beb30fa9700988893c56a63b1df8e1b68c26 github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744 github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c +github.com/valyala/bytebufferpool e746df99fe4a3986f4d4f79e13c1e0117ce9c2f7 +github.com/valyala/fasthttp 2f4876aaf2b591786efc9b49f34b86ad44c25074 github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2 github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8 diff --git a/metric.go b/metric.go index cb230512f..b1ab1b29f 100644 --- a/metric.go +++ b/metric.go @@ -19,8 +19,15 @@ const ( ) type Metric interface { + // Serialize serializes the metric into a line-protocol byte buffer, + // including a newline at the end. Serialize() []byte - String() string // convenience function for string(Serialize()) + // same as Serialize, but avoids an allocation. + // returns number of bytes copied into dst. 
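
The new SerializeTo contract matters for callers: it copies until dst is full and reports only the byte count, so a caller that wants whole metrics must consult Len() first, exactly as the reader introduced below does. A minimal sketch of that idiom (fillBuffer and the package name are hypothetical, not part of this patch):

```go
package sketch

import "github.com/influxdata/telegraf"

// fillBuffer packs as many whole metrics as fit into a caller-owned
// buffer. Len() is checked before each copy because SerializeTo simply
// stops writing when dst is full rather than reporting truncation.
func fillBuffer(buf []byte, ms []telegraf.Metric) (n int, rest []telegraf.Metric) {
	for i, m := range ms {
		if m.Len() >= len(buf)-n {
			return n, ms[i:] // caller flushes buf, then continues with rest
		}
		n += m.SerializeTo(buf[n:])
	}
	return n, nil
}
```
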
+ SerializeTo(dst []byte) int + // String is the same as Serialize, but returns a string. + String() string + // Copy deep-copies the metric. Copy() Metric // Split will attempt to return multiple metrics with the same timestamp // whose string representations are no longer than maxSize. diff --git a/metric/metric.go b/metric/metric.go index 8a18c0f2c..4fbee0ad1 100644 --- a/metric/metric.go +++ b/metric/metric.go @@ -178,6 +178,48 @@ func (m *metric) Serialize() []byte { return tmp } +func (m *metric) SerializeTo(dst []byte) int { + i := 0 + if i >= len(dst) { + return i + } + + i += copy(dst[i:], m.name) + if i >= len(dst) { + return i + } + + i += copy(dst[i:], m.tags) + if i >= len(dst) { + return i + } + + dst[i] = ' ' + i++ + if i >= len(dst) { + return i + } + + i += copy(dst[i:], m.fields) + if i >= len(dst) { + return i + } + + dst[i] = ' ' + i++ + if i >= len(dst) { + return i + } + + i += copy(dst[i:], m.t) + if i >= len(dst) { + return i + } + dst[i] = '\n' + + return i + 1 +} + func (m *metric) Split(maxSize int) []telegraf.Metric { if m.Len() < maxSize { return []telegraf.Metric{m} diff --git a/metric/reader.go b/metric/reader.go new file mode 100644 index 000000000..df0729963 --- /dev/null +++ b/metric/reader.go @@ -0,0 +1,155 @@ +package metric + +import ( + "io" + + "github.com/influxdata/telegraf" +) + +type state int + +const ( + _ state = iota + // normal state copies whole metrics into the given buffer until we can't + // fit the next metric. + normal + // split state means that we have a metric that we were able to split, so + // that we can fit it into multiple metrics (and calls to Read) + split + // overflow state means that we have a metric that didn't fit into a single + // buffer, and needs to be split across multiple calls to Read. + overflow + // splitOverflow state means that a split metric didn't fit into a single + // buffer, and needs to be split across multiple calls to Read. + splitOverflow + // done means we're done reading metrics, and now always return (0, io.EOF) + done +) + +type reader struct { + metrics []telegraf.Metric + splitMetrics []telegraf.Metric + buf []byte + state state + + // metric index + iM int + // split metric index + iSM int + // buffer index + iB int +} + +func NewReader(metrics []telegraf.Metric) io.Reader { + return &reader{ + metrics: metrics, + state: normal, + } +} + +func (r *reader) Read(p []byte) (n int, err error) { + var i int + switch r.state { + case done: + return 0, io.EOF + case normal: + for { + // this for-loop is the sunny-day scenario, where we are given a + // buffer that is large enough to hold at least a single metric. + // all of the cases below it are edge-cases. + if r.metrics[r.iM].Len() < len(p[i:]) { + i += r.metrics[r.iM].SerializeTo(p[i:]) + } else { + break + } + r.iM++ + if r.iM == len(r.metrics) { + r.state = done + return i, io.EOF + } + } + + // if we haven't written any bytes, check if we can split the current + // metric into multiple full metrics at a smaller size. + if i == 0 { + tmp := r.metrics[r.iM].Split(len(p)) + if len(tmp) > 1 { + r.splitMetrics = tmp + r.state = split + if r.splitMetrics[0].Len() < len(p) { + i += r.splitMetrics[0].SerializeTo(p) + r.iSM = 1 + } else { + // splitting didn't quite work, so we'll drop down and + // overflow the metric. + r.state = normal + r.iSM = 0 + } + } + } + + // if we haven't written any bytes and we're not at the end of the metrics + // slice, then it means we have a single metric that is larger than the + // provided buffer. 
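
Put differently, the reader degrades gracefully: whole metrics while they fit, split metrics when a buffer is too small for a full one, and byte-wise overflow only for a single metric that cannot be split down to the buffer size. A rough consumer-side sketch (drain, send, and the 512-byte limit are assumptions for illustration):

```go
package sketch

import (
	"io"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
)

// drain streams a batch through the reader in fixed-size chunks, the way
// a payload-limited transport would. send is a placeholder supplied by
// the caller; each Read returns whole (or cleanly split) metrics when
// they fit, and raw overflow bytes only for a single oversized metric.
func drain(batch []telegraf.Metric, send func([]byte)) {
	r := metric.NewReader(batch)
	buf := make([]byte, 512) // assumed payload limit
	for {
		n, err := r.Read(buf)
		if n > 0 {
			send(buf[:n])
		}
		if err == io.EOF {
			return
		}
	}
}
```
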
+ if i == 0 { + r.buf = r.metrics[r.iM].Serialize() + i += copy(p, r.buf[r.iB:]) + r.iB += i + r.state = overflow + } + + case split: + if r.splitMetrics[r.iSM].Len() < len(p) { + // write the current split metric + i += r.splitMetrics[r.iSM].SerializeTo(p) + r.iSM++ + if r.iSM >= len(r.splitMetrics) { + // done writing the current split metrics + r.iSM = 0 + r.iM++ + if r.iM == len(r.metrics) { + r.state = done + return i, io.EOF + } + r.state = normal + } + } else { + // This would only happen if we split the metric, and then a + // subsequent buffer was smaller than the initial one given, + // so that our split metric no longer fits. + r.buf = r.splitMetrics[r.iSM].Serialize() + i += copy(p, r.buf[r.iB:]) + r.iB += i + r.state = splitOverflow + } + + case splitOverflow: + i = copy(p, r.buf[r.iB:]) + r.iB += i + if r.iB >= len(r.buf) { + r.iB = 0 + r.iSM++ + if r.iSM == len(r.splitMetrics) { + r.iM++ + r.state = normal + } else { + r.state = split + } + } + + case overflow: + i = copy(p, r.buf[r.iB:]) + r.iB += i + if r.iB >= len(r.buf) { + r.iB = 0 + r.iM++ + if r.iM == len(r.metrics) { + r.state = done + return i, io.EOF + } + r.state = normal + } + } + + return i, nil +} diff --git a/metric/reader_test.go b/metric/reader_test.go new file mode 100644 index 000000000..a1c864ad5 --- /dev/null +++ b/metric/reader_test.go @@ -0,0 +1,487 @@ +package metric + +import ( + "io" + "io/ioutil" + "regexp" + "testing" + "time" + + "github.com/influxdata/telegraf" + + "github.com/stretchr/testify/assert" +) + +func BenchmarkMetricReader(b *testing.B) { + metrics := make([]telegraf.Metric, 10) + for i := 0; i < 10; i++ { + metrics[i], _ = New("foo", map[string]string{}, + map[string]interface{}{"value": int64(1)}, time.Now()) + } + for n := 0; n < b.N; n++ { + r := NewReader(metrics) + io.Copy(ioutil.Discard, r) + } +} + +func TestMetricReader(t *testing.T) { + ts := time.Unix(1481032190, 0) + metrics := make([]telegraf.Metric, 10) + for i := 0; i < 10; i++ { + metrics[i], _ = New("foo", map[string]string{}, + map[string]interface{}{"value": int64(1)}, ts) + } + + r := NewReader(metrics) + + buf := make([]byte, 35) + for i := 0; i < 10; i++ { + n, err := r.Read(buf) + if err != nil { + assert.True(t, err == io.EOF, err.Error()) + } + assert.Equal(t, 33, n) + assert.Equal(t, "foo value=1i 1481032190000000000\n", string(buf[0:n])) + } + + // reader should now be done, and always return 0, io.EOF + for i := 0; i < 10; i++ { + n, err := r.Read(buf) + assert.True(t, err == io.EOF, err.Error()) + assert.Equal(t, 0, n) + } +} + +func TestMetricReader_OverflowMetric(t *testing.T) { + ts := time.Unix(1481032190, 0) + m, _ := New("foo", map[string]string{}, + map[string]interface{}{"value": int64(10)}, ts) + metrics := []telegraf.Metric{m} + + r := NewReader(metrics) + buf := make([]byte, 5) + + tests := []struct { + exp string + err error + n int + }{ + { + "foo v", + nil, + 5, + }, + { + "alue=", + nil, + 5, + }, + { + "10i 1", + nil, + 5, + }, + { + "48103", + nil, + 5, + }, + { + "21900", + nil, + 5, + }, + { + "00000", + nil, + 5, + }, + { + "000\n", + io.EOF, + 4, + }, + { + "", + io.EOF, + 0, + }, + } + + for _, test := range tests { + n, err := r.Read(buf) + assert.Equal(t, test.n, n) + assert.Equal(t, test.exp, string(buf[0:n])) + assert.Equal(t, test.err, err) + } +} + +func TestMetricReader_OverflowMultipleMetrics(t *testing.T) { + ts := time.Unix(1481032190, 0) + m, _ := New("foo", map[string]string{}, + map[string]interface{}{"value": int64(10)}, ts) + metrics := []telegraf.Metric{m, m.Copy()} + 
+ r := NewReader(metrics) + buf := make([]byte, 10) + + tests := []struct { + exp string + err error + n int + }{ + { + "foo value=", + nil, + 10, + }, + { + "10i 148103", + nil, + 10, + }, + { + "2190000000", + nil, + 10, + }, + { + "000\n", + nil, + 4, + }, + { + "foo value=", + nil, + 10, + }, + { + "10i 148103", + nil, + 10, + }, + { + "2190000000", + nil, + 10, + }, + { + "000\n", + io.EOF, + 4, + }, + { + "", + io.EOF, + 0, + }, + } + + for _, test := range tests { + n, err := r.Read(buf) + assert.Equal(t, test.n, n) + assert.Equal(t, test.exp, string(buf[0:n])) + assert.Equal(t, test.err, err) + } +} + +// test splitting a metric +func TestMetricReader_SplitMetric(t *testing.T) { + ts := time.Unix(1481032190, 0) + m1, _ := New("foo", map[string]string{}, + map[string]interface{}{ + "value1": int64(10), + "value2": int64(10), + "value3": int64(10), + "value4": int64(10), + "value5": int64(10), + "value6": int64(10), + }, + ts, + ) + metrics := []telegraf.Metric{m1} + + r := NewReader(metrics) + buf := make([]byte, 60) + + tests := []struct { + expRegex string + err error + n int + }{ + { + `foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`, + nil, + 57, + }, + { + `foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`, + io.EOF, + 57, + }, + { + "", + io.EOF, + 0, + }, + } + + for _, test := range tests { + n, err := r.Read(buf) + assert.Equal(t, test.n, n) + re := regexp.MustCompile(test.expRegex) + assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n])) + assert.Equal(t, test.err, err) + } +} + +// test an array with one split metric and one unsplit +func TestMetricReader_SplitMetric2(t *testing.T) { + ts := time.Unix(1481032190, 0) + m1, _ := New("foo", map[string]string{}, + map[string]interface{}{ + "value1": int64(10), + "value2": int64(10), + "value3": int64(10), + "value4": int64(10), + "value5": int64(10), + "value6": int64(10), + }, + ts, + ) + m2, _ := New("foo", map[string]string{}, + map[string]interface{}{ + "value1": int64(10), + }, + ts, + ) + metrics := []telegraf.Metric{m1, m2} + + r := NewReader(metrics) + buf := make([]byte, 60) + + tests := []struct { + expRegex string + err error + n int + }{ + { + `foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`, + nil, + 57, + }, + { + `foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`, + nil, + 57, + }, + { + `foo value1=10i 1481032190000000000\n`, + io.EOF, + 35, + }, + { + "", + io.EOF, + 0, + }, + } + + for _, test := range tests { + n, err := r.Read(buf) + assert.Equal(t, test.n, n) + re := regexp.MustCompile(test.expRegex) + assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n])) + assert.Equal(t, test.err, err) + } +} + +// test split that results in metrics that are still too long, which results in +// the reader falling back to regular overflow. 
+func TestMetricReader_SplitMetricTooLong(t *testing.T) { + ts := time.Unix(1481032190, 0) + m1, _ := New("foo", map[string]string{}, + map[string]interface{}{ + "value1": int64(10), + "value2": int64(10), + }, + ts, + ) + metrics := []telegraf.Metric{m1} + + r := NewReader(metrics) + buf := make([]byte, 30) + + tests := []struct { + expRegex string + err error + n int + }{ + { + `foo value\d=10i,value\d=10i 1481`, + nil, + 30, + }, + { + `032190000000000\n`, + io.EOF, + 16, + }, + { + "", + io.EOF, + 0, + }, + } + + for _, test := range tests { + n, err := r.Read(buf) + assert.Equal(t, test.n, n) + re := regexp.MustCompile(test.expRegex) + assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n])) + assert.Equal(t, test.err, err) + } +} + +// test split with a changing buffer size in the middle of subsequent calls +// to Read +func TestMetricReader_SplitMetricChangingBuffer(t *testing.T) { + ts := time.Unix(1481032190, 0) + m1, _ := New("foo", map[string]string{}, + map[string]interface{}{ + "value1": int64(10), + "value2": int64(10), + "value3": int64(10), + }, + ts, + ) + m2, _ := New("foo", map[string]string{}, + map[string]interface{}{ + "value1": int64(10), + }, + ts, + ) + metrics := []telegraf.Metric{m1, m2} + + r := NewReader(metrics) + + tests := []struct { + expRegex string + err error + n int + buf []byte + }{ + { + `foo value\d=10i 1481032190000000000\n`, + nil, + 35, + make([]byte, 36), + }, + { + `foo value\d=10i 148103219000000`, + nil, + 30, + make([]byte, 30), + }, + { + `0000\n`, + nil, + 5, + make([]byte, 30), + }, + { + `foo value\d=10i 1481032190000000000\n`, + nil, + 35, + make([]byte, 36), + }, + { + `foo value1=10i 1481032190000000000\n`, + io.EOF, + 35, + make([]byte, 36), + }, + { + "", + io.EOF, + 0, + make([]byte, 36), + }, + } + + for _, test := range tests { + n, err := r.Read(test.buf) + assert.Equal(t, test.n, n, test.expRegex) + re := regexp.MustCompile(test.expRegex) + assert.True(t, re.MatchString(string(test.buf[0:n])), string(test.buf[0:n])) + assert.Equal(t, test.err, err, test.expRegex) + } +} + +// test split with a changing buffer size in the middle of subsequent calls +// to Read +func TestMetricReader_SplitMetricChangingBuffer2(t *testing.T) { + ts := time.Unix(1481032190, 0) + m1, _ := New("foo", map[string]string{}, + map[string]interface{}{ + "value1": int64(10), + "value2": int64(10), + }, + ts, + ) + m2, _ := New("foo", map[string]string{}, + map[string]interface{}{ + "value1": int64(10), + }, + ts, + ) + metrics := []telegraf.Metric{m1, m2} + + r := NewReader(metrics) + + tests := []struct { + expRegex string + err error + n int + buf []byte + }{ + { + `foo value\d=10i 1481032190000000000\n`, + nil, + 35, + make([]byte, 36), + }, + { + `foo value\d=10i 148103219000000`, + nil, + 30, + make([]byte, 30), + }, + { + `0000\n`, + nil, + 5, + make([]byte, 30), + }, + { + `foo value1=10i 1481032190000000000\n`, + io.EOF, + 35, + make([]byte, 36), + }, + { + "", + io.EOF, + 0, + make([]byte, 36), + }, + } + + for _, test := range tests { + n, err := r.Read(test.buf) + assert.Equal(t, test.n, n, test.expRegex) + re := regexp.MustCompile(test.expRegex) + assert.True(t, re.MatchString(string(test.buf[0:n])), string(test.buf[0:n])) + assert.Equal(t, test.err, err, test.expRegex) + } +} diff --git a/plugins/inputs/http_listener/http_listener.go b/plugins/inputs/http_listener/http_listener.go index 0f426f809..05551a966 100644 --- a/plugins/inputs/http_listener/http_listener.go +++ b/plugins/inputs/http_listener/http_listener.go @@ -300,6 +300,9 
@@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { } func (h *HTTPListener) parse(b []byte, t time.Time) error { + if !bytes.HasSuffix(b, []byte("\n")) { + b = append(b, '\n') + } metrics, err := h.parser.ParseWithDefaultTime(b, t) for _, m := range metrics { diff --git a/plugins/outputs/influxdb/client/client.go b/plugins/outputs/influxdb/client/client.go new file mode 100644 index 000000000..3f52752ad --- /dev/null +++ b/plugins/outputs/influxdb/client/client.go @@ -0,0 +1,22 @@ +package client + +import "io" + +type Client interface { + Query(command string) error + + Write(b []byte) (int, error) + WriteWithParams(b []byte, params WriteParams) (int, error) + + WriteStream(b io.Reader, contentLength int) (int, error) + WriteStreamWithParams(b io.Reader, contentLength int, params WriteParams) (int, error) + + Close() error +} + +type WriteParams struct { + Database string + RetentionPolicy string + Precision string + Consistency string +} diff --git a/plugins/outputs/influxdb/client/http.go b/plugins/outputs/influxdb/client/http.go new file mode 100644 index 000000000..68cc3e094 --- /dev/null +++ b/plugins/outputs/influxdb/client/http.go @@ -0,0 +1,258 @@ +package client + +import ( + "crypto/tls" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/url" + "time" + + "github.com/valyala/fasthttp" +) + +var ( + defaultRequestTimeout = time.Second * 5 +) + +// +func NewHTTP(config HTTPConfig, defaultWP WriteParams) (Client, error) { + // validate required parameters: + if len(config.URL) == 0 { + return nil, fmt.Errorf("config.URL is required to create an HTTP client") + } + if len(defaultWP.Database) == 0 { + return nil, fmt.Errorf("A default database is required to create an HTTP client") + } + + // set defaults: + if config.Timeout == 0 { + config.Timeout = defaultRequestTimeout + } + + // parse URL: + u, err := url.Parse(config.URL) + if err != nil { + return nil, fmt.Errorf("error parsing config.URL: %s", err) + } + if u.Scheme != "http" && u.Scheme != "https" { + return nil, fmt.Errorf("config.URL scheme must be http(s), got %s", u.Scheme) + } + + wu := writeURL(u, defaultWP) + return &httpClient{ + writeURL: []byte(wu), + config: config, + url: u, + client: &fasthttp.Client{ + TLSConfig: config.TLSConfig, + }, + }, nil +} + +type HTTPConfig struct { + // URL should be of the form "http://host:port" (REQUIRED) + URL string + + // UserAgent sets the User-Agent header. + UserAgent string + + // Timeout is the time to wait for a response to each HTTP request (writes + // and queries). + Timeout time.Duration + + // Username is the basic auth username for the server. + Username string + // Password is the basic auth password for the server. + Password string + + // TLSConfig is the tls auth settings to use for each request. + TLSConfig *tls.Config + + // Gzip, if true, compresses each payload using gzip. + // TODO + // Gzip bool +} + +// Response represents a list of statement results. +type Response struct { + // ignore Results: + Results []interface{} `json:"-"` + Err string `json:"error,omitempty"` +} + +// Error returns the first error from any statement. +// Returns nil if no errors occurred on any statements. 
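
The WriteStream half of this interface is designed to be fed by the metric reader, with the summed Len() values serving as the content length; this is how the output plugin wires the two together further down. A sketch of the pairing (writeBatch is a hypothetical helper):

```go
package sketch

import (
	"log"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/plugins/outputs/influxdb/client"
)

// writeBatch streams a batch through any Client without first
// concatenating it into one buffer. Len() is assumed to report the
// exact serialized size, so the sum can serve as the content length.
func writeBatch(c client.Client, batch []telegraf.Metric) {
	size := 0
	for _, m := range batch {
		size += m.Len()
	}
	if _, err := c.WriteStream(metric.NewReader(batch), size); err != nil {
		log.Printf("E! write failed: %s", err)
	}
}
```
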
+func (r *Response) Error() error { + if r.Err != "" { + return fmt.Errorf(r.Err) + } + return nil +} + +type httpClient struct { + writeURL []byte + config HTTPConfig + client *fasthttp.Client + url *url.URL +} + +func (c *httpClient) Query(command string) error { + req := c.makeRequest() + req.Header.SetRequestURI(queryURL(c.url, command)) + + return c.doRequest(req, fasthttp.StatusOK) +} + +func (c *httpClient) Write(b []byte) (int, error) { + req := c.makeWriteRequest(len(b), c.writeURL) + req.SetBody(b) + + err := c.doRequest(req, fasthttp.StatusNoContent) + if err == nil { + return len(b), nil + } + return 0, err +} + +func (c *httpClient) WriteWithParams(b []byte, wp WriteParams) (int, error) { + req := c.makeWriteRequest(len(b), []byte(writeURL(c.url, wp))) + req.SetBody(b) + + err := c.doRequest(req, fasthttp.StatusNoContent) + if err == nil { + return len(b), nil + } + return 0, err +} + +func (c *httpClient) WriteStream(r io.Reader, contentLength int) (int, error) { + req := c.makeWriteRequest(contentLength, c.writeURL) + req.SetBodyStream(r, contentLength) + + err := c.doRequest(req, fasthttp.StatusNoContent) + if err == nil { + return contentLength, nil + } + return 0, err +} + +func (c *httpClient) WriteStreamWithParams( + r io.Reader, + contentLength int, + wp WriteParams, +) (int, error) { + req := c.makeWriteRequest(contentLength, []byte(writeURL(c.url, wp))) + req.SetBodyStream(r, contentLength) + + err := c.doRequest(req, fasthttp.StatusNoContent) + if err == nil { + return contentLength, nil + } + return 0, err +} + +func (c *httpClient) doRequest( + req *fasthttp.Request, + expectedCode int, +) error { + resp := fasthttp.AcquireResponse() + + err := c.client.DoTimeout(req, resp, c.config.Timeout) + + code := resp.StatusCode() + // If it's a "no content" response, then release and return nil + if code == fasthttp.StatusNoContent { + fasthttp.ReleaseResponse(resp) + fasthttp.ReleaseRequest(req) + return nil + } + + // not a "no content" response, so parse the result: + var response Response + decErr := json.Unmarshal(resp.Body(), &response) + + // If we got a JSON decode error, send that back + if decErr != nil { + err = fmt.Errorf("Unable to decode json: received status code %d err: %s", code, decErr) + } + // Unexpected response code OR error in JSON response body overrides + // a JSON decode error: + if code != expectedCode || response.Error() != nil { + err = fmt.Errorf("Response Error: Status Code [%d], expected [%d], [%v]", + code, expectedCode, response.Error()) + } + + fasthttp.ReleaseResponse(resp) + fasthttp.ReleaseRequest(req) + + return err +} + +func (c *httpClient) makeWriteRequest( + contentLength int, + writeURL []byte, +) *fasthttp.Request { + req := c.makeRequest() + req.Header.SetContentLength(contentLength) + req.Header.SetRequestURIBytes(writeURL) + // TODO + // if gzip { + // req.Header.SetBytesKV([]byte("Content-Encoding"), []byte("gzip")) + // } + return req +} + +func (c *httpClient) makeRequest() *fasthttp.Request { + req := fasthttp.AcquireRequest() + req.Header.SetContentTypeBytes([]byte("text/plain")) + req.Header.SetMethodBytes([]byte("POST")) + req.Header.SetUserAgent(c.config.UserAgent) + if c.config.Username != "" && c.config.Password != "" { + req.Header.Set("Authorization", "Basic "+basicAuth(c.config.Username, c.config.Password)) + } + return req +} + +func (c *httpClient) Close() error { + // Nothing to do. 
+ return nil +} + +func writeURL(u *url.URL, wp WriteParams) string { + params := url.Values{} + params.Set("db", wp.Database) + if wp.RetentionPolicy != "" { + params.Set("rp", wp.RetentionPolicy) + } + if wp.Precision != "n" && wp.Precision != "" { + params.Set("precision", wp.Precision) + } + if wp.Consistency != "one" && wp.Consistency != "" { + params.Set("consistency", wp.Consistency) + } + + u.RawQuery = params.Encode() + u.Path = "write" + return u.String() +} + +func queryURL(u *url.URL, command string) string { + params := url.Values{} + params.Set("q", command) + + u.RawQuery = params.Encode() + u.Path = "query" + return u.String() +} + +// See 2 (end of page 4) http://www.ietf.org/rfc/rfc2617.txt +// "To receive authorization, the httpClient sends the userid and password, +// separated by a single colon (":") character, within a base64 +// encoded string in the credentials." +// It is not meant to be urlencoded. +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} diff --git a/plugins/outputs/influxdb/client/http_test.go b/plugins/outputs/influxdb/client/http_test.go new file mode 100644 index 000000000..8fa02dd22 --- /dev/null +++ b/plugins/outputs/influxdb/client/http_test.go @@ -0,0 +1,343 @@ +package client + +import ( + "bytes" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestHTTPClient_Write(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + // test form values: + if r.FormValue("db") != "test" { + w.WriteHeader(http.StatusTeapot) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintln(w, `{"results":[{}],"error":"wrong db name"}`) + } + if r.FormValue("rp") != "policy" { + w.WriteHeader(http.StatusTeapot) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintln(w, `{"results":[{}],"error":"wrong rp name"}`) + } + if r.FormValue("precision") != "ns" { + w.WriteHeader(http.StatusTeapot) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintln(w, `{"results":[{}],"error":"wrong precision"}`) + } + if r.FormValue("consistency") != "all" { + w.WriteHeader(http.StatusTeapot) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintln(w, `{"results":[{}],"error":"wrong consistency"}`) + } + // test that user agent is set properly + if r.UserAgent() != "test-agent" { + w.WriteHeader(http.StatusTeapot) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintln(w, `{"results":[{}],"error":"wrong agent name"}`) + } + // test basic auth params + user, pass, ok := r.BasicAuth() + if !ok { + w.WriteHeader(http.StatusTeapot) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintln(w, `{"results":[{}],"error":"basic auth not set"}`) + } + if user != "test-user" || pass != "test-password" { + w.WriteHeader(http.StatusTeapot) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintln(w, `{"results":[{}],"error":"basic auth incorrect"}`) + } + + // Validate Content-Length Header + if r.ContentLength != 13 { + w.WriteHeader(http.StatusTeapot) + w.Header().Set("Content-Type", "application/json") + msg := fmt.Sprintf(`{"results":[{}],"error":"Content-Length: expected [13], got [%d]"}`, r.ContentLength) + fmt.Fprintln(w, msg) + } + + // Validate the request body: + buf := make([]byte, 100) + n, _ := r.Body.Read(buf) + expected := "cpu value=99" + got := string(buf[0 : n-1]) 
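
For orientation, the two helpers above behave as follows; url.Values.Encode sorts keys alphabetically, so the query-string order is deterministic. An in-package example-test sketch (the host and credentials are made up):

```go
package client

import (
	"fmt"
	"net/url"
)

// ExampleWriteURL illustrates the strings produced by writeURL and
// basicAuth; the expected output lines below are part of the sketch.
func ExampleWriteURL() {
	u, _ := url.Parse("http://localhost:8086")
	wp := WriteParams{
		Database:        "test",
		RetentionPolicy: "policy",
		Precision:       "ns",
		Consistency:     "all",
	}
	fmt.Println(writeURL(u, wp))
	fmt.Println(basicAuth("test-user", "test-password"))
	// Output:
	// http://localhost:8086/write?consistency=all&db=test&precision=ns&rp=policy
	// dGVzdC11c2VyOnRlc3QtcGFzc3dvcmQ=
}
```
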
+ if expected != got { + w.WriteHeader(http.StatusTeapot) + w.Header().Set("Content-Type", "application/json") + msg := fmt.Sprintf(`{"results":[{}],"error":"expected [%s], got [%s]"}`, expected, got) + fmt.Fprintln(w, msg) + } + + w.WriteHeader(http.StatusNoContent) + w.Header().Set("Content-Type", "application/json") + case "/query": + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintln(w, `{"results":[{}]}`) + } + })) + defer ts.Close() + + config := HTTPConfig{ + URL: ts.URL, + UserAgent: "test-agent", + Username: "test-user", + Password: "test-password", + } + wp := WriteParams{ + Database: "test", + RetentionPolicy: "policy", + Precision: "ns", + Consistency: "all", + } + client, err := NewHTTP(config, wp) + defer client.Close() + assert.NoError(t, err) + n, err := client.Write([]byte("cpu value=99\n")) + assert.Equal(t, 13, n) + assert.NoError(t, err) + + _, err = client.WriteStream(bytes.NewReader([]byte("cpu value=99\n")), 13) + assert.NoError(t, err) +} + +func TestHTTPClient_WriteParamsOverride(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + // test that database is set properly + if r.FormValue("db") != "override" { + w.WriteHeader(http.StatusTeapot) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintln(w, `{"results":[{}],"error":"wrong db name"}`) + } + + // Validate the request body: + buf := make([]byte, 100) + n, _ := r.Body.Read(buf) + expected := "cpu value=99" + got := string(buf[0 : n-1]) + if expected != got { + w.WriteHeader(http.StatusTeapot) + w.Header().Set("Content-Type", "application/json") + msg := fmt.Sprintf(`{"results":[{}],"error":"expected [%s], got [%s]"}`, expected, got) + fmt.Fprintln(w, msg) + } + + w.WriteHeader(http.StatusNoContent) + w.Header().Set("Content-Type", "application/json") + case "/query": + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintln(w, `{"results":[{}]}`) + } + })) + defer ts.Close() + + config := HTTPConfig{ + URL: ts.URL, + } + defaultWP := WriteParams{ + Database: "test", + } + client, err := NewHTTP(config, defaultWP) + defer client.Close() + assert.NoError(t, err) + + // test that WriteWithParams overrides the default write params + wp := WriteParams{ + Database: "override", + } + n, err := client.WriteWithParams([]byte("cpu value=99\n"), wp) + assert.Equal(t, 13, n) + assert.NoError(t, err) + + _, err = client.WriteStreamWithParams(bytes.NewReader([]byte("cpu value=99\n")), 13, wp) + assert.NoError(t, err) +} + +func TestHTTPClient_Write_Errors(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + w.WriteHeader(http.StatusTeapot) + case "/query": + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintln(w, `{"results":[{}]}`) + } + })) + defer ts.Close() + + config := HTTPConfig{ + URL: ts.URL, + } + defaultWP := WriteParams{ + Database: "test", + } + client, err := NewHTTP(config, defaultWP) + defer client.Close() + assert.NoError(t, err) + + lp := []byte("cpu value=99\n") + n, err := client.Write(lp) + assert.Equal(t, 0, n) + assert.Error(t, err) + + n, err = client.WriteStream(bytes.NewReader(lp), 13) + assert.Equal(t, 0, n) + assert.Error(t, err) + + wp := WriteParams{ + Database: "override", + } + n, err = client.WriteWithParams(lp, wp) + assert.Equal(t, 0, n) + assert.Error(t, err) + 
+ n, err = client.WriteStreamWithParams(bytes.NewReader(lp), 13, wp) + assert.Equal(t, 0, n) + assert.Error(t, err) +} + +func TestNewHTTPErrors(t *testing.T) { + // No URL: + config := HTTPConfig{} + defaultWP := WriteParams{ + Database: "test", + } + client, err := NewHTTP(config, defaultWP) + assert.Error(t, err) + assert.Nil(t, client) + + // No Database: + config = HTTPConfig{ + URL: "http://localhost:8086", + } + defaultWP = WriteParams{} + client, err = NewHTTP(config, defaultWP) + assert.Nil(t, client) + assert.Error(t, err) + + // Invalid URL: + config = HTTPConfig{ + URL: "http://192.168.0.%31:8080/", + } + defaultWP = WriteParams{ + Database: "test", + } + client, err = NewHTTP(config, defaultWP) + assert.Nil(t, client) + assert.Error(t, err) + + // Invalid URL scheme: + config = HTTPConfig{ + URL: "mailto://localhost:8086", + } + defaultWP = WriteParams{ + Database: "test", + } + client, err = NewHTTP(config, defaultWP) + assert.Nil(t, client) + assert.Error(t, err) +} + +func TestHTTPClient_Query(t *testing.T) { + command := "CREATE DATABASE test" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + w.WriteHeader(http.StatusNoContent) + case "/query": + // validate the create database command is correct + got := r.FormValue("q") + if got != command { + w.WriteHeader(http.StatusTeapot) + w.Header().Set("Content-Type", "application/json") + msg := fmt.Sprintf(`{"results":[{}],"error":"got %s, expected %s"}`, got, command) + fmt.Fprintln(w, msg) + } + + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintln(w, `{"results":[{}]}`) + } + })) + defer ts.Close() + + config := HTTPConfig{ + URL: ts.URL, + } + defaultWP := WriteParams{ + Database: "test", + } + client, err := NewHTTP(config, defaultWP) + defer client.Close() + assert.NoError(t, err) + err = client.Query(command) + assert.NoError(t, err) +} + +func TestHTTPClient_Query_ResponseError(t *testing.T) { + command := "CREATE DATABASE test" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + w.WriteHeader(http.StatusNoContent) + case "/query": + w.WriteHeader(http.StatusTeapot) + w.Header().Set("Content-Type", "application/json") + msg := fmt.Sprintf(`{"results":[{}],"error":"couldnt create database"}`) + fmt.Fprintln(w, msg) + } + })) + defer ts.Close() + + config := HTTPConfig{ + URL: ts.URL, + } + defaultWP := WriteParams{ + Database: "test", + } + client, err := NewHTTP(config, defaultWP) + defer client.Close() + assert.NoError(t, err) + err = client.Query(command) + assert.Error(t, err) +} + +func TestHTTPClient_Query_JSONDecodeError(t *testing.T) { + command := "CREATE DATABASE test" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + w.WriteHeader(http.StatusNoContent) + case "/query": + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "application/json") + // write JSON missing a ']' + msg := fmt.Sprintf(`{"results":[{}}`) + fmt.Fprintln(w, msg) + } + })) + defer ts.Close() + + config := HTTPConfig{ + URL: ts.URL, + } + defaultWP := WriteParams{ + Database: "test", + } + client, err := NewHTTP(config, defaultWP) + defer client.Close() + assert.NoError(t, err) + err = client.Query(command) + assert.Error(t, err) + assert.Contains(t, err.Error(), "json") +} diff --git a/plugins/outputs/influxdb/client/udp.go 
b/plugins/outputs/influxdb/client/udp.go new file mode 100644 index 000000000..d542ecf63 --- /dev/null +++ b/plugins/outputs/influxdb/client/udp.go @@ -0,0 +1,99 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "net" + "net/url" +) + +const ( + // UDPPayloadSize is a reasonable default payload size for UDP packets that + // could be travelling over the internet. + UDPPayloadSize = 512 +) + +// UDPConfig is the config data needed to create a UDP Client +type UDPConfig struct { + // URL should be of the form "udp://host:port" + // or "udp://[ipv6-host%zone]:port". + URL string + + // PayloadSize is the maximum size of a UDP client message, optional + // Tune this based on your network. Defaults to UDPPayloadSize. + PayloadSize int +} + +func NewUDP(config UDPConfig) (Client, error) { + p, err := url.Parse(config.URL) + if err != nil { + return nil, fmt.Errorf("Error parsing UDP url [%s]: %s", config.URL, err) + } + + udpAddr, err := net.ResolveUDPAddr("udp", p.Host) + if err != nil { + return nil, fmt.Errorf("Error resolving UDP Address [%s]: %s", p.Host, err) + } + + conn, err := net.DialUDP("udp", nil, udpAddr) + if err != nil { + return nil, fmt.Errorf("Error dialing UDP address [%s]: %s", + udpAddr.String(), err) + } + + size := config.PayloadSize + if size == 0 { + size = UDPPayloadSize + } + buf := make([]byte, size) + return &udpClient{conn: conn, buffer: buf}, nil +} + +type udpClient struct { + conn *net.UDPConn + buffer []byte +} + +func (c *udpClient) Query(command string) error { + return nil +} + +func (c *udpClient) Write(b []byte) (int, error) { + return c.WriteStream(bytes.NewReader(b), -1) +} + +// write params are ignored by the UDP client +func (c *udpClient) WriteWithParams(b []byte, wp WriteParams) (int, error) { + return c.WriteStream(bytes.NewReader(b), -1) +} + +// contentLength is ignored by the UDP client. +func (c *udpClient) WriteStream(r io.Reader, contentLength int) (int, error) { + var totaln int + for { + nR, err := r.Read(c.buffer) + if nR == 0 { + break + } + if err != io.EOF && err != nil { + return totaln, err + } + nW, err := c.conn.Write(c.buffer[0:nR]) + totaln += nW + if err != nil { + return totaln, err + } + } + return totaln, nil +} + +// contentLength is ignored by the UDP client. 
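
Because WriteStream simply forwards each Read as one packet, the payload boundaries are decided entirely by the io.Reader handed in: a metric.Reader yields packets of whole or cleanly split metrics, while a plain bytes.Reader can break mid-line, as the tests below demonstrate. A minimal sketch of the intended pairing (sendUDP and the address are assumptions):

```go
package sketch

import (
	"log"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/plugins/outputs/influxdb/client"
)

// sendUDP streams a batch over UDP so that each packet carries at most
// PayloadSize bytes of whole (or cleanly split) metrics.
func sendUDP(batch []telegraf.Metric) {
	c, err := client.NewUDP(client.UDPConfig{
		URL:         "udp://localhost:8089", // assumed listener
		PayloadSize: 512,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	// contentLength is ignored by the UDP client, so 0 is fine here.
	if _, err := c.WriteStream(metric.NewReader(batch), 0); err != nil {
		log.Printf("E! udp write failed: %s", err)
	}
}
```
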
+// write params are ignored by the UDP client +func (c *udpClient) WriteStreamWithParams(r io.Reader, contentLength int, wp WriteParams) (int, error) { + return c.WriteStream(r, -1) +} + +func (c *udpClient) Close() error { + return c.conn.Close() +} diff --git a/plugins/outputs/influxdb/client/udp_test.go b/plugins/outputs/influxdb/client/udp_test.go new file mode 100644 index 000000000..31196ddca --- /dev/null +++ b/plugins/outputs/influxdb/client/udp_test.go @@ -0,0 +1,163 @@ +package client + +import ( + "bytes" + "net" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + + "github.com/stretchr/testify/assert" +) + +func TestUDPClient(t *testing.T) { + config := UDPConfig{ + URL: "udp://localhost:8089", + } + client, err := NewUDP(config) + assert.NoError(t, err) + + err = client.Query("ANY QUERY RETURNS NIL") + assert.NoError(t, err) + + assert.NoError(t, client.Close()) +} + +func TestNewUDPClient_Errors(t *testing.T) { + // url.Parse Error + config := UDPConfig{ + URL: "udp://localhost%35:8089", + } + _, err := NewUDP(config) + assert.Error(t, err) + + // ResolveUDPAddr Error + config = UDPConfig{ + URL: "udp://localhost:999999", + } + _, err = NewUDP(config) + assert.Error(t, err) +} + +func TestUDPClient_Write(t *testing.T) { + config := UDPConfig{ + URL: "udp://localhost:8199", + } + client, err := NewUDP(config) + assert.NoError(t, err) + + packets := make(chan string, 100) + address, err := net.ResolveUDPAddr("udp", "localhost:8199") + assert.NoError(t, err) + listener, err := net.ListenUDP("udp", address) + defer listener.Close() + assert.NoError(t, err) + go func() { + buf := make([]byte, 200) + for { + n, _, err := listener.ReadFromUDP(buf) + if err != nil { + packets <- err.Error() + } + packets <- string(buf[0:n]) + } + }() + + // test sending simple metric + time.Sleep(time.Second) + n, err := client.Write([]byte("cpu value=99\n")) + assert.Equal(t, n, 13) + assert.NoError(t, err) + pkt := <-packets + assert.Equal(t, "cpu value=99\n", pkt) + + metrics := `cpu value=99 +cpu value=55 +cpu value=44 +cpu value=101 +cpu value=91 +cpu value=92 +` + // test sending packet with 6 metrics in a stream. 
+ reader := bytes.NewReader([]byte(metrics)) + // contentLength is ignored: + n, err = client.WriteStream(reader, 10) + assert.Equal(t, n, len(metrics)) + assert.NoError(t, err) + pkt = <-packets + assert.Equal(t, "cpu value=99\ncpu value=55\ncpu value=44\ncpu value=101\ncpu value=91\ncpu value=92\n", pkt) + + // + // Test that UDP packets get broken up properly: + config2 := UDPConfig{ + URL: "udp://localhost:8199", + PayloadSize: 25, + } + client2, err := NewUDP(config2) + assert.NoError(t, err) + + wp := WriteParams{} + + // + // Using Write(): + buf := []byte(metrics) + n, err = client2.WriteWithParams(buf, wp) + assert.Equal(t, n, len(metrics)) + assert.NoError(t, err) + pkt = <-packets + assert.Equal(t, "cpu value=99\ncpu value=55", pkt) + pkt = <-packets + assert.Equal(t, "\ncpu value=44\ncpu value=1", pkt) + pkt = <-packets + assert.Equal(t, "01\ncpu value=91\ncpu value", pkt) + pkt = <-packets + assert.Equal(t, "=92\n", pkt) + + // + // Using WriteStream(): + reader = bytes.NewReader([]byte(metrics)) + n, err = client2.WriteStreamWithParams(reader, 10, wp) + assert.Equal(t, n, len(metrics)) + assert.NoError(t, err) + pkt = <-packets + assert.Equal(t, "cpu value=99\ncpu value=55", pkt) + pkt = <-packets + assert.Equal(t, "\ncpu value=44\ncpu value=1", pkt) + pkt = <-packets + assert.Equal(t, "01\ncpu value=91\ncpu value", pkt) + pkt = <-packets + assert.Equal(t, "=92\n", pkt) + + // + // Using WriteStream() & a metric.Reader: + config3 := UDPConfig{ + URL: "udp://localhost:8199", + PayloadSize: 40, + } + client3, err := NewUDP(config3) + assert.NoError(t, err) + + now := time.Unix(1484142942, 0) + m1, _ := metric.New("test", map[string]string{}, + map[string]interface{}{"value": 1.1}, now) + m2, _ := metric.New("test", map[string]string{}, + map[string]interface{}{"value": 1.1}, now) + m3, _ := metric.New("test", map[string]string{}, + map[string]interface{}{"value": 1.1}, now) + ms := []telegraf.Metric{m1, m2, m3} + mReader := metric.NewReader(ms) + n, err = client3.WriteStreamWithParams(mReader, 10, wp) + // 3 metrics at 35 bytes each (including the newline) + assert.Equal(t, 105, n) + assert.NoError(t, err) + pkt = <-packets + assert.Equal(t, "test value=1.1 1484142942000000000\n", pkt) + pkt = <-packets + assert.Equal(t, "test value=1.1 1484142942000000000\n", pkt) + pkt = <-packets + assert.Equal(t, "test value=1.1 1484142942000000000\n", pkt) + + assert.NoError(t, client.Close()) +} diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 999e1bc6f..06d8bd042 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -1,19 +1,18 @@ package influxdb import ( - "errors" "fmt" "log" "math/rand" - "net/url" "strings" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/plugins/outputs/influxdb/client" ) type InfluxDB struct { @@ -41,7 +40,7 @@ type InfluxDB struct { // Precision is only here for legacy support. It will be ignored. 
Precision string - conns []client.Client + clients []client.Client } var sampleConfig = ` @@ -88,79 +87,56 @@ func (i *InfluxDB) Connect() error { urls = append(urls, i.URL) } - tlsCfg, err := internal.GetTLSConfig( + tlsConfig, err := internal.GetTLSConfig( i.SSLCert, i.SSLKey, i.SSLCA, i.InsecureSkipVerify) if err != nil { return err } - var conns []client.Client for _, u := range urls { switch { case strings.HasPrefix(u, "udp"): - parsed_url, err := url.Parse(u) - if err != nil { - return err - } - - if i.UDPPayload == 0 { - i.UDPPayload = client.UDPPayloadSize - } - c, err := client.NewUDPClient(client.UDPConfig{ - Addr: parsed_url.Host, + config := client.UDPConfig{ + URL: u, PayloadSize: i.UDPPayload, - }) - if err != nil { - return err + c, err := client.NewUDP(config) } - conns = append(conns, c) + if err != nil { + return fmt.Errorf("Error creating UDP Client [%s]: %s", u, err) + } + i.clients = append(i.clients, c) default: // If URL doesn't start with "udp", assume HTTP client - c, err := client.NewHTTPClient(client.HTTPConfig{ - Addr: u, - Username: i.Username, - Password: i.Password, - UserAgent: i.UserAgent, + config := client.HTTPConfig{ + URL: u, Timeout: i.Timeout.Duration, - TLSConfig: tlsCfg, - }) - if err != nil { - return err + TLSConfig: tlsConfig, + UserAgent: i.UserAgent, } + wp := client.WriteParams{ + Database: i.Database, + RetentionPolicy: i.RetentionPolicy, + Consistency: i.WriteConsistency, + } + c, err := client.NewHTTP(config, wp) + if err != nil { + return fmt.Errorf("Error creating HTTP Client [%s]: %s", u, err) + } + i.clients = append(i.clients, c) - err = createDatabase(c, i.Database) + err = c.Query("CREATE DATABASE " + i.Database) if err != nil { log.Println("E! Database creation failed: " + err.Error()) continue } - - conns = append(conns, c) } } - i.conns = conns rand.Seed(time.Now().UnixNano()) return nil } -func createDatabase(c client.Client, database string) error { - // Create Database if it doesn't exist - _, err := c.Query(client.Query{ - Command: fmt.Sprintf("CREATE DATABASE \"%s\"", database), - }) - return err -} - func (i *InfluxDB) Close() error { - var errS string - for j, _ := range i.conns { - if err := i.conns[j].Close(); err != nil { - errS += err.Error() - } - } - if errS != "" { - return fmt.Errorf("output influxdb close failed: %s", errS) - } return nil } @@ -175,34 +151,24 @@ func (i *InfluxDB) Description() string { // Choose a random server in the cluster to write to until a successful write // occurs, logging each unsuccessful. If all servers fail, return error. 
func (i *InfluxDB) Write(metrics []telegraf.Metric) error { - if len(i.conns) == 0 { - err := i.Connect() - if err != nil { - return err - } - } - bp, err := client.NewBatchPoints(client.BatchPointsConfig{ - Database: i.Database, - RetentionPolicy: i.RetentionPolicy, - WriteConsistency: i.WriteConsistency, - }) - if err != nil { - return err - } - - for _, metric := range metrics { - bp.AddPoint(metric.Point()) + bufsize := 0 + for _, m := range metrics { + bufsize += m.Len() + r := metric.NewReader(metrics) } // This will get set to nil if a successful write occurs - err = errors.New("Could not write to any InfluxDB server in cluster") + err := fmt.Errorf("Could not write to any InfluxDB server in cluster") - p := rand.Perm(len(i.conns)) + p := rand.Perm(len(i.clients)) for _, n := range p { - if e := i.conns[n].Write(bp); e != nil { - // If the database was not found, try to recreate it + if _, e := i.clients[n].WriteStream(r, bufsize); e != nil { + // Log write failure: + log.Printf("E! InfluxDB Output Error: %s", e) + + // If the database was not found, try to recreate it: if strings.Contains(e.Error(), "database not found") { - if errc := createDatabase(i.conns[n], i.Database); errc != nil { + if errc := i.clients[n].Query("CREATE DATABASE " + i.Database); errc != nil { log.Printf("E! Error: Database %s not found and failed to recreate\n", i.Database) } @@ -225,10 +191,12 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { return err } -func init() { - outputs.Add("influxdb", func() telegraf.Output { - return &InfluxDB{ - Timeout: internal.Duration{Duration: time.Second * 5}, - } - }) +func newInflux() *InfluxDB { + return &InfluxDB{ + Timeout: internal.Duration{Duration: time.Second * 5}, + } +} + +func init() { + outputs.Add("influxdb", func() telegraf.Output { return newInflux() }) } diff --git a/plugins/outputs/influxdb/influxdb_test.go b/plugins/outputs/influxdb/influxdb_test.go index 1414fa839..db2cd5ec7 100644 --- a/plugins/outputs/influxdb/influxdb_test.go +++ b/plugins/outputs/influxdb/influxdb_test.go @@ -20,22 +20,123 @@ func TestUDPInflux(t *testing.T) { require.NoError(t, err) err = i.Write(testutil.MockMetrics()) require.NoError(t, err) + require.NoError(t, i.Close()) } func TestHTTPInflux(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - w.Header().Set("Content-Type", "application/json") - fmt.Fprintln(w, `{"results":[{}]}`) + switch r.URL.Path { + case "/write": + // test that database is set properly + if r.FormValue("db") != "test" { + w.WriteHeader(http.StatusTeapot) + w.Header().Set("Content-Type", "application/json") + } + // test that user agent is set properly + if r.UserAgent() != "telegraf" { + w.WriteHeader(http.StatusTeapot) + w.Header().Set("Content-Type", "application/json") + } + w.WriteHeader(http.StatusNoContent) + w.Header().Set("Content-Type", "application/json") + case "/query": + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintln(w, `{"results":[{}]}`) + } })) defer ts.Close() - i := InfluxDB{ - URLs: []string{ts.URL}, - } + i := newInflux() + i.URLs = []string{ts.URL} + i.Database = "test" + i.UserAgent = "telegraf" err := i.Connect() require.NoError(t, err) err = i.Write(testutil.MockMetrics()) require.NoError(t, err) + require.NoError(t, i.Close()) +} + +func TestUDPConnectError(t *testing.T) { + i := InfluxDB{ + URLs: []string{"udp://foobar:8089"}, + } + + err := i.Connect() + require.Error(t, err) + + 
i = InfluxDB{ + URLs: []string{"udp://localhost:9999999"}, + } + + err = i.Connect() + require.Error(t, err) +} + +func TestHTTPConnectError_InvalidURL(t *testing.T) { + i := InfluxDB{ + URLs: []string{"http://foobar:8089"}, + } + + err := i.Connect() + require.Error(t, err) + + i = InfluxDB{ + URLs: []string{"http://localhost:9999999"}, + } + + err = i.Connect() + require.Error(t, err) +} + +func TestHTTPConnectError_DatabaseCreateFail(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/query": + w.WriteHeader(http.StatusNotFound) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintln(w, `{"results":[{}],"error":"test error"}`) + } + })) + defer ts.Close() + + i := InfluxDB{ + URLs: []string{ts.URL}, + Database: "test", + } + + // database creation errors do not return an error from Connect + // they are only logged. + err := i.Connect() + require.NoError(t, err) + require.NoError(t, i.Close()) +} + +func TestHTTPError_DatabaseNotFound(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + w.WriteHeader(http.StatusNotFound) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintln(w, `{"results":[{}],"error":"database not found"}`) + case "/query": + w.WriteHeader(http.StatusNotFound) + w.Header().Set("Content-Type", "application/json") + fmt.Fprintln(w, `{"results":[{}],"error":"database not found"}`) + } + })) + defer ts.Close() + + i := InfluxDB{ + URLs: []string{ts.URL}, + Database: "test", + } + + err := i.Connect() + require.NoError(t, err) + err = i.Write(testutil.MockMetrics()) + require.Error(t, err) + require.NoError(t, i.Close()) } From c9e87a39f8129aa7aacf45a6342d781f50be69fa Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 11 Jan 2017 16:01:32 +0000 Subject: [PATCH 0085/1302] Revert using fasthttp library to net/http --- CHANGELOG.md | 1 + Godeps | 2 - plugins/inputs/http_listener/http_listener.go | 3 - plugins/outputs/influxdb/client/http.go | 139 ++++++++++-------- plugins/outputs/influxdb/influxdb.go | 7 +- plugins/outputs/influxdb/influxdb_test.go | 24 +++ 6 files changed, 102 insertions(+), 74 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 58912b2fe..7ebea6184 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ - [#2137](https://github.com/influxdata/telegraf/pull/2137): Added userstats to mysql input plugin. - [#2179](https://github.com/influxdata/telegraf/pull/2179): Added more InnoDB metric to MySQL plugin. +- [#2251](https://github.com/influxdata/telegraf/pull/2251): InfluxDB output: use own client for improved through-put and less allocations. 
### Bugfixes diff --git a/Godeps b/Godeps index 83b9e4561..99606414e 100644 --- a/Godeps +++ b/Godeps @@ -50,8 +50,6 @@ github.com/shirou/gopsutil 1516eb9ddc5e61ba58874047a98f8b44b5e585e8 github.com/soniah/gosnmp 3fe3beb30fa9700988893c56a63b1df8e1b68c26 github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744 github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c -github.com/valyala/bytebufferpool e746df99fe4a3986f4d4f79e13c1e0117ce9c2f7 -github.com/valyala/fasthttp 2f4876aaf2b591786efc9b49f34b86ad44c25074 github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2 github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8 diff --git a/plugins/inputs/http_listener/http_listener.go b/plugins/inputs/http_listener/http_listener.go index 05551a966..0f426f809 100644 --- a/plugins/inputs/http_listener/http_listener.go +++ b/plugins/inputs/http_listener/http_listener.go @@ -300,9 +300,6 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { } func (h *HTTPListener) parse(b []byte, t time.Time) error { - if !bytes.HasSuffix(b, []byte("\n")) { - b = append(b, '\n') - } metrics, err := h.parser.ParseWithDefaultTime(b, t) for _, m := range metrics { diff --git a/plugins/outputs/influxdb/client/http.go b/plugins/outputs/influxdb/client/http.go index 68cc3e094..62ca1315b 100644 --- a/plugins/outputs/influxdb/client/http.go +++ b/plugins/outputs/influxdb/client/http.go @@ -1,15 +1,15 @@ package client import ( + "bytes" "crypto/tls" - "encoding/base64" "encoding/json" "fmt" "io" + "io/ioutil" + "net/http" "net/url" "time" - - "github.com/valyala/fasthttp" ) var ( @@ -40,13 +40,15 @@ func NewHTTP(config HTTPConfig, defaultWP WriteParams) (Client, error) { return nil, fmt.Errorf("config.URL scheme must be http(s), got %s", u.Scheme) } - wu := writeURL(u, defaultWP) return &httpClient{ - writeURL: []byte(wu), + writeURL: writeURL(u, defaultWP), config: config, url: u, - client: &fasthttp.Client{ - TLSConfig: config.TLSConfig, + client: &http.Client{ + Timeout: config.Timeout, + Transport: &http.Transport{ + TLSClientConfig: config.TLSConfig, + }, }, }, nil } @@ -58,8 +60,13 @@ type HTTPConfig struct { // UserAgent sets the User-Agent header. UserAgent string - // Timeout is the time to wait for a response to each HTTP request (writes - // and queries). + // Timeout specifies a time limit for requests made by this + // Client. The timeout includes connection time, any + // redirects, and reading the response body. The timer remains + // running after Get, Head, Post, or Do return and will + // interrupt reading of the Response.Body. + // + // A Timeout of zero means no timeout. Timeout time.Duration // Username is the basic auth username for the server. 
@@ -92,24 +99,27 @@ func (r *Response) Error() error {
 }
 
 type httpClient struct {
-	writeURL []byte
+	writeURL string
 	config   HTTPConfig
-	client   *fasthttp.Client
+	client   *http.Client
 	url      *url.URL
 }
 
 func (c *httpClient) Query(command string) error {
-	req := c.makeRequest()
-	req.Header.SetRequestURI(queryURL(c.url, command))
-
-	return c.doRequest(req, fasthttp.StatusOK)
+	req, err := c.makeRequest(queryURL(c.url, command), bytes.NewReader([]byte("")))
+	if err != nil {
+		return err
+	}
+	return c.doRequest(req, http.StatusOK)
 }
 
 func (c *httpClient) Write(b []byte) (int, error) {
-	req := c.makeWriteRequest(len(b), c.writeURL)
-	req.SetBody(b)
+	req, err := c.makeWriteRequest(bytes.NewReader(b), len(b), c.writeURL)
+	if err != nil {
+		return 0, err
+	}
 
-	err := c.doRequest(req, fasthttp.StatusNoContent)
+	err = c.doRequest(req, http.StatusNoContent)
 	if err == nil {
 		return len(b), nil
 	}
@@ -117,10 +127,12 @@ func (c *httpClient) Write(b []byte) (int, error) {
 }
 
 func (c *httpClient) WriteWithParams(b []byte, wp WriteParams) (int, error) {
-	req := c.makeWriteRequest(len(b), []byte(writeURL(c.url, wp)))
-	req.SetBody(b)
+	req, err := c.makeWriteRequest(bytes.NewReader(b), len(b), writeURL(c.url, wp))
+	if err != nil {
+		return 0, err
+	}
 
-	err := c.doRequest(req, fasthttp.StatusNoContent)
+	err = c.doRequest(req, http.StatusNoContent)
 	if err == nil {
 		return len(b), nil
 	}
@@ -128,10 +140,12 @@ func (c *httpClient) WriteWithParams(b []byte, wp WriteParams) (int, error) {
 }
 
 func (c *httpClient) WriteStream(r io.Reader, contentLength int) (int, error) {
-	req := c.makeWriteRequest(contentLength, c.writeURL)
-	req.SetBodyStream(r, contentLength)
+	req, err := c.makeWriteRequest(r, contentLength, c.writeURL)
+	if err != nil {
+		return 0, err
+	}
 
-	err := c.doRequest(req, fasthttp.StatusNoContent)
+	err = c.doRequest(req, http.StatusNoContent)
 	if err == nil {
 		return contentLength, nil
 	}
@@ -143,10 +157,12 @@ func (c *httpClient) WriteStream(r io.Reader, contentLength int) (int, error) {
 }
 
 func (c *httpClient) WriteStreamWithParams(
 	r io.Reader,
 	contentLength int,
 	wp WriteParams,
 ) (int, error) {
-	req := c.makeWriteRequest(contentLength, []byte(writeURL(c.url, wp)))
-	req.SetBodyStream(r, contentLength)
+	req, err := c.makeWriteRequest(r, contentLength, writeURL(c.url, wp))
+	if err != nil {
+		return 0, err
+	}
 
-	err := c.doRequest(req, fasthttp.StatusNoContent)
+	err = c.doRequest(req, http.StatusNoContent)
 	if err == nil {
 		return contentLength, nil
 	}
@@ -154,24 +170,27 @@ func (c *httpClient) WriteStreamWithParams(
 }
 
 func (c *httpClient) doRequest(
-	req *fasthttp.Request,
+	req *http.Request,
 	expectedCode int,
 ) error {
-	resp := fasthttp.AcquireResponse()
+	resp, err := c.client.Do(req)
+	if err != nil {
+		return err
+	}
 
-	err := c.client.DoTimeout(req, resp, c.config.Timeout)
-
-	code := resp.StatusCode()
+	code := resp.StatusCode
 
 	// If it's a "no content" response, then release and return nil
-	if code == fasthttp.StatusNoContent {
-		fasthttp.ReleaseResponse(resp)
-		fasthttp.ReleaseRequest(req)
+	if code == http.StatusNoContent {
 		return nil
 	}
 
 	// not a "no content" response, so parse the result:
 	var response Response
-	decErr := json.Unmarshal(resp.Body(), &response)
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return fmt.Errorf("Fatal error reading body: %s", err)
+	}
+	decErr := json.Unmarshal(body, &response)
 
 	// If we got a JSON decode error, send that back
 	if decErr != nil {
@@ -184,35 +203,37 @@
 			code, expectedCode, response.Error())
 	}
 
-	fasthttp.ReleaseResponse(resp)
-	fasthttp.ReleaseRequest(req)
-
 	return err
 }
 
 func (c *httpClient) makeWriteRequest(
+	body io.Reader,
 	contentLength int,
-	writeURL []byte,
-) *fasthttp.Request {
-	req := c.makeRequest()
-	req.Header.SetContentLength(contentLength)
-	req.Header.SetRequestURIBytes(writeURL)
+	writeURL string,
+) (*http.Request, error) {
+	req, err := c.makeRequest(writeURL, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Length", fmt.Sprint(contentLength))
 
 	// TODO
 	// if gzip {
-	//  req.Header.SetBytesKV([]byte("Content-Encoding"), []byte("gzip"))
+	//  req.Header.Set("Content-Encoding", "gzip")
 	// }
 
-	return req
+	return req, nil
 }
 
-func (c *httpClient) makeRequest() *fasthttp.Request {
-	req := fasthttp.AcquireRequest()
-	req.Header.SetContentTypeBytes([]byte("text/plain"))
-	req.Header.SetMethodBytes([]byte("POST"))
-	req.Header.SetUserAgent(c.config.UserAgent)
-	if c.config.Username != "" && c.config.Password != "" {
-		req.Header.Set("Authorization", "Basic "+basicAuth(c.config.Username, c.config.Password))
+func (c *httpClient) makeRequest(uri string, body io.Reader) (*http.Request, error) {
+	req, err := http.NewRequest("POST", uri, body)
+	if err != nil {
+		return nil, err
 	}
-	return req
+	req.Header.Set("Content-Type", "text/plain")
+	req.Header.Set("User-Agent", c.config.UserAgent)
+	if c.config.Username != "" && c.config.Password != "" {
+		req.SetBasicAuth(c.config.Username, c.config.Password)
+	}
+	return req, nil
 }
 
 func (c *httpClient) Close() error {
@@ -246,13 +267,3 @@ func queryURL(u *url.URL, command string) string {
 	u.Path = "query"
 	return u.String()
 }
-
-// See 2 (end of page 4) http://www.ietf.org/rfc/rfc2617.txt
-// "To receive authorization, the httpClient sends the userid and password,
-// separated by a single colon (":") character, within a base64
-// encoded string in the credentials."
-// It is not meant to be urlencoded.
-func basicAuth(username, password string) string {
-	auth := username + ":" + password
-	return base64.StdEncoding.EncodeToString([]byte(auth))
-}
diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go
index 06d8bd042..5a5899a60 100644
--- a/plugins/outputs/influxdb/influxdb.go
+++ b/plugins/outputs/influxdb/influxdb.go
@@ -99,8 +99,8 @@ func (i *InfluxDB) Connect() error {
 		config := client.UDPConfig{
 			URL:         u,
 			PayloadSize: i.UDPPayload,
-			c, err := client.NewUDP(config)
 		}
+		c, err := client.NewUDP(config)
 		if err != nil {
 			return fmt.Errorf("Error creating UDP Client [%s]: %s", u, err)
 		}
@@ -154,8 +154,8 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
 	bufsize := 0
 	for _, m := range metrics {
 		bufsize += m.Len()
-		r := metric.NewReader(metrics)
 	}
+	r := metric.NewReader(metrics)
 
 	// This will get set to nil if a successful write occurs
 	err := fmt.Errorf("Could not write to any InfluxDB server in cluster")
@@ -163,9 +163,6 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
 	p := rand.Perm(len(i.clients))
 	for _, n := range p {
 		if _, e := i.clients[n].WriteStream(r, bufsize); e != nil {
-			// Log write failure:
-			log.Printf("E! InfluxDB Output Error: %s", e)
-
 			// If the database was not found, try to recreate it:
 			if strings.Contains(e.Error(), "database not found") {
 				if errc := i.clients[n].Query("CREATE DATABASE " + i.Database); errc != nil {
diff --git a/plugins/outputs/influxdb/influxdb_test.go b/plugins/outputs/influxdb/influxdb_test.go
index db2cd5ec7..0ece8a1c2 100644
--- a/plugins/outputs/influxdb/influxdb_test.go
+++ b/plugins/outputs/influxdb/influxdb_test.go
@@ -140,3 +140,27 @@ func TestHTTPError_DatabaseNotFound(t *testing.T) {
 	require.Error(t, err)
 	require.NoError(t, i.Close())
 }
+
+// field type conflict does not return an error, instead we log it and drop the metrics
+func TestHTTPError_FieldTypeConflict(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		switch r.URL.Path {
+		case "/write":
+			w.WriteHeader(http.StatusNotFound)
+			w.Header().Set("Content-Type", "application/json")
+			fmt.Fprintln(w, `{"results":[{}],"error":"field type conflict: input field \"value\" on measurement \"test\" is type integer, already exists as type float dropped=1"}`)
+		}
+	}))
+	defer ts.Close()
+
+	i := InfluxDB{
+		URLs:     []string{ts.URL},
+		Database: "test",
+	}
+
+	err := i.Connect()
+	require.NoError(t, err)
+	err = i.Write(testutil.MockMetrics())
+	require.NoError(t, err)
+	require.NoError(t, i.Close())
+}

From 3fa37a9212dfdec35b38b6436ba26622492e930c Mon Sep 17 00:00:00 2001
From: Fabio Berchtold
Date: Fri, 27 Jan 2017 23:54:59 +0100
Subject: [PATCH 0086/1302] Rewriting Riemann output plugin (#1900)

* rename to riemann_legacy

Signed-off-by: Fabio Berchtold

* initial draft for Riemann output plugin rewrite

Signed-off-by: Fabio Berchtold

* add unit tests

Signed-off-by: Fabio Berchtold

* add option to send string metrics as states

Signed-off-by: Fabio Berchtold

* add integration tests

Signed-off-by: Fabio Berchtold

* add plugin README.md

Signed-off-by: Fabio Berchtold

* bump riemann library

* clarify settings description

Signed-off-by: Fabio Berchtold

* update Readme.md with updated description

Signed-off-by: Fabio Berchtold

* add Riemann event examples

Signed-off-by: Fabio Berchtold

* use full URL for Riemann server address

Signed-off-by: Fabio Berchtold

closes #1878
---
 Godeps                                     |   2 +-
 Makefile                                   |   4 +-
 README.md                                  |   1 +
 etc/telegraf.conf                          |  33 ++-
 plugins/outputs/all/all.go                 |   1 +
 plugins/outputs/riemann/README.md          |  83 ++++++++
 plugins/outputs/riemann/riemann.go         | 183 +++++++++++------
 plugins/outputs/riemann/riemann_test.go    | 194 +++++++++++++++++-
 plugins/outputs/riemann_legacy/riemann.go  | 156 ++++++++++++++
 .../outputs/riemann_legacy/riemann_test.go |  27 +++
 10 files changed, 615 insertions(+), 69 deletions(-)
 create mode 100644 plugins/outputs/riemann/README.md
 create mode 100644 plugins/outputs/riemann_legacy/riemann.go
 create mode 100644 plugins/outputs/riemann_legacy/riemann_test.go

diff --git a/Godeps b/Godeps
index 99606414e..c033159c3 100644
--- a/Godeps
+++ b/Godeps
@@ -1,7 +1,7 @@
 github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
 github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
 github.com/aerospike/aerospike-client-go 7f3a312c3b2a60ac083ec6da296091c52c795c63
-github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
+github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
 github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
 github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
 github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
diff --git a/Makefile b/Makefile
index 6c75b9295..79276f887
100644 --- a/Makefile +++ b/Makefile @@ -58,7 +58,7 @@ docker-run: docker run --name redis -p "6379:6379" -d redis docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt - docker run --name riemann -p "5555:5555" -d blalor/riemann + docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann docker run --name nats -p "4222:4222" -d nats # Run docker containers necessary for CircleCI unit tests @@ -71,7 +71,7 @@ docker-run-circle: -d spotify/kafka docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt - docker run --name riemann -p "5555:5555" -d blalor/riemann + docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann docker run --name nats -p "4222:4222" -d nats # Kill all docker containers, ignore errors diff --git a/README.md b/README.md index b758609d3..29892426c 100644 --- a/README.md +++ b/README.md @@ -219,6 +219,7 @@ Telegraf can also collect metrics via the following service plugins: * [opentsdb](./plugins/outputs/opentsdb) * [prometheus](./plugins/outputs/prometheus_client) * [riemann](./plugins/outputs/riemann) +* [riemann_legacy](./plugins/outputs/riemann_legacy) ## Contributing diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 3d0cdfd3a..aabdf180e 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -443,8 +443,39 @@ # # expiration_interval = "60s" -# # Configuration for the Riemann server to send metrics to +# # Configuration for Riemann server to send metrics to # [[outputs.riemann]] +# ## The full TCP or UDP URL of the Riemann server +# url = "tcp://localhost:5555" +# +# ## Riemann event TTL, floating-point time in seconds. +# ## Defines how long that an event is considered valid for in Riemann +# # ttl = 30.0 +# +# ## Separator to use between measurement and field name in Riemann service name +# ## This does not have any effect if 'measurement_as_attribute' is set to 'true' +# separator = "/" +# +# ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name +# # measurement_as_attribute = false +# +# ## Send string metrics as Riemann event states. +# ## Unless enabled all string metrics will be ignored +# # string_as_state = false +# +# ## A list of tag keys whose values get sent as Riemann tags. +# ## If empty, all Telegraf tag values will be sent as tags +# # tag_keys = ["telegraf","custom_tag"] +# +# ## Additional Riemann tags to send. +# # tags = ["telegraf-output"] +# +# ## Description for Riemann event +# # description_text = "metrics collected from telegraf" + + +# # Configuration for the legacy Riemann plugin +# [[outputs.riemann_legacy]] # ## URL of server # url = "localhost:5555" # ## transport protocol to use either tcp or udp diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index 96091b2ad..c10e00f78 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -20,4 +20,5 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/opentsdb" _ "github.com/influxdata/telegraf/plugins/outputs/prometheus_client" _ "github.com/influxdata/telegraf/plugins/outputs/riemann" + _ "github.com/influxdata/telegraf/plugins/outputs/riemann_legacy" ) diff --git a/plugins/outputs/riemann/README.md b/plugins/outputs/riemann/README.md new file mode 100644 index 000000000..2338a00dc --- /dev/null +++ b/plugins/outputs/riemann/README.md @@ -0,0 +1,83 @@ +# Riemann Output Plugin + +This plugin writes to [Riemann](http://riemann.io/) via TCP or UDP. 
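Worth noting before the configuration reference: the rewrite collapses the old separate `url`/`transport` options into one URL, which `Connect` later splits into dial arguments. A standalone sketch of that decomposition (not Telegraf code, values illustrative):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// "tcp://localhost:5555" -> network "tcp", address "localhost:5555";
	// a UDP server would instead be "udp://localhost:5555".
	u, err := url.Parse("tcp://localhost:5555")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Scheme, u.Host) // tcp localhost:5555
}
```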
+ +### Configuration: + +```toml +# Configuration for Riemann to send metrics to +[[outputs.riemann]] + ## The full TCP or UDP URL of the Riemann server + url = "tcp://localhost:5555" + + ## Riemann event TTL, floating-point time in seconds. + ## Defines how long that an event is considered valid for in Riemann + # ttl = 30.0 + + ## Separator to use between measurement and field name in Riemann service name + ## This does not have any effect if 'measurement_as_attribute' is set to 'true' + separator = "/" + + ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name + # measurement_as_attribute = false + + ## Send string metrics as Riemann event states. + ## Unless enabled all string metrics will be ignored + # string_as_state = false + + ## A list of tag keys whose values get sent as Riemann tags. + ## If empty, all Telegraf tag values will be sent as tags + # tag_keys = ["telegraf","custom_tag"] + + ## Additional Riemann tags to send. + # tags = ["telegraf-output"] + + ## Description for Riemann event + # description_text = "metrics collected from telegraf" +``` + +### Required parameters: + +* `url`: The full TCP or UDP URL of the Riemann server to send events to. + +### Optional parameters: + +* `ttl`: Riemann event TTL, floating-point time in seconds. Defines how long that an event is considered valid for in Riemann. +* `separator`: Separator to use between measurement and field name in Riemann service name. +* `measurement_as_attribute`: Set measurement name as a Riemann attribute, instead of prepending it to the Riemann service name. +* `string_as_state`: Send string metrics as Riemann event states. If this is not enabled then all string metrics will be ignored. +* `tag_keys`: A list of tag keys whose values get sent as Riemann tags. If empty, all Telegraf tag values will be sent as tags. +* `tags`: Additional Riemann tags that will be sent. +* `description_text`: Description text for Riemann event. + +### Example Events: + +Riemann event emitted by Telegraf with default configuration: +``` +#riemann.codec.Event{ +:host "postgresql-1e612b44-e92f-4d27-9f30-5e2f53947870", :state nil, :description nil, :ttl 30.0, +:service "disk/used_percent", :metric 73.16736001949994, :path "/boot", :fstype "ext4", :time 1475605021} +``` + +Telegraf emitting the same Riemann event with `measurement_as_attribute` set to `true`: +``` +#riemann.codec.Event{ ... +:measurement "disk", :service "used_percent", :metric 73.16736001949994, +... 
:time 1475605021} +``` + +Telegraf emitting the same Riemann event with additional Riemann tags defined: +``` +#riemann.codec.Event{ +:host "postgresql-1e612b44-e92f-4d27-9f30-5e2f53947870", :state nil, :description nil, :ttl 30.0, +:service "disk/used_percent", :metric 73.16736001949994, :path "/boot", :fstype "ext4", :time 1475605021, +:tags ["telegraf" "postgres_cluster"]} +``` + +Telegraf emitting a Riemann event with a status text and `string_as_state` set to `true`, and a `description_text` defined: +``` +#riemann.codec.Event{ +:host "postgresql-1e612b44-e92f-4d27-9f30-5e2f53947870", :state "Running", :ttl 30.0, +:description "PostgreSQL master node is up and running", +:service "status", :time 1475605021} +``` diff --git a/plugins/outputs/riemann/riemann.go b/plugins/outputs/riemann/riemann.go index fa150e097..25cf3011a 100644 --- a/plugins/outputs/riemann/riemann.go +++ b/plugins/outputs/riemann/riemann.go @@ -3,6 +3,7 @@ package riemann import ( "fmt" "log" + "net/url" "os" "sort" "strings" @@ -12,44 +13,70 @@ import ( "github.com/influxdata/telegraf/plugins/outputs" ) -const deprecationMsg = "I! WARNING: this Riemann output plugin will be deprecated in a future release, see https://github.com/influxdata/telegraf/issues/1878 for more details & discussion." - type Riemann struct { - URL string - Transport string - Separator string + URL string + TTL float32 + Separator string + MeasurementAsAttribute bool + StringAsState bool + TagKeys []string + Tags []string + DescriptionText string client *raidman.Client } var sampleConfig = ` - ## URL of server - url = "localhost:5555" - ## transport protocol to use either tcp or udp - transport = "tcp" - ## separator to use between input name and field name in Riemann service name - separator = " " + ## The full TCP or UDP URL of the Riemann server + url = "tcp://localhost:5555" + + ## Riemann event TTL, floating-point time in seconds. + ## Defines how long that an event is considered valid for in Riemann + # ttl = 30.0 + + ## Separator to use between measurement and field name in Riemann service name + ## This does not have any effect if 'measurement_as_attribute' is set to 'true' + separator = "/" + + ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name + # measurement_as_attribute = false + + ## Send string metrics as Riemann event states. + ## Unless enabled all string metrics will be ignored + # string_as_state = false + + ## A list of tag keys whose values get sent as Riemann tags. + ## If empty, all Telegraf tag values will be sent as tags + # tag_keys = ["telegraf","custom_tag"] + + ## Additional Riemann tags to send. 
+ # tags = ["telegraf-output"] + + ## Description for Riemann event + # description_text = "metrics collected from telegraf" ` func (r *Riemann) Connect() error { - log.Printf(deprecationMsg) - c, err := raidman.Dial(r.Transport, r.URL) + parsed_url, err := url.Parse(r.URL) + if err != nil { + return err + } + client, err := raidman.Dial(parsed_url.Scheme, parsed_url.Host) if err != nil { r.client = nil return err } - r.client = c + r.client = client return nil } func (r *Riemann) Close() error { - if r.client == nil { - return nil + if r.client != nil { + r.client.Close() + r.client = nil } - r.client.Close() - r.client = nil return nil } @@ -62,91 +89,125 @@ func (r *Riemann) Description() string { } func (r *Riemann) Write(metrics []telegraf.Metric) error { - log.Printf(deprecationMsg) if len(metrics) == 0 { return nil } if r.client == nil { - err := r.Connect() - if err != nil { - return fmt.Errorf("FAILED to (re)connect to Riemann. Error: %s\n", err) + if err := r.Connect(); err != nil { + return fmt.Errorf("Failed to (re)connect to Riemann: %s", err.Error()) } } + // build list of Riemann events to send var events []*raidman.Event - for _, p := range metrics { - evs := buildEvents(p, r.Separator) + for _, m := range metrics { + evs := r.buildRiemannEvents(m) for _, ev := range evs { events = append(events, ev) } } - var senderr = r.client.SendMulti(events) - if senderr != nil { - r.Close() // always retuns nil - return fmt.Errorf("FAILED to send riemann message (will try to reconnect). Error: %s\n", - senderr) + if err := r.client.SendMulti(events); err != nil { + r.Close() + return fmt.Errorf("Failed to send riemann message: %s", err) } - return nil } -func buildEvents(p telegraf.Metric, s string) []*raidman.Event { +func (r *Riemann) buildRiemannEvents(m telegraf.Metric) []*raidman.Event { events := []*raidman.Event{} - for fieldName, value := range p.Fields() { - host, ok := p.Tags()["host"] + for fieldName, value := range m.Fields() { + // get host for Riemann event + host, ok := m.Tags()["host"] if !ok { - hostname, err := os.Hostname() - if err != nil { - host = "unknown" - } else { + if hostname, err := os.Hostname(); err == nil { host = hostname + } else { + host = "unknown" } } event := &raidman.Event{ - Host: host, - Service: serviceName(s, p.Name(), p.Tags(), fieldName), + Host: host, + Ttl: r.TTL, + Description: r.DescriptionText, + Time: m.Time().Unix(), + + Attributes: r.attributes(m.Name(), m.Tags()), + Service: r.service(m.Name(), fieldName), + Tags: r.tags(m.Tags()), } switch value.(type) { case string: + // only send string metrics if explicitly enabled, skip otherwise + if !r.StringAsState { + log.Printf("D! Riemann event states disabled, skipping metric value [%s]\n", value) + continue + } event.State = value.(string) - default: + case int, int64, uint64, float32, float64: event.Metric = value + default: + log.Printf("D! 
Riemann does not support metric value [%s]\n", value) + continue } events = append(events, event) } - return events } -func serviceName(s string, n string, t map[string]string, f string) string { - serviceStrings := []string{} - serviceStrings = append(serviceStrings, n) - - // we'll skip the 'host' tag - tagStrings := []string{} - tagNames := []string{} - - for tagName := range t { - tagNames = append(tagNames, tagName) +func (r *Riemann) attributes(name string, tags map[string]string) map[string]string { + if r.MeasurementAsAttribute { + tags["measurement"] = name } - sort.Strings(tagNames) - for _, tagName := range tagNames { - if tagName != "host" { - tagStrings = append(tagStrings, t[tagName]) + delete(tags, "host") // exclude 'host' tag + return tags +} + +func (r *Riemann) service(name string, field string) string { + var serviceStrings []string + + // if measurement is not enabled as an attribute then prepend it to service name + if !r.MeasurementAsAttribute { + serviceStrings = append(serviceStrings, name) + } + serviceStrings = append(serviceStrings, field) + + return strings.Join(serviceStrings, r.Separator) +} + +func (r *Riemann) tags(tags map[string]string) []string { + // always add specified Riemann tags + values := r.Tags + + // if tag_keys are specified, add those and return tag list + if len(r.TagKeys) > 0 { + for _, tagName := range r.TagKeys { + value, ok := tags[tagName] + if ok { + values = append(values, value) + } + } + return values + } + + // otherwise add all values from telegraf tag key/value pairs + var keys []string + for key := range tags { + keys = append(keys, key) + } + sort.Strings(keys) + + for _, key := range keys { + if key != "host" { // exclude 'host' tag + values = append(values, tags[key]) } } - var tagString string = strings.Join(tagStrings, s) - if tagString != "" { - serviceStrings = append(serviceStrings, tagString) - } - serviceStrings = append(serviceStrings, f) - return strings.Join(serviceStrings, s) + return values } func init() { diff --git a/plugins/outputs/riemann/riemann_test.go b/plugins/outputs/riemann/riemann_test.go index b599cdf60..10f89e786 100644 --- a/plugins/outputs/riemann/riemann_test.go +++ b/plugins/outputs/riemann/riemann_test.go @@ -1,22 +1,180 @@ package riemann import ( + "fmt" "testing" + "time" + "github.com/amir/raidman" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) +func TestAttributes(t *testing.T) { + tags := map[string]string{"tag1": "value1", "tag2": "value2"} + + r := &Riemann{} + require.Equal(t, + map[string]string{"tag1": "value1", "tag2": "value2"}, + r.attributes("test", tags)) + + // enable measurement as attribute, should now be included + r.MeasurementAsAttribute = true + require.Equal(t, + map[string]string{"tag1": "value1", "tag2": "value2", "measurement": "test"}, + r.attributes("test", tags)) +} + +func TestService(t *testing.T) { + r := &Riemann{ + Separator: "/", + } + require.Equal(t, "test/value", r.service("test", "value")) + + // enable measurement as attribute, should not be part of service name anymore + r.MeasurementAsAttribute = true + require.Equal(t, "value", r.service("test", "value")) +} + +func TestTags(t *testing.T) { + tags := map[string]string{"tag1": "value1", "tag2": "value2"} + + // all tag values plus additional tag should be present + r := &Riemann{ + Tags: []string{"test"}, + } + require.Equal(t, + []string{"test", "value1", "value2"}, + r.tags(tags)) + + // only tag2 value plus additional tag should be 
present + r.TagKeys = []string{"tag2"} + require.Equal(t, + []string{"test", "value2"}, + r.tags(tags)) + + // only tag1 value should be present + r.Tags = nil + r.TagKeys = []string{"tag1"} + require.Equal(t, + []string{"value1"}, + r.tags(tags)) +} + +func TestMetricEvents(t *testing.T) { + r := &Riemann{ + TTL: 20.0, + Separator: "/", + MeasurementAsAttribute: false, + DescriptionText: "metrics from telegraf", + Tags: []string{"telegraf"}, + } + + // build a single event + metric, _ := telegraf.NewMetric( + "test1", + map[string]string{"tag1": "value1", "host": "abc123"}, + map[string]interface{}{"value": 5.6}, + time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + events := r.buildRiemannEvents(metric) + require.Len(t, events, 1) + + // is event as expected? + expectedEvent := &raidman.Event{ + Ttl: 20.0, + Time: 1257894000, + Tags: []string{"telegraf", "value1"}, + Host: "abc123", + State: "", + Service: "test1/value", + Metric: 5.6, + Description: "metrics from telegraf", + Attributes: map[string]string{"tag1": "value1"}, + } + require.Equal(t, expectedEvent, events[0]) + + // build 2 events + metric, _ = telegraf.NewMetric( + "test2", + map[string]string{"host": "xyz987"}, + map[string]interface{}{"point": 1}, + time.Date(2012, time.November, 2, 3, 0, 0, 0, time.UTC), + ) + + events = append(events, r.buildRiemannEvents(metric)...) + require.Len(t, events, 2) + + // first event should still be the same + require.Equal(t, expectedEvent, events[0]) + + // second event + expectedEvent = &raidman.Event{ + Ttl: 20.0, + Time: 1351825200, + Tags: []string{"telegraf"}, + Host: "xyz987", + State: "", + Service: "test2/point", + Metric: int64(1), + Description: "metrics from telegraf", + Attributes: map[string]string{}, + } + require.Equal(t, expectedEvent, events[1]) +} + +func TestStateEvents(t *testing.T) { + r := &Riemann{ + MeasurementAsAttribute: true, + } + + // string metrics will be skipped unless explicitly enabled + metric, _ := telegraf.NewMetric( + "test", + map[string]string{"host": "host"}, + map[string]interface{}{"value": "running"}, + time.Date(2015, time.November, 9, 22, 0, 0, 0, time.UTC), + ) + + events := r.buildRiemannEvents(metric) + // no event should be present + require.Len(t, events, 0) + + // enable string metrics as event states + r.StringAsState = true + events = r.buildRiemannEvents(metric) + require.Len(t, events, 1) + + // is event as expected? 
+ expectedEvent := &raidman.Event{ + Ttl: 0, + Time: 1447106400, + Tags: nil, + Host: "host", + State: "running", + Service: "value", + Metric: nil, + Description: "", + Attributes: map[string]string{"measurement": "test"}, + } + require.Equal(t, expectedEvent, events[0]) +} + func TestConnectAndWrite(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } - url := testutil.GetLocalHost() + ":5555" - r := &Riemann{ - URL: url, - Transport: "tcp", + URL: fmt.Sprintf("tcp://%s:5555", testutil.GetLocalHost()), + TTL: 15.0, + Separator: "/", + MeasurementAsAttribute: false, + StringAsState: true, + DescriptionText: "metrics from telegraf", + Tags: []string{"docker"}, } err := r.Connect() @@ -24,4 +182,32 @@ func TestConnectAndWrite(t *testing.T) { err = r.Write(testutil.MockMetrics()) require.NoError(t, err) + + metrics := make([]telegraf.Metric, 0) + metrics = append(metrics, testutil.TestMetric(2)) + metrics = append(metrics, testutil.TestMetric(3.456789)) + metrics = append(metrics, testutil.TestMetric(uint(0))) + metrics = append(metrics, testutil.TestMetric("ok")) + metrics = append(metrics, testutil.TestMetric("running")) + err = r.Write(metrics) + require.NoError(t, err) + + time.Sleep(200 * time.Millisecond) + + // are there any "docker" tagged events in Riemann? + events, err := r.client.Query(`tagged "docker"`) + require.NoError(t, err) + require.NotZero(t, len(events)) + + // get Riemann events with state = "running", should be 1 event + events, err = r.client.Query(`state = "running"`) + require.NoError(t, err) + require.Len(t, events, 1) + + // is event as expected? + require.Equal(t, []string{"docker", "value1"}, events[0].Tags) + require.Equal(t, "running", events[0].State) + require.Equal(t, "test1/value", events[0].Service) + require.Equal(t, "metrics from telegraf", events[0].Description) + require.Equal(t, map[string]string{"tag1": "value1"}, events[0].Attributes) } diff --git a/plugins/outputs/riemann_legacy/riemann.go b/plugins/outputs/riemann_legacy/riemann.go new file mode 100644 index 000000000..69de7f521 --- /dev/null +++ b/plugins/outputs/riemann_legacy/riemann.go @@ -0,0 +1,156 @@ +package riemann_legacy + +import ( + "fmt" + "log" + "os" + "sort" + "strings" + + "github.com/amir/raidman" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs" +) + +const deprecationMsg = "E! Error: this Riemann output plugin will be deprecated in a future release, see https://github.com/influxdata/telegraf/issues/1878 for more details & discussion." 
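As a quick aid to the `tags()` logic in the rewritten plugin above: static `tags` are always emitted, `tag_keys` (when set) selects specific tag values, and otherwise all non-host tag values are emitted in sorted-key order. A standalone sketch of that precedence (illustrative names, not the plugin's actual method):

```go
package main

import (
	"fmt"
	"sort"
)

// riemannTags mirrors the precedence described above; it is a sketch only.
func riemannTags(static, tagKeys []string, tags map[string]string) []string {
	values := append([]string{}, static...)
	if len(tagKeys) > 0 {
		for _, k := range tagKeys {
			if v, ok := tags[k]; ok {
				values = append(values, v)
			}
		}
		return values
	}
	keys := make([]string, 0, len(tags))
	for k := range tags {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		if k != "host" { // the host tag becomes the event host instead
			values = append(values, tags[k])
		}
	}
	return values
}

func main() {
	tags := map[string]string{"tag1": "value1", "tag2": "value2", "host": "abc123"}
	fmt.Println(riemannTags([]string{"telegraf"}, nil, tags))              // [telegraf value1 value2]
	fmt.Println(riemannTags([]string{"telegraf"}, []string{"tag2"}, tags)) // [telegraf value2]
}
```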
+ +type Riemann struct { + URL string + Transport string + Separator string + + client *raidman.Client +} + +var sampleConfig = ` + ## URL of server + url = "localhost:5555" + ## transport protocol to use either tcp or udp + transport = "tcp" + ## separator to use between input name and field name in Riemann service name + separator = " " +` + +func (r *Riemann) Connect() error { + log.Printf(deprecationMsg) + c, err := raidman.Dial(r.Transport, r.URL) + + if err != nil { + r.client = nil + return err + } + + r.client = c + return nil +} + +func (r *Riemann) Close() error { + if r.client == nil { + return nil + } + r.client.Close() + r.client = nil + return nil +} + +func (r *Riemann) SampleConfig() string { + return sampleConfig +} + +func (r *Riemann) Description() string { + return "Configuration for the Riemann server to send metrics to" +} + +func (r *Riemann) Write(metrics []telegraf.Metric) error { + log.Printf(deprecationMsg) + if len(metrics) == 0 { + return nil + } + + if r.client == nil { + err := r.Connect() + if err != nil { + return fmt.Errorf("FAILED to (re)connect to Riemann. Error: %s\n", err) + } + } + + var events []*raidman.Event + for _, p := range metrics { + evs := buildEvents(p, r.Separator) + for _, ev := range evs { + events = append(events, ev) + } + } + + var senderr = r.client.SendMulti(events) + if senderr != nil { + r.Close() // always retuns nil + return fmt.Errorf("FAILED to send riemann message (will try to reconnect). Error: %s\n", + senderr) + } + + return nil +} + +func buildEvents(p telegraf.Metric, s string) []*raidman.Event { + events := []*raidman.Event{} + for fieldName, value := range p.Fields() { + host, ok := p.Tags()["host"] + if !ok { + hostname, err := os.Hostname() + if err != nil { + host = "unknown" + } else { + host = hostname + } + } + + event := &raidman.Event{ + Host: host, + Service: serviceName(s, p.Name(), p.Tags(), fieldName), + } + + switch value.(type) { + case string: + event.State = value.(string) + default: + event.Metric = value + } + + events = append(events, event) + } + + return events +} + +func serviceName(s string, n string, t map[string]string, f string) string { + serviceStrings := []string{} + serviceStrings = append(serviceStrings, n) + + // we'll skip the 'host' tag + tagStrings := []string{} + tagNames := []string{} + + for tagName := range t { + tagNames = append(tagNames, tagName) + } + sort.Strings(tagNames) + + for _, tagName := range tagNames { + if tagName != "host" { + tagStrings = append(tagStrings, t[tagName]) + } + } + var tagString string = strings.Join(tagStrings, s) + if tagString != "" { + serviceStrings = append(serviceStrings, tagString) + } + serviceStrings = append(serviceStrings, f) + return strings.Join(serviceStrings, s) +} + +func init() { + outputs.Add("riemann_legacy", func() telegraf.Output { + return &Riemann{} + }) +} diff --git a/plugins/outputs/riemann_legacy/riemann_test.go b/plugins/outputs/riemann_legacy/riemann_test.go new file mode 100644 index 000000000..e57cbb43c --- /dev/null +++ b/plugins/outputs/riemann_legacy/riemann_test.go @@ -0,0 +1,27 @@ +package riemann_legacy + +import ( + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestConnectAndWrite(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + url := testutil.GetLocalHost() + ":5555" + + r := &Riemann{ + URL: url, + Transport: "tcp", + } + + err := r.Connect() + require.NoError(t, err) + + err = 
r.Write(testutil.MockMetrics()) + require.NoError(t, err) +} From e9decadf75cc87e7b0eb4adaf0fdbeb2233640b6 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 27 Jan 2017 14:59:35 -0800 Subject: [PATCH 0087/1302] Riemann rewrite changelog update --- CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ebea6184..e0d7f635e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,11 +2,19 @@ ### Release Notes +- The [Riemann output plugin](./plugins/outputs/riemann) has been rewritten +and the previous riemann plugin is _incompatible_ with the new one. The reasons +for this are outlined in issue [#1878](https://github.com/influxdata/telegraf/issues/1878). +The previous riemann output will still be available using +`outputs.riemann_legacy` if needed, but that will eventually be deprecated. +It is highly recommended that all users migrate to the new riemann output plugin. + ### Features - [#2137](https://github.com/influxdata/telegraf/pull/2137): Added userstats to mysql input plugin. - [#2179](https://github.com/influxdata/telegraf/pull/2179): Added more InnoDB metric to MySQL plugin. - [#2251](https://github.com/influxdata/telegraf/pull/2251): InfluxDB output: use own client for improved through-put and less allocations. +- [#1900](https://github.com/influxdata/telegraf/pull/1900): Riemann plugin rewrite. ### Bugfixes From 1d864ebd407c937ac18d7b9e7914e14a02a97f52 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 27 Jan 2017 15:08:21 -0800 Subject: [PATCH 0088/1302] Fix riemann output unit tests --- plugins/outputs/riemann/riemann_test.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/plugins/outputs/riemann/riemann_test.go b/plugins/outputs/riemann/riemann_test.go index 10f89e786..e03d720ce 100644 --- a/plugins/outputs/riemann/riemann_test.go +++ b/plugins/outputs/riemann/riemann_test.go @@ -7,6 +7,7 @@ import ( "github.com/amir/raidman" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -72,14 +73,14 @@ func TestMetricEvents(t *testing.T) { } // build a single event - metric, _ := telegraf.NewMetric( + m, _ := metric.New( "test1", map[string]string{"tag1": "value1", "host": "abc123"}, map[string]interface{}{"value": 5.6}, time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), ) - events := r.buildRiemannEvents(metric) + events := r.buildRiemannEvents(m) require.Len(t, events, 1) // is event as expected? @@ -97,14 +98,14 @@ func TestMetricEvents(t *testing.T) { require.Equal(t, expectedEvent, events[0]) // build 2 events - metric, _ = telegraf.NewMetric( + m, _ = metric.New( "test2", map[string]string{"host": "xyz987"}, map[string]interface{}{"point": 1}, time.Date(2012, time.November, 2, 3, 0, 0, 0, time.UTC), ) - events = append(events, r.buildRiemannEvents(metric)...) + events = append(events, r.buildRiemannEvents(m)...) 
require.Len(t, events, 2) // first event should still be the same @@ -131,20 +132,20 @@ func TestStateEvents(t *testing.T) { } // string metrics will be skipped unless explicitly enabled - metric, _ := telegraf.NewMetric( + m, _ := metric.New( "test", map[string]string{"host": "host"}, map[string]interface{}{"value": "running"}, time.Date(2015, time.November, 9, 22, 0, 0, 0, time.UTC), ) - events := r.buildRiemannEvents(metric) + events := r.buildRiemannEvents(m) // no event should be present require.Len(t, events, 0) // enable string metrics as event states r.StringAsState = true - events = r.buildRiemannEvents(metric) + events = r.buildRiemannEvents(m) require.Len(t, events, 1) // is event as expected? From 074e6d177cd35861a9677898a7a04e8f5eb4b421 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Mon, 4 Jul 2016 18:48:37 -0400 Subject: [PATCH 0089/1302] add support for diskio name templates & udev tags closes #1453 closes #1386 closes #1428 --- CHANGELOG.md | 1 + plugins/inputs/system/disk.go | 85 ++++++++++++++++++- plugins/inputs/system/disk_linux.go | 66 +++++++++++++++ plugins/inputs/system/disk_linux_test.go | 101 +++++++++++++++++++++++ plugins/inputs/system/disk_other.go | 9 ++ 5 files changed, 261 insertions(+), 1 deletion(-) create mode 100644 plugins/inputs/system/disk_linux.go create mode 100644 plugins/inputs/system/disk_linux_test.go create mode 100644 plugins/inputs/system/disk_other.go diff --git a/CHANGELOG.md b/CHANGELOG.md index e0d7f635e..777eefbdf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ It is highly recommended that all users migrate to the new riemann output plugin - [#2179](https://github.com/influxdata/telegraf/pull/2179): Added more InnoDB metric to MySQL plugin. - [#2251](https://github.com/influxdata/telegraf/pull/2251): InfluxDB output: use own client for improved through-put and less allocations. - [#1900](https://github.com/influxdata/telegraf/pull/1900): Riemann plugin rewrite. +- [#1453](https://github.com/influxdata/telegraf/pull/1453): diskio: add support for name templates and udev tags. ### Bugfixes diff --git a/plugins/inputs/system/disk.go b/plugins/inputs/system/disk.go index 548a9ce23..3f6d83c1c 100644 --- a/plugins/inputs/system/disk.go +++ b/plugins/inputs/system/disk.go @@ -2,6 +2,7 @@ package system import ( "fmt" + "regexp" "strings" "github.com/influxdata/telegraf" @@ -82,7 +83,11 @@ type DiskIOStats struct { ps PS Devices []string + DeviceTags []string + NameTemplates []string SkipSerialNumber bool + + infoCache map[string]diskInfoCache } func (_ *DiskIOStats) Description() string { @@ -96,6 +101,23 @@ var diskIoSampleConfig = ` # devices = ["sda", "sdb"] ## Uncomment the following line if you need disk serial numbers. # skip_serial_number = false + # + ## On systems which support it, device metadata can be added in the form of + ## tags. + ## Currently only Linux is supported via udev properties. You can view + ## available properties for a device by running: + ## 'udevadm info -q property -n /dev/sda' + # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] + # + ## Using the same metadata source as device_tags, you can also customize the + ## name of the device via templates. + ## The 'name_templates' parameter is a list of templates to try and apply to + ## the device. The template may contain variables in the form of '$PROPERTY' or + ## '${PROPERTY}'. The first template which does not contain any variables not + ## present for the device is used as the device name tag. 
+ ## The typical use case is for LVM volumes, to get the VG/LV name instead of + ## the near-meaningless DM-0 name. + # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] ` func (_ *DiskIOStats) SampleConfig() string { @@ -123,7 +145,10 @@ func (s *DiskIOStats) Gather(acc telegraf.Accumulator) error { continue } tags := map[string]string{} - tags["name"] = io.Name + tags["name"] = s.diskName(io.Name) + for t, v := range s.diskTags(io.Name) { + tags[t] = v + } if !s.SkipSerialNumber { if len(io.SerialNumber) != 0 { tags["serial"] = io.SerialNumber @@ -148,6 +173,64 @@ func (s *DiskIOStats) Gather(acc telegraf.Accumulator) error { return nil } +var varRegex = regexp.MustCompile(`\$(?:\w+|\{\w+\})`) + +func (s *DiskIOStats) diskName(devName string) string { + di, err := s.diskInfo(devName) + if err != nil { + // discard error :-( + // We can't return error because it's non-fatal to the Gather(). + // And we have no logger, so we can't log it. + return devName + } + if di == nil { + return devName + } + + for _, nt := range s.NameTemplates { + miss := false + name := varRegex.ReplaceAllStringFunc(nt, func(sub string) string { + sub = sub[1:] // strip leading '$' + if sub[0] == '{' { + sub = sub[1 : len(sub)-1] // strip leading & trailing '{' '}' + } + if v, ok := di[sub]; ok { + return v + } + miss = true + return "" + }) + + if !miss { + return name + } + } + + return devName +} + +func (s *DiskIOStats) diskTags(devName string) map[string]string { + di, err := s.diskInfo(devName) + if err != nil { + // discard error :-( + // We can't return error because it's non-fatal to the Gather(). + // And we have no logger, so we can't log it. + return nil + } + if di == nil { + return nil + } + + tags := map[string]string{} + for _, dt := range s.DeviceTags { + if v, ok := di[dt]; ok { + tags[dt] = v + } + } + + return tags +} + func init() { inputs.Add("disk", func() telegraf.Input { return &DiskStats{ps: &systemPS{}} diff --git a/plugins/inputs/system/disk_linux.go b/plugins/inputs/system/disk_linux.go new file mode 100644 index 000000000..e5a0cff55 --- /dev/null +++ b/plugins/inputs/system/disk_linux.go @@ -0,0 +1,66 @@ +package system + +import ( + "bufio" + "fmt" + "os" + "strings" + "syscall" +) + +type diskInfoCache struct { + stat syscall.Stat_t + values map[string]string +} + +var udevPath = "/run/udev/data" + +func (s *DiskIOStats) diskInfo(devName string) (map[string]string, error) { + fi, err := os.Stat("/dev/" + devName) + if err != nil { + return nil, err + } + stat, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil, nil + } + + if s.infoCache == nil { + s.infoCache = map[string]diskInfoCache{} + } + ic, ok := s.infoCache[devName] + if ok { + return ic.values, nil + } else { + ic = diskInfoCache{ + stat: *stat, + values: map[string]string{}, + } + s.infoCache[devName] = ic + } + di := ic.values + + major := stat.Rdev >> 8 & 0xff + minor := stat.Rdev & 0xff + + f, err := os.Open(fmt.Sprintf("%s/b%d:%d", udevPath, major, minor)) + if err != nil { + return nil, err + } + defer f.Close() + scnr := bufio.NewScanner(f) + + for scnr.Scan() { + l := scnr.Text() + if len(l) < 4 || l[:2] != "E:" { + continue + } + kv := strings.SplitN(l[2:], "=", 2) + if len(kv) < 2 { + continue + } + di[kv[0]] = kv[1] + } + + return di, nil +} diff --git a/plugins/inputs/system/disk_linux_test.go b/plugins/inputs/system/disk_linux_test.go new file mode 100644 index 000000000..801ad328a --- /dev/null +++ b/plugins/inputs/system/disk_linux_test.go @@ -0,0 +1,101 @@ +// +build linux + +package system 
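Before the tests, a self-contained illustration of the template expansion the new `diskName` code performs: variables of the form `$VAR` or `${VAR}` are replaced from udev properties, and a template is only accepted when every variable it references resolves. The property values below are made up:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as the plugin's varRegex: matches $VAR and ${VAR}.
	varRe := regexp.MustCompile(`\$(?:\w+|\{\w+\})`)
	props := map[string]string{"DM_VG_NAME": "vg0", "DM_LV_NAME": "root"}

	template := "$DM_VG_NAME/${DM_LV_NAME}"
	miss := false
	name := varRe.ReplaceAllStringFunc(template, func(sub string) string {
		sub = sub[1:] // strip leading '$'
		if sub[0] == '{' {
			sub = sub[1 : len(sub)-1] // strip '{' and '}'
		}
		if v, ok := props[sub]; ok {
			return v
		}
		miss = true
		return ""
	})
	fmt.Println(name, miss) // vg0/root false: every variable resolved
}
```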
+ +import ( + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var nullDiskInfo = []byte(` +E:MY_PARAM_1=myval1 +E:MY_PARAM_2=myval2 +`) + +// setupNullDisk sets up fake udev info as if /dev/null were a disk. +func setupNullDisk(t *testing.T) func() error { + td, err := ioutil.TempDir("", ".telegraf.TestDiskInfo") + require.NoError(t, err) + + origUdevPath := udevPath + + cleanFunc := func() error { + udevPath = origUdevPath + return os.RemoveAll(td) + } + + udevPath = td + err = ioutil.WriteFile(td+"/b1:3", nullDiskInfo, 0644) // 1:3 is the 'null' device + if err != nil { + cleanFunc() + t.Fatal(err) + } + + return cleanFunc +} + +func TestDiskInfo(t *testing.T) { + clean := setupNullDisk(t) + defer clean() + + s := &DiskIOStats{} + di, err := s.diskInfo("null") + require.NoError(t, err) + assert.Equal(t, "myval1", di["MY_PARAM_1"]) + assert.Equal(t, "myval2", di["MY_PARAM_2"]) + + // test that data is cached + err = clean() + require.NoError(t, err) + + di, err = s.diskInfo("null") + require.NoError(t, err) + assert.Equal(t, "myval1", di["MY_PARAM_1"]) + assert.Equal(t, "myval2", di["MY_PARAM_2"]) + + // unfortunately we can't adjust mtime on /dev/null to test cache invalidation +} + +// DiskIOStats.diskName isn't a linux specific function, but dependent +// functions are a no-op on non-Linux. +func TestDiskIOStats_diskName(t *testing.T) { + defer setupNullDisk(t)() + + tests := []struct { + templates []string + expected string + }{ + {[]string{"$MY_PARAM_1"}, "myval1"}, + {[]string{"${MY_PARAM_1}"}, "myval1"}, + {[]string{"x$MY_PARAM_1"}, "xmyval1"}, + {[]string{"x${MY_PARAM_1}x"}, "xmyval1x"}, + {[]string{"$MISSING", "$MY_PARAM_1"}, "myval1"}, + {[]string{"$MY_PARAM_1", "$MY_PARAM_2"}, "myval1"}, + {[]string{"$MISSING"}, "null"}, + {[]string{"$MY_PARAM_1/$MY_PARAM_2"}, "myval1/myval2"}, + {[]string{"$MY_PARAM_2/$MISSING"}, "null"}, + } + + for _, tc := range tests { + s := DiskIOStats{ + NameTemplates: tc.templates, + } + assert.Equal(t, tc.expected, s.diskName("null"), "Templates: %#v", tc.templates) + } +} + +// DiskIOStats.diskTags isn't a linux specific function, but dependent +// functions are a no-op on non-Linux. +func TestDiskIOStats_diskTags(t *testing.T) { + defer setupNullDisk(t)() + + s := &DiskIOStats{ + DeviceTags: []string{"MY_PARAM_2"}, + } + dt := s.diskTags("null") + assert.Equal(t, map[string]string{"MY_PARAM_2": "myval2"}, dt) +} diff --git a/plugins/inputs/system/disk_other.go b/plugins/inputs/system/disk_other.go new file mode 100644 index 000000000..fa9121cdf --- /dev/null +++ b/plugins/inputs/system/disk_other.go @@ -0,0 +1,9 @@ +// +build !linux + +package system + +type diskInfoCache struct{} + +func (s *DiskIOStats) diskInfo(devName string) (map[string]string, error) { + return nil, nil +} From 738cbbdbb6ff9bb9fa1a561270ed4e60e5d7127c Mon Sep 17 00:00:00 2001 From: John Engelman Date: Sat, 28 Jan 2017 18:47:25 -0600 Subject: [PATCH 0090/1302] Add numerical representation of Consul health check state. 
(#2277)
---
 CHANGELOG.md                         |  1 +
 plugins/inputs/consul/README.md      | 11 +++++++++--
 plugins/inputs/consul/consul.go      |  5 +++++
 plugins/inputs/consul/consul_test.go |  3 +++
 4 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 777eefbdf..f3e0626d8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,7 @@ It is highly recommended that all users migrate to the new riemann output plugin
 - [#2251](https://github.com/influxdata/telegraf/pull/2251): InfluxDB output: use own client for improved through-put and less allocations.
 - [#1900](https://github.com/influxdata/telegraf/pull/1900): Riemann plugin rewrite.
 - [#1453](https://github.com/influxdata/telegraf/pull/1453): diskio: add support for name templates and udev tags.
+- [#2277](https://github.com/influxdata/telegraf/pull/2277): add integer metrics for Consul check health state.
 
 ### Bugfixes
 
diff --git a/plugins/inputs/consul/README.md b/plugins/inputs/consul/README.md
index 01a39cbf7..dbb576421 100644
--- a/plugins/inputs/consul/README.md
+++ b/plugins/inputs/consul/README.md
@@ -35,12 +35,19 @@ Fields:
 - check_name
 - service_id
 - status
+- passing
+- critical
+- warning
+
+`passing`, `critical`, and `warning` are integer representations of the health
+check state. A value of `1` indicates that the health check was in that
+state at the time of this sample.
 
 ## Example output
 
 ```
 $ telegraf --config ./telegraf.conf -input-filter consul -test
 * Plugin: consul, Collection 1
-> consul_health_checks,host=wolfpit,node=consul-server-node,check_id="serfHealth" check_name="Serf Health Status",service_id="",status="passing" 1464698464486439902
-> consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com,check_id="service:www-example-com.test01" check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical" 1464698464486519036
+> consul_health_checks,host=wolfpit,node=consul-server-node,check_id="serfHealth" check_name="Serf Health Status",service_id="",status="passing",passing=1i,critical=0i,warning=0i 1464698464486439902
+> consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com,check_id="service:www-example-com.test01" check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical",passing=0i,critical=1i,warning=0i 1464698464486519036
 ```
diff --git a/plugins/inputs/consul/consul.go b/plugins/inputs/consul/consul.go
index 4c28f4d12..0eaa25604 100644
--- a/plugins/inputs/consul/consul.go
+++ b/plugins/inputs/consul/consul.go
@@ -97,7 +97,12 @@ func (c *Consul) GatherHealthCheck(acc telegraf.Accumulator, checks []*api.Healt
 		record["check_name"] = check.Name
 		record["service_id"] = check.ServiceID
+
 		record["status"] = check.Status
+		record["passing"] = 0
+		record["critical"] = 0
+		record["warning"] = 0
+		record[check.Status] = 1
 
 		tags["node"] = check.Node
 		tags["service_name"] = check.ServiceName
diff --git a/plugins/inputs/consul/consul_test.go b/plugins/inputs/consul/consul_test.go
index f970d4449..d0595508d 100644
--- a/plugins/inputs/consul/consul_test.go
+++ b/plugins/inputs/consul/consul_test.go
@@ -24,6 +24,9 @@ func TestGatherHealtCheck(t *testing.T) {
 	expectedFields := map[string]interface{}{
 		"check_name": "foo.health",
 		"status":     "passing",
+		"passing":    1,
+		"critical":   0,
+		"warning":    0,
 		"service_id": "foo.123",
 	}

From e87ce22af9900064cf15eb0ea6a579d988de06b4 Mon Sep 17 00:00:00 2001
From: Cameron Sparr
Date: Wed, 1 Feb 2017 08:55:22 +0000
Subject: [PATCH 0091/1302]
running output: Drop nil metrics fixes #2317 --- internal/models/running_output.go | 3 +++ internal/models/running_output_test.go | 17 +++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/internal/models/running_output.go b/internal/models/running_output.go index 0ae78c983..0ce756f47 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -90,6 +90,9 @@ func NewRunningOutput( // AddMetric adds a metric to the output. This function can also write cached // points if FlushBufferWhenFull is true. func (ro *RunningOutput) AddMetric(m telegraf.Metric) { + if m == nil { + return + } // Filter any tagexclude/taginclude parameters before adding metric if ro.Config.Filter.IsActive() { // In order to filter out tags, we need to create a new metric, since diff --git a/internal/models/running_output_test.go b/internal/models/running_output_test.go index c55334218..bd39f2f9b 100644 --- a/internal/models/running_output_test.go +++ b/internal/models/running_output_test.go @@ -75,6 +75,23 @@ func BenchmarkRunningOutputAddFailWrites(b *testing.B) { } } +func TestAddingNilMetric(t *testing.T) { + conf := &OutputConfig{ + Filter: Filter{}, + } + + m := &mockOutput{} + ro := NewRunningOutput("test", m, conf, 1000, 10000) + + ro.AddMetric(nil) + ro.AddMetric(nil) + ro.AddMetric(nil) + + err := ro.Write() + assert.NoError(t, err) + assert.Len(t, m.Metrics(), 0) +} + // Test that NameDrop filters ger properly applied. func TestRunningOutput_DropFilter(t *testing.T) { conf := &OutputConfig{ From fb7931591d50a720191787eb429242c630ce7197 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 1 Feb 2017 08:59:54 +0000 Subject: [PATCH 0092/1302] Changelog update --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f3e0626d8..836745df6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,12 @@ It is highly recommended that all users migrate to the new riemann output plugin - [#2077](https://github.com/influxdata/telegraf/issues/2077): SQL Server Input - Arithmetic overflow error converting numeric to data type int. - [#2262](https://github.com/influxdata/telegraf/issues/2262): Flush jitter can inhibit metric collection. +## v1.2.1 [unreleased] + +### Bugfixes + +- [#2317](https://github.com/influxdata/telegraf/issues/2317): Fix segfault on nil metrics with influxdb output. + ## v1.2 [2017-01-00] ### Release Notes From 3e37dda7b0480fe5a206332bccdb34bb2456bf73 Mon Sep 17 00:00:00 2001 From: Martin Date: Wed, 1 Feb 2017 11:07:02 +0100 Subject: [PATCH 0093/1302] Go version 1.7.4 -> 1.7.5 (#2348) --- CHANGELOG.md | 1 + circle.yml | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 836745df6..a750450c9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ It is highly recommended that all users migrate to the new riemann output plugin - [#1900](https://github.com/influxdata/telegraf/pull/1900): Riemann plugin rewrite. - [#1453](https://github.com/influxdata/telegraf/pull/1453): diskio: add support for name templates and udev tags. - [#2277](https://github.com/influxdata/telegraf/pull/2277): add integer metrics for Consul check health state. 
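A side note on the Consul change above: emitting the textual status alongside one-hot integer fields is what makes the health state aggregatable downstream. A standalone sketch of the pattern (the field names match the plugin; the surrounding code is illustrative):

```go
package main

import "fmt"

func main() {
	status := "critical" // as reported by a Consul health check

	fields := map[string]interface{}{
		"status":   status,
		"passing":  0,
		"critical": 0,
		"warning":  0,
	}
	fields[status] = 1 // flip exactly one state field to 1

	fmt.Println(fields) // map[critical:1 passing:0 status:critical warning:0]
}
```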
+- [#2348](https://github.com/influxdata/telegraf/pull/2348): Go version 1.7.4 -> 1.7.5 ### Bugfixes diff --git a/circle.yml b/circle.yml index 31b28602f..c237040a2 100644 --- a/circle.yml +++ b/circle.yml @@ -4,9 +4,9 @@ machine: post: - sudo service zookeeper stop - go version - - go version | grep 1.7.4 || sudo rm -rf /usr/local/go - - wget https://storage.googleapis.com/golang/go1.7.4.linux-amd64.tar.gz - - sudo tar -C /usr/local -xzf go1.7.4.linux-amd64.tar.gz + - go version | grep 1.7.5 || sudo rm -rf /usr/local/go + - wget https://storage.googleapis.com/golang/go1.7.5.linux-amd64.tar.gz + - sudo tar -C /usr/local -xzf go1.7.5.linux-amd64.tar.gz - go version dependencies: From 54cfbb5b87f10cf0229524c840198e78ac7798db Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 1 Feb 2017 09:50:34 +0000 Subject: [PATCH 0094/1302] metric: Fix negative number handling closes #2324 --- metric/metric.go | 2 +- metric/parse_test.go | 23 +++++++++++++++++++ .../mqtt_consumer/mqtt_consumer_test.go | 18 ++++++++++++++- plugins/parsers/influx/parser_test.go | 13 +++++++++++ 4 files changed, 54 insertions(+), 2 deletions(-) diff --git a/metric/metric.go b/metric/metric.go index 4fbee0ad1..936d0907e 100644 --- a/metric/metric.go +++ b/metric/metric.go @@ -305,7 +305,7 @@ func (m *metric) Fields() map[string]interface{} { case '"': // string field fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = unescape(string(m.fields[i:][i2+1:i3-1]), "fieldval") - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': // number field switch m.fields[i:][i3-1] { case 'i': diff --git a/metric/parse_test.go b/metric/parse_test.go index 8b7a8ff4d..40bcf60b8 100644 --- a/metric/parse_test.go +++ b/metric/parse_test.go @@ -44,6 +44,9 @@ cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string" cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string" ` +const negMetrics = `weather,host=local temp=-99i,temp_float=-99.4 1465839830100400200 +` + // some metrics are invalid const someInvalid = `cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 @@ -85,6 +88,26 @@ func TestParse(t *testing.T) { } } +func TestParseNegNumbers(t *testing.T) { + metrics, err := Parse([]byte(negMetrics)) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + + assert.Equal(t, + map[string]interface{}{ + "temp": int64(-99), + "temp_float": float64(-99.4), + }, + metrics[0].Fields(), + ) + assert.Equal(t, + map[string]string{ + "host": "local", + }, + metrics[0].Tags(), + ) +} + func TestParseErrors(t *testing.T) { start := time.Now() metrics, err := Parse([]byte(someInvalid)) diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index 7b8e0b834..2f5276191 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -14,6 +14,7 @@ import ( const ( testMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257\n" + testMsgNeg = "cpu_load_short,host=server01 value=-23422.0 1422568543702900257\n" testMsgGraphite = "cpu.load.short.graphite 23422 1454780029" testMsgJSON = "{\"a\": 5, \"b\": {\"c\": 6}}\n" invalidMsg = "cpu_load_short,host=server01 1422568543702900257\n" @@ -76,13 +77,28 @@ func TestPersistentClientIDFail(t *testing.T) { assert.Error(t, err) } -// Test that the parser parses NATS messages into metrics func TestRunParser(t *testing.T) { n, in := 
newTestMQTTConsumer() acc := testutil.Accumulator{} n.acc = &acc defer close(n.done) + n.parser, _ = parsers.NewInfluxParser() + go n.receiver() + in <- mqttMsg(testMsgNeg) + time.Sleep(time.Millisecond * 250) + + if a := acc.NFields(); a != 1 { + t.Errorf("got %v, expected %v", a, 1) + } +} + +func TestRunParserNegativeNumber(t *testing.T) { + n, in := newTestMQTTConsumer() + acc := testutil.Accumulator{} + n.acc = &acc + defer close(n.done) + n.parser, _ = parsers.NewInfluxParser() go n.receiver() in <- mqttMsg(testMsg) diff --git a/plugins/parsers/influx/parser_test.go b/plugins/parsers/influx/parser_test.go index 58531ff90..6b2ba8d56 100644 --- a/plugins/parsers/influx/parser_test.go +++ b/plugins/parsers/influx/parser_test.go @@ -19,6 +19,7 @@ var ( const ( validInflux = "cpu_load_short,cpu=cpu0 value=10 1257894000000000000\n" + negativeFloat = "cpu_load_short,cpu=cpu0 value=-13.4 1257894000000000000\n" validInfluxNewline = "\ncpu_load_short,cpu=cpu0 value=10 1257894000000000000\n" validInfluxNoNewline = "cpu_load_short,cpu=cpu0 value=10 1257894000000000000" invalidInflux = "I don't think this is line protocol\n" @@ -82,6 +83,18 @@ func TestParseValidInflux(t *testing.T) { "cpu": "cpu0", }, metrics[0].Tags()) assert.Equal(t, exptime, metrics[0].Time().UnixNano()) + + metrics, err = parser.Parse([]byte(negativeFloat)) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + assert.Equal(t, "cpu_load_short", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "value": float64(-13.4), + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{ + "cpu": "cpu0", + }, metrics[0].Tags()) + assert.Equal(t, exptime, metrics[0].Time().UnixNano()) } func TestParseLineValidInflux(t *testing.T) { From 4dad723088c54eef153e875872239fdc1827148e Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 1 Feb 2017 09:52:26 +0000 Subject: [PATCH 0095/1302] Changelog update --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a750450c9..2175b5cc5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,11 +24,12 @@ It is highly recommended that all users migrate to the new riemann output plugin - [#2077](https://github.com/influxdata/telegraf/issues/2077): SQL Server Input - Arithmetic overflow error converting numeric to data type int. - [#2262](https://github.com/influxdata/telegraf/issues/2262): Flush jitter can inhibit metric collection. -## v1.2.1 [unreleased] +## v1.2.1 [2017-02-01] ### Bugfixes - [#2317](https://github.com/influxdata/telegraf/issues/2317): Fix segfault on nil metrics with influxdb output. +- [#2324](https://github.com/influxdata/telegraf/issues/2324): Fix negative number handling. ## v1.2 [2017-01-00] From 19dee32287def9617ffa26dd691d6cf093ce979f Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 1 Feb 2017 10:11:16 +0000 Subject: [PATCH 0096/1302] Go 1.7.5 update cherry-picked to 1.2.1 release --- CHANGELOG.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2175b5cc5..7d1b1bb5b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,7 +17,6 @@ It is highly recommended that all users migrate to the new riemann output plugin - [#1900](https://github.com/influxdata/telegraf/pull/1900): Riemann plugin rewrite. - [#1453](https://github.com/influxdata/telegraf/pull/1453): diskio: add support for name templates and udev tags. - [#2277](https://github.com/influxdata/telegraf/pull/2277): add integer metrics for Consul check health state. 
-- [#2348](https://github.com/influxdata/telegraf/pull/2348): Go version 1.7.4 -> 1.7.5 ### Bugfixes @@ -31,6 +30,10 @@ It is highly recommended that all users migrate to the new riemann output plugin - [#2317](https://github.com/influxdata/telegraf/issues/2317): Fix segfault on nil metrics with influxdb output. - [#2324](https://github.com/influxdata/telegraf/issues/2324): Fix negative number handling. +### Features + +- [#2348](https://github.com/influxdata/telegraf/pull/2348): Go version 1.7.4 -> 1.7.5 + ## v1.2 [2017-01-00] ### Release Notes From 2932db84804f7f6d02f1779029a903fd778a7ffc Mon Sep 17 00:00:00 2001 From: njwhite Date: Wed, 1 Feb 2017 14:11:39 +0000 Subject: [PATCH 0097/1302] Make Logparser Plugin Check For New Files (#2141) * Make Logparser Plugin Check For New Files Check in the Gather metric to see if any new files matching the glob have appeared. If so, start tailing them from the beginning. * changelog update for #2141 --- CHANGELOG.md | 1 + plugins/inputs/logparser/logparser.go | 36 ++++++++++++++---- plugins/inputs/logparser/logparser_test.go | 43 ++++++++++++++++++++++ 3 files changed, 73 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7d1b1bb5b..8e5865e7c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ It is highly recommended that all users migrate to the new riemann output plugin ### Features +- [#2141](https://github.com/influxdata/telegraf/pull/2141): Logparser handles newly-created files. - [#2137](https://github.com/influxdata/telegraf/pull/2137): Added userstats to mysql input plugin. - [#2179](https://github.com/influxdata/telegraf/pull/2179): Added more InnoDB metric to MySQL plugin. - [#2251](https://github.com/influxdata/telegraf/pull/2251): InfluxDB output: use own client for improved through-put and less allocations. diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index 0778a8a6d..8ec328358 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -26,7 +26,7 @@ type LogParserPlugin struct { Files []string FromBeginning bool - tailers []*tail.Tail + tailers map[string]*tail.Tail lines chan string done chan struct{} wg sync.WaitGroup @@ -46,7 +46,9 @@ const sampleConfig = ` ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log ## /var/log/apache.log -> only tail the apache log file files = ["/var/log/apache/access.log"] - ## Read file from beginning. + ## Read files that currently exist from the beginning. Files that are created + ## while telegraf is running (and that match the "files" globs) will always + ## be read from the beginning. 
from_beginning = false ## Parse logstash-style "grok" patterns: @@ -77,7 +79,11 @@ func (l *LogParserPlugin) Description() string { } func (l *LogParserPlugin) Gather(acc telegraf.Accumulator) error { - return nil + l.Lock() + defer l.Unlock() + + // always start from the beginning of files that appear while we're running + return l.tailNewfiles(true) } func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error { @@ -87,6 +93,7 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error { l.acc = acc l.lines = make(chan string, 1000) l.done = make(chan struct{}) + l.tailers = make(map[string]*tail.Tail) // Looks for fields which implement LogParser interface l.parsers = []LogParser{} @@ -121,14 +128,22 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error { return err } + l.wg.Add(1) + go l.parser() + + return l.tailNewfiles(l.FromBeginning) +} + +// check the globs against files on disk, and start tailing any new files. +// Assumes l's lock is held! +func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error { var seek tail.SeekInfo - if !l.FromBeginning { + if !fromBeginning { seek.Whence = 2 seek.Offset = 0 } - l.wg.Add(1) - go l.parser() + errChan := errchan.New(len(l.Files)) // Create a "tailer" for each file for _, filepath := range l.Files { @@ -139,7 +154,13 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error { } files := g.Match() errChan = errchan.New(len(files)) + for file, _ := range files { + if _, ok := l.tailers[file]; ok { + // we're already tailing this file + continue + } + tailer, err := tail.TailFile(file, tail.Config{ ReOpen: true, @@ -152,7 +173,7 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error { // create a goroutine for each "tailer" l.wg.Add(1) go l.receiver(tailer) - l.tailers = append(l.tailers, tailer) + l.tailers[file] = tailer } } @@ -166,6 +187,7 @@ func (l *LogParserPlugin) receiver(tailer *tail.Tail) { var line *tail.Line for line = range tailer.Lines { + if line.Err != nil { log.Printf("E! 
Error tailing file %s, Error: %s\n", tailer.Filename, line.Err) diff --git a/plugins/inputs/logparser/logparser_test.go b/plugins/inputs/logparser/logparser_test.go index 97f33067e..059bfd266 100644 --- a/plugins/inputs/logparser/logparser_test.go +++ b/plugins/inputs/logparser/logparser_test.go @@ -1,6 +1,8 @@ package logparser import ( + "io/ioutil" + "os" "runtime" "strings" "testing" @@ -80,6 +82,47 @@ func TestGrokParseLogFiles(t *testing.T) { map[string]string{}) } +func TestGrokParseLogFilesAppearLater(t *testing.T) { + emptydir, err := ioutil.TempDir("", "TestGrokParseLogFilesAppearLater") + defer os.RemoveAll(emptydir) + assert.NoError(t, err) + + thisdir := getCurrentDir() + p := &grok.Parser{ + Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, + CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"}, + } + + logparser := &LogParserPlugin{ + FromBeginning: true, + Files: []string{emptydir + "/*.log"}, + GrokParser: p, + } + + acc := testutil.Accumulator{} + assert.NoError(t, logparser.Start(&acc)) + + time.Sleep(time.Millisecond * 500) + assert.Equal(t, acc.NFields(), 0) + + os.Symlink( + thisdir+"grok/testdata/test_a.log", + emptydir+"/test_a.log") + assert.NoError(t, logparser.Gather(&acc)) + time.Sleep(time.Millisecond * 500) + + logparser.Stop() + + acc.AssertContainsTaggedFields(t, "logparser_grok", + map[string]interface{}{ + "clientip": "192.168.1.1", + "myfloat": float64(1.25), + "response_time": int64(5432), + "myint": int64(101), + }, + map[string]string{"response_code": "200"}) +} + // Test that test_a.log line gets parsed even though we don't have the correct // pattern available for test_b.log func TestGrokParseLogFilesOneBad(t *testing.T) { From 32e06a489ddbaac11d4b5f2d98af67d41a804da8 Mon Sep 17 00:00:00 2001 From: Pierre Fersing Date: Wed, 1 Feb 2017 15:12:35 +0100 Subject: [PATCH 0098/1302] Keep -config-directory when running as Windows service (#2330) * Keep -config-directory when running as Windows service * Update changelog --- CHANGELOG.md | 2 +- cmd/telegraf/telegraf.go | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8e5865e7c..5131f50eb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,10 +11,10 @@ It is highly recommended that all users migrate to the new riemann output plugin ### Features -- [#2141](https://github.com/influxdata/telegraf/pull/2141): Logparser handles newly-created files. - [#2137](https://github.com/influxdata/telegraf/pull/2137): Added userstats to mysql input plugin. - [#2179](https://github.com/influxdata/telegraf/pull/2179): Added more InnoDB metric to MySQL plugin. - [#2251](https://github.com/influxdata/telegraf/pull/2251): InfluxDB output: use own client for improved through-put and less allocations. +- [#2330](https://github.com/influxdata/telegraf/pull/2330): Keep -config-directory when running as Windows service. - [#1900](https://github.com/influxdata/telegraf/pull/1900): Riemann plugin rewrite. - [#1453](https://github.com/influxdata/telegraf/pull/1453): diskio: add support for name templates and udev tags. - [#2277](https://github.com/influxdata/telegraf/pull/2277): add integer metrics for Consul check health state. 
diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 0f94c6e2c..398617e09 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -327,6 +327,9 @@ func main() { if *fConfig != "" { (*svcConfig).Arguments = []string{"-config", *fConfig} } + if *fConfigDirectory != "" { + (*svcConfig).Arguments = append((*svcConfig).Arguments, "-config-directory", *fConfigDirectory) + } err := service.Control(s, *fService) if err != nil { log.Fatal("E! " + err.Error()) From 9003efc3fa756cb3550e76b9ec494cae21c00a15 Mon Sep 17 00:00:00 2001 From: Len Smith Date: Wed, 1 Feb 2017 09:21:08 -0500 Subject: [PATCH 0099/1302] http_response : Add in support for looking for substring in response (#2204) * Add in support for looking for substring in response * Add note to CHANGELOG.md * Switch from substring match to regex match * Requested code changes * Make requested changes and refactor to avoid nested if-else. * Convert tabs to space and compile regex once --- CHANGELOG.md | 1 + plugins/inputs/http_response/README.md | 5 ++ plugins/inputs/http_response/http_response.go | 51 +++++++++-- .../http_response/http_response_test.go | 84 +++++++++++++++++++ 4 files changed, 135 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5131f50eb..e123e33a7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,6 +54,7 @@ plugins, not just statsd. ### Features +- [#2204](https://github.com/influxdata/telegraf/pull/2204): Extend http_response to support searching for a substring in response. Return 1 if found, else 0. - [#2123](https://github.com/influxdata/telegraf/pull/2123): Fix improper calculation of CPU percentages - [#1564](https://github.com/influxdata/telegraf/issues/1564): Use RFC3339 timestamps in log output. - [#1997](https://github.com/influxdata/telegraf/issues/1997): Non-default HTTP timeouts for RabbitMQ plugin. diff --git a/plugins/inputs/http_response/README.md b/plugins/inputs/http_response/README.md index ec873ad2b..01dd09536 100644 --- a/plugins/inputs/http_response/README.md +++ b/plugins/inputs/http_response/README.md @@ -23,6 +23,11 @@ This input plugin will test HTTP/HTTPS connections. 
# {'fake':'data'} # ''' + ## Optional substring or regex match in body of the response + ## response_string_match = "\"service_status\": \"up\"" + ## response_string_match = "ok" + ## response_string_match = "\".*_status\".?:.?\"up\"" + ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" # ssl_cert = "/etc/telegraf/cert.pem" diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index 34eadaa4f..111e35518 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -3,8 +3,11 @@ package http_response import ( "errors" "io" + "io/ioutil" + "log" "net/http" "net/url" + "regexp" "strings" "time" @@ -15,12 +18,14 @@ import ( // HTTPResponse struct type HTTPResponse struct { - Address string - Body string - Method string - ResponseTimeout internal.Duration - Headers map[string]string - FollowRedirects bool + Address string + Body string + Method string + ResponseTimeout internal.Duration + Headers map[string]string + FollowRedirects bool + ResponseStringMatch string + compiledStringMatch *regexp.Regexp // Path to CA file SSLCA string `toml:"ssl_ca"` @@ -54,6 +59,11 @@ var sampleConfig = ` # {'fake':'data'} # ''' + ## Optional substring or regex match in body of the response + ## response_string_match = "\"service_status\": \"up\"" + ## response_string_match = "ok" + ## response_string_match = "\".*_status\".?:.?\"up\"" + ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" # ssl_cert = "/etc/telegraf/cert.pem" @@ -137,6 +147,35 @@ func (h *HTTPResponse) HTTPGather() (map[string]interface{}, error) { } fields["response_time"] = time.Since(start).Seconds() fields["http_response_code"] = resp.StatusCode + + // Check the response for a regex match. + if h.ResponseStringMatch != "" { + + // Compile once and reuse + if h.compiledStringMatch == nil { + h.compiledStringMatch = regexp.MustCompile(h.ResponseStringMatch) + if err != nil { + log.Printf("E! Failed to compile regular expression %s : %s", h.ResponseStringMatch, err) + fields["response_string_match"] = 0 + return fields, nil + } + } + + bodyBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + log.Printf("E! 
Failed to read body of HTTP Response : %s", err) + fields["response_string_match"] = 0 + return fields, nil + } + + if h.compiledStringMatch.Match(bodyBytes) { + fields["response_string_match"] = 1 + } else { + fields["response_string_match"] = 0 + } + + } + return fields, nil } diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index f0d0040d6..236e5d88b 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -22,6 +22,9 @@ func setUpTestMux() http.Handler { mux.HandleFunc("/good", func(w http.ResponseWriter, req *http.Request) { fmt.Fprintf(w, "hit the good page!") }) + mux.HandleFunc("/jsonresponse", func(w http.ResponseWriter, req *http.Request) { + fmt.Fprintf(w, "\"service_status\": \"up\", \"healthy\" : \"true\"") + }) mux.HandleFunc("/badredirect", func(w http.ResponseWriter, req *http.Request) { http.Redirect(w, req, "/badredirect", http.StatusMovedPermanently) }) @@ -236,6 +239,87 @@ func TestBody(t *testing.T) { } } +func TestStringMatch(t *testing.T) { + mux := setUpTestMux() + ts := httptest.NewServer(mux) + defer ts.Close() + + h := &HTTPResponse{ + Address: ts.URL + "/good", + Body: "{ 'test': 'data'}", + Method: "GET", + ResponseStringMatch: "hit the good page", + ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + Headers: map[string]string{ + "Content-Type": "application/json", + }, + FollowRedirects: true, + } + fields, err := h.HTTPGather() + require.NoError(t, err) + assert.NotEmpty(t, fields) + if assert.NotNil(t, fields["http_response_code"]) { + assert.Equal(t, http.StatusOK, fields["http_response_code"]) + } + assert.Equal(t, 1, fields["response_string_match"]) + assert.NotNil(t, fields["response_time"]) + +} + +func TestStringMatchJson(t *testing.T) { + mux := setUpTestMux() + ts := httptest.NewServer(mux) + defer ts.Close() + + h := &HTTPResponse{ + Address: ts.URL + "/jsonresponse", + Body: "{ 'test': 'data'}", + Method: "GET", + ResponseStringMatch: "\"service_status\": \"up\"", + ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + Headers: map[string]string{ + "Content-Type": "application/json", + }, + FollowRedirects: true, + } + fields, err := h.HTTPGather() + require.NoError(t, err) + assert.NotEmpty(t, fields) + if assert.NotNil(t, fields["http_response_code"]) { + assert.Equal(t, http.StatusOK, fields["http_response_code"]) + } + assert.Equal(t, 1, fields["response_string_match"]) + assert.NotNil(t, fields["response_time"]) + +} + +func TestStringMatchFail(t *testing.T) { + mux := setUpTestMux() + ts := httptest.NewServer(mux) + defer ts.Close() + + h := &HTTPResponse{ + Address: ts.URL + "/good", + Body: "{ 'test': 'data'}", + Method: "GET", + ResponseStringMatch: "hit the bad page", + ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + Headers: map[string]string{ + "Content-Type": "application/json", + }, + FollowRedirects: true, + } + fields, err := h.HTTPGather() + require.NoError(t, err) + assert.NotEmpty(t, fields) + if assert.NotNil(t, fields["http_response_code"]) { + assert.Equal(t, http.StatusOK, fields["http_response_code"]) + } + assert.Equal(t, 0, fields["response_string_match"]) + assert.NotNil(t, fields["response_time"]) + +} + func TestTimeout(t *testing.T) { mux := setUpTestMux() ts := httptest.NewServer(mux) From aeb849d74464a48d870f47a86137a308d24ada08 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 1 Feb 2017 14:22:31 +0000 Subject: [PATCH 0100/1302] 
changelog fix --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e123e33a7..9834fe1a7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ It is highly recommended that all users migrate to the new riemann output plugin ### Features +- [#2204](https://github.com/influxdata/telegraf/pull/2204): Extend http_response to support searching for a substring in response. Return 1 if found, else 0. - [#2137](https://github.com/influxdata/telegraf/pull/2137): Added userstats to mysql input plugin. - [#2179](https://github.com/influxdata/telegraf/pull/2179): Added more InnoDB metric to MySQL plugin. - [#2251](https://github.com/influxdata/telegraf/pull/2251): InfluxDB output: use own client for improved through-put and less allocations. @@ -54,7 +55,6 @@ plugins, not just statsd. ### Features -- [#2204](https://github.com/influxdata/telegraf/pull/2204): Extend http_response to support searching for a substring in response. Return 1 if found, else 0. - [#2123](https://github.com/influxdata/telegraf/pull/2123): Fix improper calculation of CPU percentages - [#1564](https://github.com/influxdata/telegraf/issues/1564): Use RFC3339 timestamps in log output. - [#1997](https://github.com/influxdata/telegraf/issues/1997): Non-default HTTP timeouts for RabbitMQ plugin. From 07a622393232881f8289b25b4b32e9897e406af2 Mon Sep 17 00:00:00 2001 From: ldep30 Date: Wed, 1 Feb 2017 15:37:18 +0100 Subject: [PATCH 0101/1302] Add lock option to the IPtables input plugin (#2201) * Update README.md * Add lock support to the IPtables input plugin * Update iptables.go Doc cleaning --- plugins/inputs/iptables/README.md | 6 ++++++ plugins/inputs/iptables/iptables.go | 12 ++++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/iptables/README.md b/plugins/inputs/iptables/README.md index f5ebd4780..a711f1d4e 100644 --- a/plugins/inputs/iptables/README.md +++ b/plugins/inputs/iptables/README.md @@ -30,11 +30,17 @@ You may edit your sudo configuration with the following: telegraf ALL=(root) NOPASSWD: /usr/bin/iptables -nvL * ``` +### Using IPtables lock feature + +Defining multiple instances of this plugin in telegraf.conf can lead to concurrent IPtables access resulting in "ERROR in input [inputs.iptables]: exit status 4" messages in telegraf.log and missing metrics. Setting 'use_lock = true' in the plugin configuration will run IPtables with the '-w' switch, allowing a lock usage to prevent this error. + ### Configuration: ```toml # use sudo to run iptables use_sudo = false + # run iptables with the lock option + use_lock = false # defines the table to monitor: table = "filter" # defines the chains to monitor: diff --git a/plugins/inputs/iptables/iptables.go b/plugins/inputs/iptables/iptables.go index 4ceb45230..31b049d9f 100644 --- a/plugins/inputs/iptables/iptables.go +++ b/plugins/inputs/iptables/iptables.go @@ -16,6 +16,7 @@ import ( // Iptables is a telegraf plugin to gather packets and bytes throughput from Linux's iptables packet filter. type Iptables struct { UseSudo bool + UseLock bool Table string Chains []string lister chainLister @@ -32,8 +33,11 @@ func (ipt *Iptables) SampleConfig() string { ## iptables require root access on most systems. ## Setting 'use_sudo' to true will make use of sudo to run iptables. ## Users must configure sudo to allow telegraf user to run iptables with no password. 
- ## iptables can be restricted to only list command "iptables -nvL" + ## iptables can be restricted to only list command "iptables -nvL" use_sudo = false + ## Setting 'use_lock' to true runs iptables with the "-w" option. + ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl") + use_lock = false ## defines the table to monitor: table = "filter" ## defines the chains to monitor: @@ -75,7 +79,11 @@ func (ipt *Iptables) chainList(table, chain string) (string, error) { name = "sudo" args = append(args, iptablePath) } - args = append(args, "-nvL", chain, "-t", table, "-x") + iptablesBaseArgs := "-nvL" + if ipt.UseLock { + iptablesBaseArgs = "-wnvL" + } + args = append(args, iptablesBaseArgs, chain, "-t", table, "-x") c := exec.Command(name, args...) out, err := c.Output() return string(out), err From c528c53e5b1244aca9ba2df2ef66807924a19153 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 1 Feb 2017 14:39:08 +0000 Subject: [PATCH 0102/1302] iptables changelog update --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9834fe1a7..81b92976a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ It is highly recommended that all users migrate to the new riemann output plugin - [#1900](https://github.com/influxdata/telegraf/pull/1900): Riemann plugin rewrite. - [#1453](https://github.com/influxdata/telegraf/pull/1453): diskio: add support for name templates and udev tags. - [#2277](https://github.com/influxdata/telegraf/pull/2277): add integer metrics for Consul check health state. +- [#2201](https://github.com/influxdata/telegraf/pull/2201): Add lock option to the IPtables input plugin. ### Bugfixes From eafd1dcc7ca88b7a6b36de735be3bb5ae28459bd Mon Sep 17 00:00:00 2001 From: James Gregory Date: Thu, 2 Feb 2017 01:41:04 +1100 Subject: [PATCH 0103/1302] Kubernetes input: Handle null startTime for stopped pods (#2335) --- .../inputs/kubernetes/kubernetes_metrics.go | 2 +- plugins/inputs/kubernetes/kubernetes_test.go | 42 +++++++++++++++++++ 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/kubernetes/kubernetes_metrics.go b/plugins/inputs/kubernetes/kubernetes_metrics.go index a767a604a..96814bcbe 100644 --- a/plugins/inputs/kubernetes/kubernetes_metrics.go +++ b/plugins/inputs/kubernetes/kubernetes_metrics.go @@ -45,7 +45,7 @@ type CPUMetrics struct { // PodMetrics contains metric data on a given pod type PodMetrics struct { PodRef PodReference `json:"podRef"` - StartTime time.Time `json:"startTime"` + StartTime *time.Time `json:"startTime"` Containers []ContainerMetrics `json:"containers"` Network NetworkMetrics `json:"network"` Volumes []VolumeMetrics `json:"volume"` diff --git a/plugins/inputs/kubernetes/kubernetes_test.go b/plugins/inputs/kubernetes/kubernetes_test.go index 14134c150..528299be1 100644 --- a/plugins/inputs/kubernetes/kubernetes_test.go +++ b/plugins/inputs/kubernetes/kubernetes_test.go @@ -92,6 +92,29 @@ func TestKubernetesStats(t *testing.T) { } acc.AssertContainsTaggedFields(t, "kubernetes_pod_container", fields, tags) + fields = map[string]interface{}{ + "cpu_usage_nanocores": int64(846503), + "cpu_usage_core_nanoseconds": int64(56507553554), + "memory_usage_bytes": int64(0), + "memory_working_set_bytes": int64(0), + "memory_rss_bytes": int64(0), + "memory_page_faults": int64(0), + "memory_major_page_faults": int64(0), + "rootfs_available_bytes": int64(0), + "rootfs_capacity_bytes": int64(0), + "rootfs_used_bytes": int64(0), + "logsfs_avaialble_bytes": 
int64(0), + "logsfs_capacity_bytes": int64(0), + "logsfs_used_bytes": int64(0), + } + tags = map[string]string{ + "node_name": "node1", + "container_name": "stopped-container", + "namespace": "foons", + "pod_name": "stopped-pod", + } + acc.AssertContainsTaggedFields(t, "kubernetes_pod_container", fields, tags) + fields = map[string]interface{}{ "available_bytes": int64(7903948800), "capacity_bytes": int64(7903961088), @@ -284,6 +307,25 @@ var response = ` "name": "volume4" } ] + }, + { + "podRef": { + "name": "stopped-pod", + "namespace": "foons", + "uid": "da7c1865-d67d-4688-b679-c485ed44b2aa" + }, + "startTime": null, + "containers": [ + { + "name": "stopped-container", + "startTime": "2016-09-26T18:46:43Z", + "cpu": { + "time": "2016-09-27T16:57:32Z", + "usageNanoCores": 846503, + "usageCoreNanoSeconds": 56507553554 + } + } + ] } ] }` From 97050e9669e4067ce5ceb2faca1a5bde495f75e9 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 1 Feb 2017 14:41:53 +0000 Subject: [PATCH 0104/1302] changelog update --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 81b92976a..a7c7cde15 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ It is highly recommended that all users migrate to the new riemann output plugin - [#2077](https://github.com/influxdata/telegraf/issues/2077): SQL Server Input - Arithmetic overflow error converting numeric to data type int. - [#2262](https://github.com/influxdata/telegraf/issues/2262): Flush jitter can inhibit metric collection. +- [#2287](https://github.com/influxdata/telegraf/issues/2287): Kubernetes input: Handle null startTime for stopped pods ## v1.2.1 [2017-02-01] From c0bbde03eae6965fc54af25f64958e90a2391961 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Vizcaino?= Date: Wed, 1 Feb 2017 15:47:23 +0100 Subject: [PATCH 0105/1302] Ceph: represent pgmap states using tags (#2229) * ceph: maps are already refs, no need to use a pointer * ceph: pgmap_states are represented in a single metric "count", differenciated by tag * Update CHANGELOG --- CHANGELOG.md | 19 ++++ plugins/inputs/ceph/README.md | 7 +- plugins/inputs/ceph/ceph.go | 88 +++++++++-------- plugins/inputs/ceph/ceph_test.go | 161 +++++++++++++++++++++++++++++-- 4 files changed, 228 insertions(+), 47 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a7c7cde15..aa0ae6056 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,24 @@ ### Release Notes +- Ceph: the `ceph_pgmap_state` metric content has been modified to use a unique field `count`, with each state expressed as a `state` tag. + +Telegraf < 1.3: + +``` +# field_name value +active+clean 123 +active+clean+scrubbing 3 +``` + +Telegraf >= 1.3: + +``` +# field_name value tag +count 123 state=active+clean +count 3 state=active+clean+scrubbing +``` + - The [Riemann output plugin](./plugins/outputs/riemann) has been rewritten and the previous riemann plugin is _incompatible_ with the new one. The reasons for this are outlined in issue [#1878](https://github.com/influxdata/telegraf/issues/1878). @@ -14,6 +32,7 @@ It is highly recommended that all users migrate to the new riemann output plugin - [#2204](https://github.com/influxdata/telegraf/pull/2204): Extend http_response to support searching for a substring in response. Return 1 if found, else 0. - [#2137](https://github.com/influxdata/telegraf/pull/2137): Added userstats to mysql input plugin. - [#2179](https://github.com/influxdata/telegraf/pull/2179): Added more InnoDB metric to MySQL plugin. 
+- [#2229](https://github.com/influxdata/telegraf/pull/2229): `ceph_pgmap_state` metric now uses a single field `count`, with PG state published as `state` tag. - [#2251](https://github.com/influxdata/telegraf/pull/2251): InfluxDB output: use own client for improved through-put and less allocations. - [#2330](https://github.com/influxdata/telegraf/pull/2330): Keep -config-directory when running as Windows service. - [#1900](https://github.com/influxdata/telegraf/pull/1900): Riemann plugin rewrite. diff --git a/plugins/inputs/ceph/README.md b/plugins/inputs/ceph/README.md index b3bba1e50..771ec665b 100644 --- a/plugins/inputs/ceph/README.md +++ b/plugins/inputs/ceph/README.md @@ -117,7 +117,7 @@ All fields are collected under the **ceph** measurement and stored as float64s. * recovering\_objects\_per\_sec (float) * ceph\_pgmap\_state - * state name e.g. active+clean (float) + * count (float) * ceph\_usage * bytes\_used (float) @@ -186,7 +186,7 @@ All measurements will have the following tags: *Cluster Stats* -* ceph\_pg\_state has the following tags: +* ceph\_pgmap\_state has the following tags: * state (state for which the value applies e.g. active+clean, active+remapped+backfill) * ceph\_pool\_usage has the following tags: * id @@ -213,7 +213,8 @@ telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegr
 > ceph_osdmap,host=ceph-mon-0 epoch=170772,full=false,nearfull=false,num_in_osds=340,num_osds=340,num_remapped_pgs=0,num_up_osds=340 1468841037000000000
 > ceph_pgmap,host=ceph-mon-0 bytes_avail=634895531270144,bytes_total=812117151809536,bytes_used=177221620539392,data_bytes=56979991615058,num_pgs=22952,op_per_sec=15869,read_bytes_sec=43956026,version=39387592,write_bytes_sec=165344818 1468841037000000000
-> ceph_pgmap_state,host=ceph-mon-0 active+clean=22952 1468928660000000000
+> ceph_pgmap_state,host=ceph-mon-0,state=active+clean count=22952 1468928660000000000
+> ceph_pgmap_state,host=ceph-mon-0,state=active+degraded count=16 1468928660000000000
 > ceph_usage,host=ceph-mon-0 total_avail_bytes=634895514791936,total_bytes=812117151809536,total_used_bytes=177221637017600 1468841037000000000
 > ceph_pool_usage,host=ceph-mon-0,id=150,name=cinder.volumes bytes_used=12648553794802,kb_used=12352103316,max_avail=154342562489244,objects=3026295 1468841037000000000
 > ceph_pool_usage,host=ceph-mon-0,id=182,name=cinder.volumes.flash bytes_used=8541308223964,kb_used=8341121313,max_avail=39388593563936,objects=2075066 1468841037000000000
diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go
index e43c3d7d3..7c03b6262 100644
--- a/plugins/inputs/ceph/ceph.go
+++ b/plugins/inputs/ceph/ceph.go
@@ -4,13 +4,14 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
-	"github.com/influxdata/telegraf"
-	"github.com/influxdata/telegraf/plugins/inputs"
 	"io/ioutil"
 	"log"
 	"os/exec"
 	"path/filepath"
 	"strings"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/inputs"
 )
 
 const (
@@ -108,7 +109,7 @@ func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error {
 			log.Printf("E! error parsing dump from socket '%s': %v", s.socket, err)
 			continue
 		}
-		for tag, metrics := range *data {
+		for tag, metrics := range data {
 			acc.AddFields(measurement,
 				map[string]interface{}(metrics),
 				map[string]string{"type": s.sockType, "id": s.sockId, "collection": tag})
@@ -244,25 +245,19 @@ type taggedMetricMap map[string]metricMap
 
 // Parses a raw JSON string into a taggedMetricMap
 // Delegates the actual parsing to newTaggedMetricMap(..)
-func parseDump(dump string) (*taggedMetricMap, error) {
+func parseDump(dump string) (taggedMetricMap, error) {
 	data := make(map[string]interface{})
 	err := json.Unmarshal([]byte(dump), &data)
 	if err != nil {
 		return nil, fmt.Errorf("failed to parse json: '%s': %v", dump, err)
 	}
 
-	tmm := newTaggedMetricMap(data)
-
-	if err != nil {
-		return nil, fmt.Errorf("failed to tag dataset: '%v': %v", tmm, err)
-	}
-
-	return tmm, nil
+	return newTaggedMetricMap(data), nil
 }
 
 // Builds a TaggedMetricMap out of a generic string map.
 // The top-level key is used as a tag and all sub-keys are flattened into metrics
-func newTaggedMetricMap(data map[string]interface{}) *taggedMetricMap {
+func newTaggedMetricMap(data map[string]interface{}) taggedMetricMap {
 	tmm := make(taggedMetricMap)
 	for tag, datapoints := range data {
 		mm := make(metricMap)
@@ -271,7 +266,7 @@ func newTaggedMetricMap(data map[string]interface{}) *taggedMetricMap {
 		}
 		tmm[tag] = mm
 	}
-	return &tmm
+	return tmm
 }
 
 // Recursively flattens any k-v hierarchy present in data.
@@ -376,36 +371,53 @@ func decodeStatusPgmap(acc telegraf.Accumulator, data map[string]interface{}) er
 	return nil
 }
 
-func decodeStatusPgmapState(acc telegraf.Accumulator, data map[string]interface{}) error {
+func extractPgmapStates(data map[string]interface{}) ([]interface{}, error) {
+	const key = "pgs_by_state"
+
 	pgmap, ok := data["pgmap"].(map[string]interface{})
 	if !ok {
-		return fmt.Errorf("WARNING %s - unable to decode pgmap", measurement)
+		return nil, fmt.Errorf("WARNING %s - unable to decode pgmap", measurement)
 	}
-	fields := make(map[string]interface{})
-	for key, value := range pgmap {
-		switch value.(type) {
-		case []interface{}:
-			if key != "pgs_by_state" {
-				continue
-			}
-			for _, state := range value.([]interface{}) {
-				state_map, ok := state.(map[string]interface{})
-				if !ok {
-					return fmt.Errorf("WARNING %s - unable to decode pg state", measurement)
-				}
-				state_name, ok := state_map["state_name"].(string)
-				if !ok {
-					return fmt.Errorf("WARNING %s - unable to decode pg state name", measurement)
-				}
-				state_count, ok := state_map["count"].(float64)
-				if !ok {
-					return fmt.Errorf("WARNING %s - unable to decode pg state count", measurement)
-				}
-				fields[state_name] = state_count
-			}
+
+	s, ok := pgmap[key]
+	if !ok {
+		return nil, fmt.Errorf("WARNING %s - pgmap is missing the %s field", measurement, key)
+	}
+
+	states, ok := s.([]interface{})
+	if !ok {
+		return nil, fmt.Errorf("WARNING %s - pgmap[%s] is not a list", measurement, key)
+	}
+	return states, nil
+}
+
+func decodeStatusPgmapState(acc telegraf.Accumulator, data map[string]interface{}) error {
+	states, err := extractPgmapStates(data)
+	if err != nil {
+		return err
+	}
+	for _, state := range states {
+		stateMap, ok := state.(map[string]interface{})
+		if !ok {
+			return fmt.Errorf("WARNING %s - unable to decode pg state", measurement)
 		}
+		stateName, ok := stateMap["state_name"].(string)
+		if !ok {
+			return fmt.Errorf("WARNING %s - unable to decode pg state name", measurement)
+		}
+		stateCount, ok := stateMap["count"].(float64)
+		if !ok {
+			return fmt.Errorf("WARNING %s - unable to decode pg state count", measurement)
+		}
+
+		tags := map[string]string{
+			"state": stateName,
+		}
+		fields := map[string]interface{}{
+			"count": stateCount,
+		}
+		acc.AddFields("ceph_pgmap_state", fields, tags)
 	}
-	acc.AddFields("ceph_pgmap_state", fields, map[string]string{})
 	return nil
 }
 
diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go
index f7b17ece3..4a75acd15 100644
--- a/plugins/inputs/ceph/ceph_test.go
+++ b/plugins/inputs/ceph/ceph_test.go
@@ -1,15 +1,17 @@
 package ceph
 
 import (
+	"encoding/json"
 	"fmt"
-	"github.com/influxdata/telegraf/testutil"
-	"github.com/stretchr/testify/assert"
 	"io/ioutil"
 	"os"
 	"path"
 	"strconv"
 	"strings"
 	"testing"
+
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/assert"
 )
 
 const (
@@ -24,15 +26,38 @@ func TestParseSockId(t *testing.T) {
 func TestParseMonDump(t *testing.T) {
 	dump, err := parseDump(monPerfDump)
 	assert.NoError(t, err)
-	assert.InEpsilon(t, 5678670180, (*dump)["cluster"]["osd_kb_used"], epsilon)
-	assert.InEpsilon(t, 6866.540527000, (*dump)["paxos"]["store_state_latency.sum"], epsilon)
+	assert.InEpsilon(t, 5678670180, dump["cluster"]["osd_kb_used"], epsilon)
+	assert.InEpsilon(t, 6866.540527000, dump["paxos"]["store_state_latency.sum"], epsilon)
 }
 
 func TestParseOsdDump(t *testing.T) {
 	dump, err := parseDump(osdPerfDump)
 	assert.NoError(t, err)
-	assert.InEpsilon(t, 552132.109360000, (*dump)["filestore"]["commitcycle_interval.sum"], epsilon)
-	assert.Equal(t, float64(0), (*dump)["mutex-FileJournal::finisher_lock"]["wait.avgcount"])
+	assert.InEpsilon(t, 552132.109360000, dump["filestore"]["commitcycle_interval.sum"], epsilon)
+	assert.Equal(t, float64(0), dump["mutex-FileJournal::finisher_lock"]["wait.avgcount"])
+}
+
+func TestDecodeStatusPgmapState(t *testing.T) {
+	data := make(map[string]interface{})
+	err := json.Unmarshal([]byte(clusterStatusDump), &data)
+	assert.NoError(t, err)
+
+	acc := &testutil.Accumulator{}
+	err = decodeStatusPgmapState(acc, data)
+	assert.NoError(t, err)
+
+	var results = []struct {
+		fields map[string]interface{}
+		tags   map[string]string
+	}{
+		{map[string]interface{}{"count": float64(2560)}, map[string]string{"state": "active+clean"}},
+		{map[string]interface{}{"count": float64(10)}, map[string]string{"state": "active+scrubbing"}},
+		{map[string]interface{}{"count": float64(5)}, map[string]string{"state": "active+backfilling"}},
+	}
+
+	for _, r := range results {
+		acc.AssertContainsTaggedFields(t, "ceph_pgmap_state", r.fields, r.tags)
+	}
 }
 
 func TestGather(t *testing.T) {
@@ -685,3 +710,127 @@ var osdPerfDump = `
       "wait": { "avgcount": 0,
           "sum": 0.000000000}}}
 `
+var clusterStatusDump = `
+{
+  "health": {
+    "health": {
+      "health_services": [
+        {
+          "mons": [
+            {
+              "name": "a",
+              "kb_total": 114289256,
+              "kb_used": 26995516,
+              "kb_avail": 81465132,
+              "avail_percent": 71,
+              "last_updated": "2017-01-03 17:20:57.595004",
+              "store_stats": {
+                "bytes_total": 942117141,
+                "bytes_sst": 0,
+                "bytes_log": 4345406,
+                "bytes_misc": 937771735,
+                "last_updated": "0.000000"
+              },
+              "health": "HEALTH_OK"
+            },
+            {
+              "name": "b",
+              "kb_total": 114289256,
+              "kb_used": 27871624,
+              "kb_avail": 80589024,
+              "avail_percent": 70,
+              "last_updated": "2017-01-03 17:20:47.784331",
+              "store_stats": {
+                "bytes_total": 454853104,
+                "bytes_sst": 0,
+                "bytes_log": 5788320,
+                "bytes_misc": 449064784,
+                "last_updated": "0.000000"
+              },
+              "health": "HEALTH_OK"
+            },
+            {
+              "name": "c",
+              "kb_total": 130258508,
+              "kb_used": 38076996,
+              "kb_avail": 85541692,
+              "avail_percent": 65,
+              "last_updated": "2017-01-03 17:21:03.311123",
+              "store_stats": {
+                "bytes_total": 455555199,
+                "bytes_sst": 0,
+                "bytes_log": 6950876,
+                "bytes_misc": 448604323,
+                "last_updated": "0.000000"
+              },
+              "health": "HEALTH_OK"
+            }
+          ]
+        }
+      ]
+    },
+    "timechecks": {
+      "epoch": 504,
+      "round": 34642,
+      "round_status": "finished",
+      "mons": [
+        { "name": "a", "skew": 0, "latency": 0, "health": "HEALTH_OK" },
+        { "name": "b", "skew": -0, "latency": 0.000951, "health": "HEALTH_OK" },
+        { "name": "c", "skew": -0, "latency": 0.000946, "health": "HEALTH_OK" }
+      ]
+    },
+    "summary": [],
+    "overall_status": "HEALTH_OK",
+    "detail": []
+  },
+  "fsid": "01234567-abcd-9876-0123-ffeeddccbbaa",
+  "election_epoch": 504,
+  "quorum": [ 0, 1, 2 ],
+  "quorum_names": [ "a", "b", "c" ],
+  "monmap": {
+    "epoch": 17,
+    "fsid": "01234567-abcd-9876-0123-ffeeddccbbaa",
+    "modified": "2016-04-11 14:01:52.600198",
+    "created": "0.000000",
+    "mons": [
+      { "rank": 0, "name": "a", "addr": "192.168.0.1:6789/0" },
+      { "rank": 1, "name": "b", "addr": "192.168.0.2:6789/0" },
+      { "rank": 2, "name": "c", "addr": "192.168.0.3:6789/0" }
+    ]
+  },
+  "osdmap": {
+    "osdmap": {
+      "epoch": 21734,
+      "num_osds": 24,
+      "num_up_osds": 24,
+      "num_in_osds": 24,
+      "full": false,
+      "nearfull": false,
+      "num_remapped_pgs": 0
+    }
+  },
+  "pgmap": {
+    "pgs_by_state": [
+      { "state_name": "active+clean", "count": 2560 },
+      { "state_name": "active+scrubbing", "count": 10 },
+      { "state_name": "active+backfilling", "count": 5 }
+    ],
+    "version": 52314277,
+    "num_pgs": 2560,
+    "data_bytes": 2700031960713,
+    "bytes_used": 7478347665408,
+    "bytes_avail": 9857462382592,
+    "bytes_total": 17335810048000,
+    "read_bytes_sec": 0,
+    "write_bytes_sec": 367217,
+    "op_per_sec": 98
+  },
+  "mdsmap": {
+    "epoch": 1,
+    "up": 0,
+    "in": 0,
+    "max": 0,
+    "by_rank": []
+  }
+}
+`

From 3f224a15d56ab90e79dadcabb0cda1b8ddfeaf1e Mon Sep 17 00:00:00 2001
From: Nathan Haugo 
Date: Wed, 1 Feb 2017 13:23:45 -0800
Subject: [PATCH 0106/1302] Update readme to link to k8s plugin (#2355)

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 29892426c..f8a465591 100644
--- a/README.md
+++ b/README.md
@@ -124,6 +124,7 @@ configuration options.
 * [ipmi_sensor](./plugins/inputs/ipmi_sensor)
 * [iptables](./plugins/inputs/iptables)
 * [jolokia](./plugins/inputs/jolokia)
+* [kubernetes](./plugins/inputs/kubernetes)
 * [leofs](./plugins/inputs/leofs)
 * [lustre2](./plugins/inputs/lustre2)
 * [mailchimp](./plugins/inputs/mailchimp)

From f7d551a807a06731b79d68888a162c45f898d4af Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Wed, 1 Feb 2017 22:43:23 +0000
Subject: [PATCH 0107/1302] Add more nested globpath tests
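
For context, a minimal sketch of the `**` (super asterisk) pattern these
tests exercise. This is an illustration only, assuming the internal/globpath
API as it is used by the logparser plugin earlier in this series (Compile
returns a matcher, Match returns the matched paths keyed by filename):

```
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/telegraf/internal/globpath"
)

func main() {
	// ** is the "super asterisk": unlike *, it also descends into
	// nested directories
	g, err := globpath.Compile("/var/log/**.txt")
	if err != nil {
		log.Fatal(err)
	}
	for file := range g.Match() {
		fmt.Println(file) // e.g. /var/log/nested1/nested2/nested.txt
	}
}
```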

---
 internal/globpath/globpath_test.go                   | 12 +++++++++++-
 .../globpath/testdata/nested1/nested2/nested.txt     |  0
 2 files changed, 11 insertions(+), 1 deletion(-)
 create mode 100644 internal/globpath/testdata/nested1/nested2/nested.txt

diff --git a/internal/globpath/globpath_test.go b/internal/globpath/globpath_test.go
index db72c94f4..720572411 100644
--- a/internal/globpath/globpath_test.go
+++ b/internal/globpath/globpath_test.go
@@ -28,7 +28,7 @@ func TestCompileAndMatch(t *testing.T) {
 	require.NoError(t, err)
 
 	matches := g1.Match()
-	assert.Len(t, matches, 3)
+	assert.Len(t, matches, 6)
 	matches = g2.Match()
 	assert.Len(t, matches, 2)
 	matches = g3.Match()
@@ -56,6 +56,16 @@ func TestFindRootDir(t *testing.T) {
 	}
 }
 
+func TestFindNestedTextFile(t *testing.T) {
+	dir := getTestdataDir()
+	// test super asterisk
+	g1, err := Compile(dir + "/**.txt")
+	require.NoError(t, err)
+
+	matches := g1.Match()
+	assert.Len(t, matches, 1)
+}
+
 func getTestdataDir() string {
 	_, filename, _, _ := runtime.Caller(1)
 	return strings.Replace(filename, "globpath_test.go", "testdata", 1)
diff --git a/internal/globpath/testdata/nested1/nested2/nested.txt b/internal/globpath/testdata/nested1/nested2/nested.txt
new file mode 100644
index 000000000..e69de29bb

From 285be648c4c23159d40b4cf756bf786f2304653a Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Wed, 1 Feb 2017 16:18:39 +0000
Subject: [PATCH 0108/1302] Godeps update

closes #2356
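
Note: the vendored library bumps pull in upstream API changes visible in
this diff: the influxdb client point's Fields() now also returns an error
(see the metric_test.go change), docker's engine-api dropped the
ExecutionDriver info field, and the newer sarama rejects a sync producer
unless success returns are enabled, which appears to be why kafka.go now
sets that flag. A hedged, standalone sketch of the sarama requirement
(the broker address is an illustrative placeholder, not Telegraf config):

```
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	// the updated sarama returns a configuration error from
	// NewSyncProducer when Return.Successes is false
	config.Producer.Return.Successes = true
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()
}
```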
---
 CHANGELOG.md                         |   5 +-
 Godeps                               | 110 ++++++++++++++-------------
 metric/metric_test.go                |   3 +-
 plugins/inputs/docker/docker_test.go |   1 -
 plugins/outputs/kafka/kafka.go       |   1 +
 5 files changed, 62 insertions(+), 58 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index aa0ae6056..1f377a98c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -32,7 +32,7 @@ It is highly recommended that all users migrate to the new riemann output plugin
 - [#2204](https://github.com/influxdata/telegraf/pull/2204): Extend http_response to support searching for a substring in response. Return 1 if found, else 0.
 - [#2137](https://github.com/influxdata/telegraf/pull/2137): Added userstats to mysql input plugin.
 - [#2179](https://github.com/influxdata/telegraf/pull/2179): Added more InnoDB metric to MySQL plugin.
-- [#2229](https://github.com/influxdata/telegraf/pull/2229): `ceph_pgmap_state` metric now uses a single field `count`, with PG state published as `state` tag. 
+- [#2229](https://github.com/influxdata/telegraf/pull/2229): `ceph_pgmap_state` metric now uses a single field `count`, with PG state published as `state` tag.
 - [#2251](https://github.com/influxdata/telegraf/pull/2251): InfluxDB output: use own client for improved through-put and less allocations.
 - [#2330](https://github.com/influxdata/telegraf/pull/2330): Keep -config-directory when running as Windows service.
 - [#1900](https://github.com/influxdata/telegraf/pull/1900): Riemann plugin rewrite.
@@ -44,7 +44,8 @@ It is highly recommended that all users migrate to the new riemann output plugin
 
 - [#2077](https://github.com/influxdata/telegraf/issues/2077): SQL Server Input - Arithmetic overflow error converting numeric to data type int.
 - [#2262](https://github.com/influxdata/telegraf/issues/2262): Flush jitter can inhibit metric collection.
-- [#2287](https://github.com/influxdata/telegraf/issues/2287): Kubernetes input: Handle null startTime for stopped pods
+- [#2287](https://github.com/influxdata/telegraf/issues/2287): Kubernetes input: Handle null startTime for stopped pods.
+- [#2356](https://github.com/influxdata/telegraf/issues/2356): cpu input panic when /proc/stat is empty.
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/Godeps b/Godeps
index c033159c3..5443c1039 100644
--- a/Godeps
+++ b/Godeps
@@ -1,65 +1,67 @@
-github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
-github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
-github.com/aerospike/aerospike-client-go 7f3a312c3b2a60ac083ec6da296091c52c795c63
+github.com/Shopify/sarama 574d3147eee384229bf96a5d12c207fe7b5234f3
+github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
+github.com/aerospike/aerospike-client-go 95e1ad7791bdbca44707fedbb29be42024900d9c
 github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
-github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
-github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
-github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
-github.com/couchbase/go-couchbase cb664315a324d87d19c879d9cc67fda6be8c2ac1
-github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2
+github.com/aws/aws-sdk-go 7524cb911daddd6e5c9195def8e59ae892bef8d9
+github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
+github.com/cenkalti/backoff b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3
+github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
+github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29
 github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
-github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
-github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
-github.com/docker/engine-api 8924d6900370b4c7e7984be5adc61f50a80d7537
-github.com/docker/go-connections f549a9393d05688dff0992ef3efd8bbe6c628aeb
-github.com/docker/go-units 5d2041e26a699eaca682e2ea41c8f891e1060444
+github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
+github.com/docker/distribution fb0bebc4b64e3881cc52a2478d749845ed76d2a8
+github.com/docker/engine-api 4290f40c056686fcaa5c9caf02eac1dde9315adf
+github.com/docker/go-connections 9670439d95da2651d9dfc7acc5d2ed92d3f25ee6
+github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
 github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
-github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
-github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86
-github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
-github.com/gobwas/glob 49571a1557cd20e6a2410adc6421f85b66c730b5
-github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
-github.com/golang/snappy d9eb7a3d35ec988b8585d4a0068e462c27d28380
-github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
-github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
+github.com/eapache/go-xerial-snappy bb955e01b9346ac19dc29eb16586c90ded99a98c
+github.com/eapache/queue 44cc805cf13205b55f69e14bcb69867d1ae92f98
+github.com/eclipse/paho.mqtt.golang d4f545eb108a2d19f9b1a735689dbfb719bc21fb
+github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
+github.com/gobwas/glob bea32b9cd2d6f55753d94a28e959b13f0244797a
+github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
+github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
+github.com/gorilla/mux 392c28fe23e1c45ddba891b0320b3b5df220beea
 github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
-github.com/hashicorp/consul 5aa90455ce78d4d41578bafc86305e6e6b28d7d2
-github.com/hpcloud/tail b2940955ab8b26e19d43a43c4da0475dd81bdb56
-github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
-github.com/influxdata/influxdb fc57c0f7c635df3873f3d64f0ed2100ddc94d5ae
-github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
+github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
+github.com/hpcloud/tail 915e5feba042395f5fda4dbe9c0e99aeab3088b3
+github.com/influxdata/config 8ec4638a81500c20be24855812bc8498ebe2dc92
+github.com/influxdata/influxdb 2fe8ed308439a98a9b01943939b44048ed952c90
+github.com/influxdata/toml ad49a5c2936f96b8f5943c3fdba47630ccf45a0d
 github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
-github.com/kardianos/osext 29ae4ffbc9a6fe9fb2bc5029050ce6996ea1d3bc
-github.com/kardianos/service 5e335590050d6d00f3aa270217d288dda1c94d0a
+github.com/jackc/pgx c8080fc4a1bfa44bf90383ad0fdce2f68b7d313c
+github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
+github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893
 github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
-github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
-github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
-github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
-github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
+github.com/klauspost/crc32 cb6bfca970f6908083f26f39a79009d608efd5cd
+github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
+github.com/miekg/dns 99f84ae56e75126dd77e5de4fae2ea034a468ca1
 github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
-github.com/nats-io/nats ea8b4fd12ebb823073c0004b9f09ac8748f4f165
-github.com/nats-io/nuid a5152d67cf63cbfb5d992a395458722a45194715
-github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
-github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
-github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
+github.com/nats-io/go-nats ea9585611a4ab58a205b9b125ebd74c389a6b898
+github.com/nats-io/nats ea9585611a4ab58a205b9b125ebd74c389a6b898
+github.com/nats-io/nuid 289cccf02c178dc782430d534e3c1f5b72af807f
+github.com/nsqio/go-nsq a53d495e81424aaf7a7665a9d32a97715c40e953
+github.com/pierrec/lz4 5c9560bfa9ace2bf86080bf40d46b34ae44604df
+github.com/pierrec/xxHash 5a004441f897722c627870a981d02b29924215fa
+github.com/prometheus/client_golang c317fb74746eac4fc65fe3909195f4cf67c5562a
 github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
-github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
-github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
-github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
-github.com/shirou/gopsutil 1516eb9ddc5e61ba58874047a98f8b44b5e585e8
-github.com/soniah/gosnmp 3fe3beb30fa9700988893c56a63b1df8e1b68c26
-github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
-github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
+github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4
+github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
+github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
+github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
+github.com/shirou/gopsutil 77b5d0080adb6f028e457906f1944d9fcca34442
+github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
+github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
+github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
 github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2
 github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee
-github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
-github.com/yuin/gopher-lua bf3808abd44b1e55143a2d7f08571aaa80db1808
+github.com/wvanbergen/kazoo-go 968957352185472eacb69215fa3dbfcfdbac1096
+github.com/yuin/gopher-lua 66c871e454fcf10251c61bf8eff02d0978cae75a
 github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
-golang.org/x/crypto c197bcf24cde29d3f73c7b4ac6fd41f4384e8af6
-golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
-golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
-gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
-gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
-gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886
-gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4
-github.com/jackc/pgx bb73d8427902891bbad7b949b9c60b32949d935f
+golang.org/x/crypto dc137beb6cce2043eb6b5f223ab8bf51c32459f4
+golang.org/x/net f2499483f923065a842d38eb4c7f1927e6fc6e6d
+golang.org/x/text 506f9d5c962f284575e88337e7d9296d27e729d3
+gopkg.in/dancannon/gorethink.v1 edc7a6a68e2d8015f5ffe1b2560eed989f8a45be
+gopkg.in/fatih/pool.v2 6e328e67893eb46323ad06f0e92cb9536babbabc
+gopkg.in/mgo.v2 3f83fa5005286a7fe593b055f0d7771a7dce4655
+gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6
diff --git a/metric/metric_test.go b/metric/metric_test.go
index f209dc3e4..65b4b0fba 100644
--- a/metric/metric_test.go
+++ b/metric/metric_test.go
@@ -608,9 +608,10 @@ func TestNewMetricPoint(t *testing.T) {
 	assert.NoError(t, err)
 
 	p := m.Point()
+	pfields, _ := p.Fields()
 
 	assert.Equal(t, fields, m.Fields())
-	assert.Equal(t, fields, p.Fields())
+	assert.Equal(t, fields, pfields)
 	assert.Equal(t, "cpu", p.Name())
 }
 
diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go
index a60203af5..cc0ada3c4 100644
--- a/plugins/inputs/docker/docker_test.go
+++ b/plugins/inputs/docker/docker_test.go
@@ -273,7 +273,6 @@ func (d FakeDockerClient) Info(ctx context.Context) (types.Info, error) {
 		Name:               "absol",
 		SwapLimit:          false,
 		IPv4Forwarding:     true,
-		ExecutionDriver:    "native-0.2",
 		ExperimentalBuild:  false,
 		CPUCfsPeriod:       true,
 		RegistryConfig: &registry.ServiceConfig{
diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go
index 0bec92812..3815f5726 100644
--- a/plugins/outputs/kafka/kafka.go
+++ b/plugins/outputs/kafka/kafka.go
@@ -109,6 +109,7 @@ func (k *Kafka) Connect() error {
 	config.Producer.RequiredAcks = sarama.RequiredAcks(k.RequiredAcks)
 	config.Producer.Compression = sarama.CompressionCodec(k.CompressionCodec)
 	config.Producer.Retry.Max = k.MaxRetry
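+	// sarama's SyncProducer requires Return.Successes to be true;
+	// constructing the producer fails with a config error otherwise.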
+	config.Producer.Return.Successes = true
 
 	// Legacy support ssl config
 	if k.Certificate != "" {

From dfba3ff37a33da2c7286801b4a2b7ca8b8731c79 Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Wed, 1 Feb 2017 17:14:47 +0000
Subject: [PATCH 0109/1302] fix telegraf swallowing panics in --test mode

This deferred function was causing telegraf to call os.Exit(0) instead of
panicking when it was supposed to.
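
A minimal sketch (not the telegraf code itself) of the mechanism: a
deferred os.Exit runs while a panic is unwinding and terminates the
process before the panic can surface.

    package main

    import "os"

    func main() {
        defer func() {
            // os.Exit never returns, so the in-flight panic below is
            // silently discarded and the process exits with code 0.
            os.Exit(0)
        }()
        panic("this panic is swallowed")
    }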

closes #2341
---
 CHANGELOG.md             |   1 +
 cmd/telegraf/telegraf.go | 193 +++++++++++++++++++++------------------
 2 files changed, 105 insertions(+), 89 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1f377a98c..87748d2bc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -46,6 +46,7 @@ It is highly recommended that all users migrate to the new riemann output plugin
 - [#2262](https://github.com/influxdata/telegraf/issues/2262): Flush jitter can inhibit metric collection.
 - [#2287](https://github.com/influxdata/telegraf/issues/2287): Kubernetes input: Handle null startTime for stopped pods.
 - [#2356](https://github.com/influxdata/telegraf/issues/2356): cpu input panic when /proc/stat is empty.
+- [#2341](https://github.com/influxdata/telegraf/issues/2341): telegraf swallowing panics in --test mode.
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go
index 398617e09..a3631d38a 100644
--- a/cmd/telegraf/telegraf.go
+++ b/cmd/telegraf/telegraf.go
@@ -109,94 +109,17 @@ Examples:
 
 var stop chan struct{}
 
-var srvc service.Service
-
-type program struct{}
-
-func reloadLoop(stop chan struct{}, s service.Service) {
-	defer func() {
-		if service.Interactive() {
-			os.Exit(0)
-		}
-		return
-	}()
+func reloadLoop(
+	stop chan struct{},
+	inputFilters []string,
+	outputFilters []string,
+	aggregatorFilters []string,
+	processorFilters []string,
+) {
 	reload := make(chan bool, 1)
 	reload <- true
 	for <-reload {
 		reload <- false
-		flag.Parse()
-		args := flag.Args()
-
-		var inputFilters []string
-		if *fInputFilters != "" {
-			inputFilter := strings.TrimSpace(*fInputFilters)
-			inputFilters = strings.Split(":"+inputFilter+":", ":")
-		}
-		var outputFilters []string
-		if *fOutputFilters != "" {
-			outputFilter := strings.TrimSpace(*fOutputFilters)
-			outputFilters = strings.Split(":"+outputFilter+":", ":")
-		}
-		var aggregatorFilters []string
-		if *fAggregatorFilters != "" {
-			aggregatorFilter := strings.TrimSpace(*fAggregatorFilters)
-			aggregatorFilters = strings.Split(":"+aggregatorFilter+":", ":")
-		}
-		var processorFilters []string
-		if *fProcessorFilters != "" {
-			processorFilter := strings.TrimSpace(*fProcessorFilters)
-			processorFilters = strings.Split(":"+processorFilter+":", ":")
-		}
-
-		if len(args) > 0 {
-			switch args[0] {
-			case "version":
-				fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
-				return
-			case "config":
-				config.PrintSampleConfig(
-					inputFilters,
-					outputFilters,
-					aggregatorFilters,
-					processorFilters,
-				)
-				return
-			}
-		}
-
-		// switch for flags which just do something and exit immediately
-		switch {
-		case *fOutputList:
-			fmt.Println("Available Output Plugins:")
-			for k, _ := range outputs.Outputs {
-				fmt.Printf("  %s\n", k)
-			}
-			return
-		case *fInputList:
-			fmt.Println("Available Input Plugins:")
-			for k, _ := range inputs.Inputs {
-				fmt.Printf("  %s\n", k)
-			}
-			return
-		case *fVersion:
-			fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
-			return
-		case *fSampleConfig:
-			config.PrintSampleConfig(
-				inputFilters,
-				outputFilters,
-				aggregatorFilters,
-				processorFilters,
-			)
-			return
-		case *fUsage != "":
-			if err := config.PrintInputConfig(*fUsage); err != nil {
-				if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
-					log.Fatalf("E! %s and %s", err, err2)
-				}
-			}
-			return
-		}
 
 		// If no other options are specified, load the config file and run.
 		c := config.NewConfig()
@@ -237,7 +160,7 @@ func reloadLoop(stop chan struct{}, s service.Service) {
 			if err != nil {
 				log.Fatal("E! " + err.Error())
 			}
-			return
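+			// exit explicitly here instead of relying on the removed
+			// deferred os.Exit, so panics in --test mode are not swallowed.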
+			os.Exit(0)
 		}
 
 		err = ag.Connect()
@@ -290,14 +213,26 @@ func usageExit(rc int) {
 	os.Exit(rc)
 }
 
+type program struct {
+	inputFilters      []string
+	outputFilters     []string
+	aggregatorFilters []string
+	processorFilters  []string
+}
+
 func (p *program) Start(s service.Service) error {
-	srvc = s
 	go p.run()
 	return nil
 }
 func (p *program) run() {
 	stop = make(chan struct{})
-	reloadLoop(stop, srvc)
+	reloadLoop(
+		stop,
+		p.inputFilters,
+		p.outputFilters,
+		p.aggregatorFilters,
+		p.processorFilters,
+	)
 }
 func (p *program) Stop(s service.Service) error {
 	close(stop)
@@ -307,6 +242,74 @@ func (p *program) Stop(s service.Service) error {
 func main() {
 	flag.Usage = func() { usageExit(0) }
 	flag.Parse()
+	args := flag.Args()
+
+	inputFilters, outputFilters := []string{}, []string{}
+	if *fInputFilters != "" {
+		inputFilters = strings.Split(":"+strings.TrimSpace(*fInputFilters)+":", ":")
+	}
+	if *fOutputFilters != "" {
+		outputFilters = strings.Split(":"+strings.TrimSpace(*fOutputFilters)+":", ":")
+	}
+
+	aggregatorFilters, processorFilters := []string{}, []string{}
+	if *fAggregatorFilters != "" {
+		aggregatorFilters = strings.Split(":"+strings.TrimSpace(*fAggregatorFilters)+":", ":")
+	}
+	if *fProcessorFilters != "" {
+		processorFilters = strings.Split(":"+strings.TrimSpace(*fProcessorFilters)+":", ":")
+	}
+
+	if len(args) > 0 {
+		switch args[0] {
+		case "version":
+			fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
+			return
+		case "config":
+			config.PrintSampleConfig(
+				inputFilters,
+				outputFilters,
+				aggregatorFilters,
+				processorFilters,
+			)
+			return
+		}
+	}
+
+	// switch for flags which just do something and exit immediately
+	switch {
+	case *fOutputList:
+		fmt.Println("Available Output Plugins:")
+		for k, _ := range outputs.Outputs {
+			fmt.Printf("  %s\n", k)
+		}
+		return
+	case *fInputList:
+		fmt.Println("Available Input Plugins:")
+		for k, _ := range inputs.Inputs {
+			fmt.Printf("  %s\n", k)
+		}
+		return
+	case *fVersion:
+		fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
+		return
+	case *fSampleConfig:
+		config.PrintSampleConfig(
+			inputFilters,
+			outputFilters,
+			aggregatorFilters,
+			processorFilters,
+		)
+		return
+	case *fUsage != "":
+		err := config.PrintInputConfig(*fUsage)
+		err2 := config.PrintOutputConfig(*fUsage)
+		if err != nil && err2 != nil {
+			log.Fatalf("E! %s and %s", err, err2)
+		}
+		return
+	}
+
 	if runtime.GOOS == "windows" {
 		svcConfig := &service.Config{
 			Name:        "telegraf",
@@ -316,7 +319,12 @@ func main() {
 			Arguments: []string{"-config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
 		}
 
-		prg := &program{}
+		prg := &program{
+			inputFilters:      inputFilters,
+			outputFilters:     outputFilters,
+			aggregatorFilters: aggregatorFilters,
+			processorFilters:  processorFilters,
+		}
 		s, err := service.New(prg, svcConfig)
 		if err != nil {
 			log.Fatal("E! " + err.Error())
@@ -334,6 +342,7 @@ func main() {
 			if err != nil {
 				log.Fatal("E! " + err.Error())
 			}
+			os.Exit(0)
 		} else {
 			err = s.Run()
 			if err != nil {
@@ -342,6 +351,12 @@ func main() {
 		}
 	} else {
 		stop = make(chan struct{})
-		reloadLoop(stop, nil)
+		reloadLoop(
+			stop,
+			inputFilters,
+			outputFilters,
+			aggregatorFilters,
+			processorFilters,
+		)
 	}
 }

From a610f8bd03f0ced2c38ed9b31eaf840bd7683cfb Mon Sep 17 00:00:00 2001
From: Matteo Cerutti 
Date: Mon, 9 Jan 2017 10:45:31 +0000
Subject: [PATCH 0110/1302] allow querying sensors via the open interface

closes #2244
closes #1547
---
 CHANGELOG.md                             |   1 +
 plugins/inputs/ipmi_sensor/README.md     |  38 ++-
 plugins/inputs/ipmi_sensor/command.go    |  35 ---
 plugins/inputs/ipmi_sensor/connection.go |   1 -
 plugins/inputs/ipmi_sensor/ipmi.go       |  80 +++--
 plugins/inputs/ipmi_sensor/ipmi_test.go  | 354 +++++++++++++++--------
 6 files changed, 315 insertions(+), 194 deletions(-)
 delete mode 100644 plugins/inputs/ipmi_sensor/command.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 87748d2bc..3696cdb8e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -39,6 +39,7 @@ It is highly recommended that all users migrate to the new riemann output plugin
 - [#1453](https://github.com/influxdata/telegraf/pull/1453): diskio: add support for name templates and udev tags.
 - [#2277](https://github.com/influxdata/telegraf/pull/2277): add integer metrics for Consul check health state.
 - [#2201](https://github.com/influxdata/telegraf/pull/2201): Add lock option to the IPtables input plugin.
+- [#2244](https://github.com/influxdata/telegraf/pull/2244): Support ipmi_sensor plugin querying local ipmi sensors.
 
 ### Bugfixes
 
diff --git a/plugins/inputs/ipmi_sensor/README.md b/plugins/inputs/ipmi_sensor/README.md
index 4a248fdc9..3a75d0c65 100644
--- a/plugins/inputs/ipmi_sensor/README.md
+++ b/plugins/inputs/ipmi_sensor/README.md
@@ -4,33 +4,50 @@ Get bare metal metrics using the command line utility `ipmitool`
 
 see ipmitool(https://sourceforge.net/projects/ipmitool/files/ipmitool/)
 
-The plugin will use the following command to collect remote host sensor stats:
+If no servers are specified, the plugin will query the local machine's sensor stats via the following command:
 
-ipmitool -I lan -H 192.168.1.1 -U USERID -P PASSW0RD sdr
+```
+ipmitool sdr
+```
+
+When one or more servers are specified, the plugin will use the following command to collect remote host sensor stats:
+
+```
+ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr
+```
 
 ## Measurements
 
 - ipmi_sensor:
 
-    * Tags: `name`, `server`, `unit`
+    * Tags: `name`, `unit`
     * Fields:
       - status
       - value
 
+The `server` tag will be made available when retrieving stats from remote server(s).
+
 ## Configuration
 
 ```toml
+# Read metrics from the bare metal servers via IPMI
 [[inputs.ipmi_sensor]]
-  ## specify servers via a url matching:
+  ## optionally specify the path to the ipmitool executable
+  # path = "/usr/bin/ipmitool"
+  #
+  ## optionally specify one or more servers via a url matching
   ##  [username[:password]@][protocol[(address)]]
   ##  e.g.
   ##    root:passwd@lan(127.0.0.1)
   ##
-  servers = ["USERID:PASSW0RD@lan(10.20.2.203)"]
+  ## if no servers are specified, local machine sensor stats will be queried
+  ##
+  # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
 ```
 
 ## Output
 
+When retrieving stats from a remote server:
 ```
 > ipmi_sensor,server=10.20.2.203,unit=degrees_c,name=ambient_temp status=1i,value=20 1458488465012559455
 > ipmi_sensor,server=10.20.2.203,unit=feet,name=altitude status=1i,value=80 1458488465012688613
@@ -40,3 +57,14 @@ ipmitool -I lan -H 192.168.1.1 -U USERID -P PASSW0RD sdr
 > ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1a_tach status=1i,value=2610 1458488465013137932
 > ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1b_tach status=1i,value=1775 1458488465013279896
 ```
+
+When retrieving stats from the local machine (no server specified):
+```
+> ipmi_sensor,unit=degrees_c,name=ambient_temp status=1i,value=20 1458488465012559455
+> ipmi_sensor,unit=feet,name=altitude status=1i,value=80 1458488465012688613
+> ipmi_sensor,unit=watts,name=avg_power status=1i,value=220 1458488465012776511
+> ipmi_sensor,unit=volts,name=planar_3.3v status=1i,value=3.28 1458488465012861875
+> ipmi_sensor,unit=volts,name=planar_vbat status=1i,value=3.04 1458488465013072508
+> ipmi_sensor,unit=rpm,name=fan_1a_tach status=1i,value=2610 1458488465013137932
+> ipmi_sensor,unit=rpm,name=fan_1b_tach status=1i,value=1775 1458488465013279896
+```
diff --git a/plugins/inputs/ipmi_sensor/command.go b/plugins/inputs/ipmi_sensor/command.go
deleted file mode 100644
index 76374c494..000000000
--- a/plugins/inputs/ipmi_sensor/command.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package ipmi_sensor
-
-import (
-	"fmt"
-	"os/exec"
-	"strings"
-	"time"
-
-	"github.com/influxdata/telegraf/internal"
-)
-
-type CommandRunner struct{}
-
-func (t CommandRunner) cmd(conn *Connection, args ...string) *exec.Cmd {
-	path := conn.Path
-	opts := append(conn.options(), args...)
-
-	if path == "" {
-		path = "ipmitool"
-	}
-
-	return exec.Command(path, opts...)
-}
-
-func (t CommandRunner) Run(conn *Connection, args ...string) (string, error) {
-	cmd := t.cmd(conn, args...)
-
-	output, err := internal.CombinedOutputTimeout(cmd, time.Second*5)
-	if err != nil {
-		return "", fmt.Errorf("run %s %s: %s (%s)",
-			cmd.Path, strings.Join(cmd.Args, " "), string(output), err)
-	}
-
-	return string(output), err
-}
diff --git a/plugins/inputs/ipmi_sensor/connection.go b/plugins/inputs/ipmi_sensor/connection.go
index 1e9bfbdcb..432b4aa02 100644
--- a/plugins/inputs/ipmi_sensor/connection.go
+++ b/plugins/inputs/ipmi_sensor/connection.go
@@ -12,7 +12,6 @@ type Connection struct {
 	Hostname  string
 	Username  string
 	Password  string
-	Path      string
 	Port      int
 	Interface string
 }
diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go
index aec56a0e4..b2389a675 100644
--- a/plugins/inputs/ipmi_sensor/ipmi.go
+++ b/plugins/inputs/ipmi_sensor/ipmi.go
@@ -1,48 +1,62 @@
 package ipmi_sensor
 
 import (
+	"fmt"
+	"os/exec"
 	"strconv"
 	"strings"
 	"time"
 
 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
 
+var (
+	execCommand = exec.Command // execCommand is used to mock commands in tests.
+)
+
 type Ipmi struct {
+	path    string
 	Servers []string
-	runner  Runner
 }
 
 var sampleConfig = `
-  ## specify servers via a url matching:
+  ## optionally specify the path to the ipmitool executable
+  # path = "/usr/bin/ipmitool"
+  #
+  ## optionally specify one or more servers via a url matching
   ##  [username[:password]@][protocol[(address)]]
   ##  e.g.
   ##    root:passwd@lan(127.0.0.1)
   ##
-  servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
+  ## if no servers are specified, local machine sensor stats will be queried
+  ##
+  # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
 `
 
-func NewIpmi() *Ipmi {
-	return &Ipmi{
-		runner: CommandRunner{},
-	}
-}
-
 func (m *Ipmi) SampleConfig() string {
 	return sampleConfig
 }
 
 func (m *Ipmi) Description() string {
-	return "Read metrics from one or many bare metal servers"
+	return "Read metrics from the bare metal servers via IPMI"
 }
 
 func (m *Ipmi) Gather(acc telegraf.Accumulator) error {
-	if m.runner == nil {
-		m.runner = CommandRunner{}
+	if len(m.path) == 0 {
+		return fmt.Errorf("ipmitool not found: verify that ipmitool is installed and that ipmitool is in your PATH")
 	}
-	for _, serv := range m.Servers {
-		err := m.gatherServer(serv, acc)
+
+	if len(m.Servers) > 0 {
+		for _, server := range m.Servers {
+			err := m.parse(acc, server)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		err := m.parse(acc, "")
 		if err != nil {
 			return err
 		}
@@ -51,17 +65,26 @@ func (m *Ipmi) Gather(acc telegraf.Accumulator) error {
 	return nil
 }
 
-func (m *Ipmi) gatherServer(serv string, acc telegraf.Accumulator) error {
-	conn := NewConnection(serv)
+func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error {
+	opts := make([]string, 0)
+	hostname := ""
 
-	res, err := m.runner.Run(conn, "sdr")
+	if server != "" {
+		conn := NewConnection(server)
+		hostname = conn.Hostname
+		opts = conn.options()
+	}
+
+	opts = append(opts, "sdr")
+	cmd := execCommand(m.path, opts...)
+	out, err := internal.CombinedOutputTimeout(cmd, time.Second*5)
 	if err != nil {
-		return err
+		return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
 	}
 
 	// each line will look something like
 	// Planar VBAT      | 3.05 Volts        | ok
-	lines := strings.Split(res, "\n")
+	lines := strings.Split(string(out), "\n")
 	for i := 0; i < len(lines); i++ {
 		vals := strings.Split(lines[i], "|")
 		if len(vals) != 3 {
@@ -69,8 +92,12 @@ func (m *Ipmi) gatherServer(serv string, acc telegraf.Accumulator) error {
 		}
 
 		tags := map[string]string{
-			"server": conn.Hostname,
-			"name":   transform(vals[0]),
+			"name": transform(vals[0]),
+		}
+
+		// tag the server if we have one
+		if hostname != "" {
+			tags["server"] = hostname
 		}
 
 		fields := make(map[string]interface{})
@@ -99,10 +126,6 @@ func (m *Ipmi) gatherServer(serv string, acc telegraf.Accumulator) error {
 	return nil
 }
 
-type Runner interface {
-	Run(conn *Connection, args ...string) (string, error)
-}
-
 func Atofloat(val string) float64 {
 	f, err := strconv.ParseFloat(val, 64)
 	if err != nil {
@@ -123,7 +146,12 @@ func transform(s string) string {
 }
 
 func init() {
+	m := Ipmi{}
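+	// resolve the ipmitool path once at init time; Gather returns an
+	// error if the binary could not be found in PATH.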
+	path, _ := exec.LookPath("ipmitool")
+	if len(path) > 0 {
+		m.path = path
+	}
 	inputs.Add("ipmi_sensor", func() telegraf.Input {
-		return &Ipmi{}
+		return &m
 	})
 }
diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go
index c62447e39..94dc066c8 100644
--- a/plugins/inputs/ipmi_sensor/ipmi_test.go
+++ b/plugins/inputs/ipmi_sensor/ipmi_test.go
@@ -1,6 +1,9 @@
 package ipmi_sensor
 
 import (
+	"fmt"
+	"os"
+	"os/exec"
 	"testing"
 
 	"github.com/influxdata/telegraf/testutil"
@@ -8,10 +11,219 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-const serv = "USERID:PASSW0RD@lan(192.168.1.1)"
+func TestGather(t *testing.T) {
+	i := &Ipmi{
+		Servers: []string{"USERID:PASSW0RD@lan(192.168.1.1)"},
+		path:    "ipmitool",
+	}
+	// overwriting exec commands with mock commands
+	execCommand = fakeExecCommand
+	var acc testutil.Accumulator
 
-const cmdReturn = `
-Ambient Temp     | 20 degrees C      | ok
+	err := i.Gather(&acc)
+
+	require.NoError(t, err)
+
+	assert.Equal(t, acc.NFields(), 266, "non-numeric measurements should be ignored")
+
+	conn := NewConnection(i.Servers[0])
+	assert.Equal(t, "USERID", conn.Username)
+	assert.Equal(t, "lan", conn.Interface)
+
+	var testsWithServer = []struct {
+		fields map[string]interface{}
+		tags   map[string]string
+	}{
+		{
+			map[string]interface{}{
+				"value":  float64(20),
+				"status": int(1),
+			},
+			map[string]string{
+				"name":   "ambient_temp",
+				"server": "192.168.1.1",
+				"unit":   "degrees_c",
+			},
+		},
+		{
+			map[string]interface{}{
+				"value":  float64(80),
+				"status": int(1),
+			},
+			map[string]string{
+				"name":   "altitude",
+				"server": "192.168.1.1",
+				"unit":   "feet",
+			},
+		},
+		{
+			map[string]interface{}{
+				"value":  float64(210),
+				"status": int(1),
+			},
+			map[string]string{
+				"name":   "avg_power",
+				"server": "192.168.1.1",
+				"unit":   "watts",
+			},
+		},
+		{
+			map[string]interface{}{
+				"value":  float64(4.9),
+				"status": int(1),
+			},
+			map[string]string{
+				"name":   "planar_5v",
+				"server": "192.168.1.1",
+				"unit":   "volts",
+			},
+		},
+		{
+			map[string]interface{}{
+				"value":  float64(3.05),
+				"status": int(1),
+			},
+			map[string]string{
+				"name":   "planar_vbat",
+				"server": "192.168.1.1",
+				"unit":   "volts",
+			},
+		},
+		{
+			map[string]interface{}{
+				"value":  float64(2610),
+				"status": int(1),
+			},
+			map[string]string{
+				"name":   "fan_1a_tach",
+				"server": "192.168.1.1",
+				"unit":   "rpm",
+			},
+		},
+		{
+			map[string]interface{}{
+				"value":  float64(1775),
+				"status": int(1),
+			},
+			map[string]string{
+				"name":   "fan_1b_tach",
+				"server": "192.168.1.1",
+				"unit":   "rpm",
+			},
+		},
+	}
+
+	for _, test := range testsWithServer {
+		acc.AssertContainsTaggedFields(t, "ipmi_sensor", test.fields, test.tags)
+	}
+
+	i = &Ipmi{
+		path: "ipmitool",
+	}
+
+	err = i.Gather(&acc)
+
+	var testsWithoutServer = []struct {
+		fields map[string]interface{}
+		tags   map[string]string
+	}{
+		{
+			map[string]interface{}{
+				"value":  float64(20),
+				"status": int(1),
+			},
+			map[string]string{
+				"name": "ambient_temp",
+				"unit": "degrees_c",
+			},
+		},
+		{
+			map[string]interface{}{
+				"value":  float64(80),
+				"status": int(1),
+			},
+			map[string]string{
+				"name": "altitude",
+				"unit": "feet",
+			},
+		},
+		{
+			map[string]interface{}{
+				"value":  float64(210),
+				"status": int(1),
+			},
+			map[string]string{
+				"name": "avg_power",
+				"unit": "watts",
+			},
+		},
+		{
+			map[string]interface{}{
+				"value":  float64(4.9),
+				"status": int(1),
+			},
+			map[string]string{
+				"name": "planar_5v",
+				"unit": "volts",
+			},
+		},
+		{
+			map[string]interface{}{
+				"value":  float64(3.05),
+				"status": int(1),
+			},
+			map[string]string{
+				"name": "planar_vbat",
+				"unit": "volts",
+			},
+		},
+		{
+			map[string]interface{}{
+				"value":  float64(2610),
+				"status": int(1),
+			},
+			map[string]string{
+				"name": "fan_1a_tach",
+				"unit": "rpm",
+			},
+		},
+		{
+			map[string]interface{}{
+				"value":  float64(1775),
+				"status": int(1),
+			},
+			map[string]string{
+				"name": "fan_1b_tach",
+				"unit": "rpm",
+			},
+		},
+	}
+
+	for _, test := range testsWithoutServer {
+		acc.AssertContainsTaggedFields(t, "ipmi_sensor", test.fields, test.tags)
+	}
+}
+
+// fakeExecCommand is a helper function that mocks
+// the exec.Command call (and calls the test binary instead)
+func fakeExecCommand(command string, args ...string) *exec.Cmd {
+	cs := []string{"-test.run=TestHelperProcess", "--", command}
+	cs = append(cs, args...)
+	cmd := exec.Command(os.Args[0], cs...)
+	cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
+	return cmd
+}
+
+// TestHelperProcess isn't a real test. It's used to mock exec.Command.
+// For example, if you run:
+// GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- ipmitool sdr
+// it prints the mockData below.
+func TestHelperProcess(t *testing.T) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+		return
+	}
+
+	mockData := `Ambient Temp     | 20 degrees C      | ok
 Altitude         | 80 feet           | ok
 Avg Power        | 210 Watts         | ok
 Planar 3.3V      | 3.29 Volts        | ok
@@ -146,130 +358,18 @@ PCI 5            | 0x00              | ok
 OS RealTime Mod  | 0x00              | ok
 `
 
-type runnerMock struct {
-	out string
-	err error
-}
+	args := os.Args
+
+	// The preceding arguments are test-harness arguments, which look like:
+	// /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess --
+	cmd, args := args[3], args[4:]
+
+	if cmd == "ipmitool" {
+		fmt.Fprint(os.Stdout, mockData)
+	} else {
+		fmt.Fprint(os.Stdout, "command not found")
+		os.Exit(1)
 
-func newRunnerMock(out string, err error) Runner {
-	return &runnerMock{
-		out: out,
-		err: err,
 	}
-}
-
-func (r runnerMock) Run(conn *Connection, args ...string) (out string, err error) {
-	if r.err != nil {
-		return out, r.err
-	}
-	return r.out, nil
-}
-
-func TestIpmi(t *testing.T) {
-	i := &Ipmi{
-		Servers: []string{"USERID:PASSW0RD@lan(192.168.1.1)"},
-		runner:  newRunnerMock(cmdReturn, nil),
-	}
-
-	var acc testutil.Accumulator
-
-	err := i.Gather(&acc)
-
-	require.NoError(t, err)
-
-	assert.Equal(t, acc.NFields(), 266, "non-numeric measurements should be ignored")
-
-	var tests = []struct {
-		fields map[string]interface{}
-		tags   map[string]string
-	}{
-		{
-			map[string]interface{}{
-				"value":  float64(20),
-				"status": int(1),
-			},
-			map[string]string{
-				"name":   "ambient_temp",
-				"server": "192.168.1.1",
-				"unit":   "degrees_c",
-			},
-		},
-		{
-			map[string]interface{}{
-				"value":  float64(80),
-				"status": int(1),
-			},
-			map[string]string{
-				"name":   "altitude",
-				"server": "192.168.1.1",
-				"unit":   "feet",
-			},
-		},
-		{
-			map[string]interface{}{
-				"value":  float64(210),
-				"status": int(1),
-			},
-			map[string]string{
-				"name":   "avg_power",
-				"server": "192.168.1.1",
-				"unit":   "watts",
-			},
-		},
-		{
-			map[string]interface{}{
-				"value":  float64(4.9),
-				"status": int(1),
-			},
-			map[string]string{
-				"name":   "planar_5v",
-				"server": "192.168.1.1",
-				"unit":   "volts",
-			},
-		},
-		{
-			map[string]interface{}{
-				"value":  float64(3.05),
-				"status": int(1),
-			},
-			map[string]string{
-				"name":   "planar_vbat",
-				"server": "192.168.1.1",
-				"unit":   "volts",
-			},
-		},
-		{
-			map[string]interface{}{
-				"value":  float64(2610),
-				"status": int(1),
-			},
-			map[string]string{
-				"name":   "fan_1a_tach",
-				"server": "192.168.1.1",
-				"unit":   "rpm",
-			},
-		},
-		{
-			map[string]interface{}{
-				"value":  float64(1775),
-				"status": int(1),
-			},
-			map[string]string{
-				"name":   "fan_1b_tach",
-				"server": "192.168.1.1",
-				"unit":   "rpm",
-			},
-		},
-	}
-
-	for _, test := range tests {
-		acc.AssertContainsTaggedFields(t, "ipmi_sensor", test.fields, test.tags)
-	}
-}
-
-func TestIpmiConnection(t *testing.T) {
-	conn := NewConnection(serv)
-	assert.Equal(t, "USERID", conn.Username)
-	assert.Equal(t, "lan", conn.Interface)
-
+	os.Exit(0)
 }

From 55d3f70771996ed91cf29271ee0f4b39aff4b299 Mon Sep 17 00:00:00 2001
From: Patrick Hemmer 
Date: Thu, 2 Feb 2017 08:46:53 -0500
Subject: [PATCH 0111/1302] add missing fields to haproxy input (#2323)

---
 CHANGELOG.md                           |   1 +
 plugins/inputs/haproxy/README.md       |   6 +-
 plugins/inputs/haproxy/haproxy.go      | 354 +++++++------------------
 plugins/inputs/haproxy/haproxy_test.go | 182 +++++++++----
 4 files changed, 226 insertions(+), 317 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3696cdb8e..2e348ad47 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -45,6 +45,7 @@ It is highly recommended that all users migrate to the new riemann output plugin
 
 - [#2077](https://github.com/influxdata/telegraf/issues/2077): SQL Server Input - Arithmetic overflow error converting numeric to data type int.
 - [#2262](https://github.com/influxdata/telegraf/issues/2262): Flush jitter can inhibit metric collection.
+- [#2318](https://github.com/influxdata/telegraf/issues/2318): haproxy input - Add missing fields.
 - [#2287](https://github.com/influxdata/telegraf/issues/2287): Kubernetes input: Handle null startTime for stopped pods.
 - [#2356](https://github.com/influxdata/telegraf/issues/2356): cpu input panic when /proc/stat is empty.
 - [#2341](https://github.com/influxdata/telegraf/issues/2341): telegraf swallowing panics in --test mode.
diff --git a/plugins/inputs/haproxy/README.md b/plugins/inputs/haproxy/README.md
index 7f2bfa1dd..81c8fb894 100644
--- a/plugins/inputs/haproxy/README.md
+++ b/plugins/inputs/haproxy/README.md
@@ -10,6 +10,7 @@
   servers = ["http://1.2.3.4/haproxy?stats", "/var/run/haproxy*.sock"]
 ```
 
+#### `servers`
 Server addresses must explicitly start with 'http' if you wish to use the HAproxy status page. Otherwise, the address will be assumed to be a UNIX socket and any protocol prefix (if present) will be discarded.
 
 For basic authentication you need to add a username and password to the URL: `http://user:password@1.2.3.4/haproxy?stats`.
@@ -26,9 +27,12 @@ When using socket names, wildcard expansion is supported so plugin can gather st
 
 If no servers are specified, then the default address of `http://127.0.0.1:1936/haproxy?stats` will be used.
 
+#### `keep_field_names`
+By default, some of the fields are renamed from what haproxy calls them. Setting the `keep_field_names` parameter to `true` will result in the plugin keeping the original field names.
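+
+For example, to keep haproxy's raw column names (e.g. `hrsp_2xx` rather than the renamed `http_response.2xx`):
+
+```toml
+[[inputs.haproxy]]
+  servers = ["http://127.0.0.1:1936/haproxy?stats"]
+  keep_field_names = true
+```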
+
 ### Measurements & Fields:
 
-Plugin will gather measurements outlined in [HAproxy CSV format documentation](https://cbonte.github.io/haproxy-dconv/1.5/configuration.html#9.1).
+The plugin gathers the measurements outlined in the [HAproxy CSV format documentation](https://cbonte.github.io/haproxy-dconv/1.7/management.html#9.1).
 
 ### Tags:
 
diff --git a/plugins/inputs/haproxy/haproxy.go b/plugins/inputs/haproxy/haproxy.go
index c764a5530..2be418a65 100644
--- a/plugins/inputs/haproxy/haproxy.go
+++ b/plugins/inputs/haproxy/haproxy.go
@@ -14,80 +14,17 @@ import (
 	"time"
 
 	"github.com/influxdata/telegraf"
-	"github.com/influxdata/telegraf/internal/errchan"
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
 
 //CSV format: https://cbonte.github.io/haproxy-dconv/1.5/configuration.html#9.1
-const (
-	HF_PXNAME         = 0  // 0. pxname [LFBS]: proxy name
-	HF_SVNAME         = 1  // 1. svname [LFBS]: service name (FRONTEND for frontend, BACKEND for backend, any name for server/listener)
-	HF_QCUR           = 2  //2. qcur [..BS]: current queued requests. For the backend this reports the number queued without a server assigned.
-	HF_QMAX           = 3  //3. qmax [..BS]: max value of qcur
-	HF_SCUR           = 4  // 4. scur [LFBS]: current sessions
-	HF_SMAX           = 5  //5. smax [LFBS]: max sessions
-	HF_SLIM           = 6  //6. slim [LFBS]: configured session limit
-	HF_STOT           = 7  //7. stot [LFBS]: cumulative number of connections
-	HF_BIN            = 8  //8. bin [LFBS]: bytes in
-	HF_BOUT           = 9  //9. bout [LFBS]: bytes out
-	HF_DREQ           = 10 //10. dreq [LFB.]: requests denied because of security concerns.
-	HF_DRESP          = 11 //11. dresp [LFBS]: responses denied because of security concerns.
-	HF_EREQ           = 12 //12. ereq [LF..]: request errors. Some of the possible causes are:
-	HF_ECON           = 13 //13. econ [..BS]: number of requests that encountered an error trying to
-	HF_ERESP          = 14 //14. eresp [..BS]: response errors. srv_abrt will be counted here also.  Some other errors are: - write error on the client socket (won't be counted for the server stat) - failure applying filters to the response.
-	HF_WRETR          = 15 //15. wretr [..BS]: number of times a connection to a server was retried.
-	HF_WREDIS         = 16 //16. wredis [..BS]: number of times a request was redispatched to another server. The server value counts the number of times that server was switched away from.
-	HF_STATUS         = 17 //17. status [LFBS]: status (UP/DOWN/NOLB/MAINT/MAINT(via)...)
-	HF_WEIGHT         = 18 //18. weight [..BS]: total weight (backend), server weight (server)
-	HF_ACT            = 19 //19. act [..BS]: number of active servers (backend), server is active (server)
-	HF_BCK            = 20 //20. bck [..BS]: number of backup servers (backend), server is backup (server)
-	HF_CHKFAIL        = 21 //21. chkfail [...S]: number of failed checks. (Only counts checks failed when the server is up.)
-	HF_CHKDOWN        = 22 //22. chkdown [..BS]: number of UP->DOWN transitions. The backend counter counts transitions to the whole backend being down, rather than the sum of the counters for each server.
-	HF_LASTCHG        = 23 //23. lastchg [..BS]: number of seconds since the last UP<->DOWN transition
-	HF_DOWNTIME       = 24 //24. downtime [..BS]: total downtime (in seconds). The value for the backend is the downtime for the whole backend, not the sum of the server downtime.
-	HF_QLIMIT         = 25 //25. qlimit [...S]: configured maxqueue for the server, or nothing in the value is 0 (default, meaning no limit)
-	HF_PID            = 26 //26. pid [LFBS]: process id (0 for first instance, 1 for second, ...)
-	HF_IID            = 27 //27. iid [LFBS]: unique proxy id
-	HF_SID            = 28 //28. sid [L..S]: server id (unique inside a proxy)
-	HF_THROTTLE       = 29 //29. throttle [...S]: current throttle percentage for the server, when slowstart is active, or no value if not in slowstart.
-	HF_LBTOT          = 30 //30. lbtot [..BS]: total number of times a server was selected, either for new sessions, or when re-dispatching. The server counter is the number of times that server was selected.
-	HF_TRACKED        = 31 //31. tracked [...S]: id of proxy/server if tracking is enabled.
-	HF_TYPE           = 32 //32. type [LFBS]: (0 = frontend, 1 = backend, 2 = server, 3 = socket/listener)
-	HF_RATE           = 33 //33. rate [.FBS]: number of sessions per second over last elapsed second
-	HF_RATE_LIM       = 34 //34. rate_lim [.F..]: configured limit on new sessions per second
-	HF_RATE_MAX       = 35 //35. rate_max [.FBS]: max number of new sessions per second
-	HF_CHECK_STATUS   = 36 //36. check_status [...S]: status of last health check, one of:
-	HF_CHECK_CODE     = 37 //37. check_code [...S]: layer5-7 code, if available
-	HF_CHECK_DURATION = 38 //38. check_duration [...S]: time in ms took to finish last health check
-	HF_HRSP_1xx       = 39 //39. hrsp_1xx [.FBS]: http responses with 1xx code
-	HF_HRSP_2xx       = 40 //40. hrsp_2xx [.FBS]: http responses with 2xx code
-	HF_HRSP_3xx       = 41 //41. hrsp_3xx [.FBS]: http responses with 3xx code
-	HF_HRSP_4xx       = 42 //42. hrsp_4xx [.FBS]: http responses with 4xx code
-	HF_HRSP_5xx       = 43 //43. hrsp_5xx [.FBS]: http responses with 5xx code
-	HF_HRSP_OTHER     = 44 //44. hrsp_other [.FBS]: http responses with other codes (protocol error)
-	HF_HANAFAIL       = 45 //45. hanafail [...S]: failed health checks details
-	HF_REQ_RATE       = 46 //46. req_rate [.F..]: HTTP requests per second over last elapsed second
-	HF_REQ_RATE_MAX   = 47 //47. req_rate_max [.F..]: max number of HTTP requests per second observed
-	HF_REQ_TOT        = 48 //48. req_tot [.F..]: total number of HTTP requests received
-	HF_CLI_ABRT       = 49 //49. cli_abrt [..BS]: number of data transfers aborted by the client
-	HF_SRV_ABRT       = 50 //50. srv_abrt [..BS]: number of data transfers aborted by the server (inc. in eresp)
-	HF_COMP_IN        = 51 //51. comp_in [.FB.]: number of HTTP response bytes fed to the compressor
-	HF_COMP_OUT       = 52 //52. comp_out [.FB.]: number of HTTP response bytes emitted by the compressor
-	HF_COMP_BYP       = 53 //53. comp_byp [.FB.]: number of bytes that bypassed the HTTP compressor (CPU/BW limit)
-	HF_COMP_RSP       = 54 //54. comp_rsp [.FB.]: number of HTTP responses that were compressed
-	HF_LASTSESS       = 55 //55. lastsess [..BS]: number of seconds since last session assigned to server/backend
-	HF_LAST_CHK       = 56 //56. last_chk [...S]: last health check contents or textual error
-	HF_LAST_AGT       = 57 //57. last_agt [...S]: last agent check contents or textual error
-	HF_QTIME          = 58 //58. qtime [..BS]:
-	HF_CTIME          = 59 //59. ctime [..BS]:
-	HF_RTIME          = 60 //60. rtime [..BS]: (0 for TCP)
-	HF_TTIME          = 61 //61. ttime [..BS]: the average total session time in ms over the 1024 last requests
-)
 
 type haproxy struct {
 	Servers []string
 
 	client *http.Client
+
+	KeepFieldNames bool
 }
 
 var sampleConfig = `
@@ -103,6 +40,11 @@ var sampleConfig = `
   ## Server address not starting with 'http' will be treated as a possible
   ## socket, so both examples below are valid.
   ## servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
+  #
+  ## By default, some of the fields are renamed from what haproxy calls them.
+  ## Setting this option to true results in the plugin keeping the original
+  ## field names.
+  ## keep_field_names = true
 `
 
 func (r *haproxy) SampleConfig() string {
@@ -147,17 +89,18 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error {
 	}
 
 	var wg sync.WaitGroup
-	errChan := errchan.New(len(endpoints))
 	wg.Add(len(endpoints))
 	for _, server := range endpoints {
 		go func(serv string) {
 			defer wg.Done()
-			errChan.C <- g.gatherServer(serv, acc)
+			if err := g.gatherServer(serv, acc); err != nil {
+				acc.AddError(err)
+			}
 		}(server)
 	}
 
 	wg.Wait()
-	return errChan.Error()
+	return nil
 }
 
 func (g *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) error {
@@ -175,7 +118,7 @@ func (g *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) erro
 		return fmt.Errorf("Could not write to socket '%s': %s", addr, errw)
 	}
 
-	return importCsvResult(c, acc, socketPath)
+	return g.importCsvResult(c, acc, socketPath)
 }
 
 func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
@@ -216,7 +159,11 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
 		return fmt.Errorf("Unable to get valid stat result from '%s', http response code : %d", addr, res.StatusCode)
 	}
 
-	return importCsvResult(res.Body, acc, u.Host)
+	if err := g.importCsvResult(res.Body, acc, u.Host); err != nil {
+		return fmt.Errorf("Unable to parse stat result from '%s': %s", addr, err)
+	}
+
+	return nil
 }
 
 func getSocketAddr(sock string) string {
@@ -229,205 +176,96 @@ func getSocketAddr(sock string) string {
 	}
 }
 
-func importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error {
-	csv := csv.NewReader(r)
-	result, err := csv.ReadAll()
+var typeNames = []string{"frontend", "backend", "server", "listener"}
+var fieldRenames = map[string]string{
+	"pxname":     "proxy",
+	"svname":     "sv",
+	"act":        "active_servers",
+	"bck":        "backup_servers",
+	"cli_abrt":   "cli_abort",
+	"srv_abrt":   "srv_abort",
+	"hrsp_1xx":   "http_response.1xx",
+	"hrsp_2xx":   "http_response.2xx",
+	"hrsp_3xx":   "http_response.3xx",
+	"hrsp_4xx":   "http_response.4xx",
+	"hrsp_5xx":   "http_response.5xx",
+	"hrsp_other": "http_response.other",
+}
+
+func (g *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error {
+	csvr := csv.NewReader(r)
 	now := time.Now()
 
-	for _, row := range result {
+	headers, err := csvr.Read()
+	if err != nil {
+		return err
+	}
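+	// haproxy prefixes the CSV header row with "# " (e.g. "# pxname,svname,...");
+	// verify and strip the prefix so headers[0] holds the first real column name.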
+	if len(headers[0]) <= 2 || headers[0][:2] != "# " {
+		return fmt.Errorf("did not receive standard haproxy headers")
+	}
+	headers[0] = headers[0][2:]
+
+	for {
+		row, err := csvr.Read()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
 		fields := make(map[string]interface{})
 		tags := map[string]string{
 			"server": host,
-			"proxy":  row[HF_PXNAME],
-			"sv":     row[HF_SVNAME],
 		}
-		for field, v := range row {
-			switch field {
-			case HF_QCUR:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["qcur"] = ival
+
+		if len(row) != len(headers) {
+			return fmt.Errorf("number of columns does not match number of headers. headers=%d columns=%d", len(headers), len(row))
+		}
+		for i, v := range row {
+			if v == "" {
+				continue
+			}
+
+			colName := headers[i]
+			fieldName := colName
+			if !g.KeepFieldNames {
+				if fieldRename, ok := fieldRenames[colName]; ok {
+					fieldName = fieldRename
 				}
-			case HF_QMAX:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["qmax"] = ival
+			}
+
+			switch colName {
+			case "pxname", "svname":
+				tags[fieldName] = v
+			case "type":
+				vi, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("unable to parse type value '%s'", v)
 				}
-			case HF_SCUR:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["scur"] = ival
+				if int(vi) >= len(typeNames) {
+					return fmt.Errorf("received unknown type value: %d", vi)
 				}
-			case HF_SMAX:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["smax"] = ival
+				tags[fieldName] = typeNames[vi]
+			case "check_desc", "agent_desc":
+				// do nothing. These fields are just a more verbose description of the check_status & agent_status fields
+			case "status", "check_status", "last_chk", "mode", "tracked", "agent_status", "last_agt", "addr", "cookie":
+				// these are string fields
+				fields[fieldName] = v
+			case "lastsess":
+				vi, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					//TODO log the error. And just once (per column) so we don't spam the log
+					continue
 				}
-			case HF_SLIM:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["slim"] = ival
-				}
-			case HF_STOT:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["stot"] = ival
-				}
-			case HF_BIN:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["bin"] = ival
-				}
-			case HF_BOUT:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["bout"] = ival
-				}
-			case HF_DREQ:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["dreq"] = ival
-				}
-			case HF_DRESP:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["dresp"] = ival
-				}
-			case HF_EREQ:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["ereq"] = ival
-				}
-			case HF_ECON:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["econ"] = ival
-				}
-			case HF_ERESP:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["eresp"] = ival
-				}
-			case HF_WRETR:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["wretr"] = ival
-				}
-			case HF_WREDIS:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["wredis"] = ival
-				}
-			case HF_ACT:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["active_servers"] = ival
-				}
-			case HF_BCK:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["backup_servers"] = ival
-				}
-			case HF_DOWNTIME:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["downtime"] = ival
-				}
-			case HF_THROTTLE:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["throttle"] = ival
-				}
-			case HF_LBTOT:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["lbtot"] = ival
-				}
-			case HF_RATE:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["rate"] = ival
-				}
-			case HF_RATE_MAX:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["rate_max"] = ival
-				}
-			case HF_CHECK_DURATION:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["check_duration"] = ival
-				}
-			case HF_HRSP_1xx:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["http_response.1xx"] = ival
-				}
-			case HF_HRSP_2xx:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["http_response.2xx"] = ival
-				}
-			case HF_HRSP_3xx:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["http_response.3xx"] = ival
-				}
-			case HF_HRSP_4xx:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["http_response.4xx"] = ival
-				}
-			case HF_HRSP_5xx:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["http_response.5xx"] = ival
-				}
-			case HF_REQ_RATE:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["req_rate"] = ival
-				}
-			case HF_REQ_RATE_MAX:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["req_rate_max"] = ival
-				}
-			case HF_REQ_TOT:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["req_tot"] = ival
-				}
-			case HF_CLI_ABRT:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["cli_abort"] = ival
-				}
-			case HF_SRV_ABRT:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["srv_abort"] = ival
-				}
-			case HF_QTIME:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["qtime"] = ival
-				}
-			case HF_CTIME:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["ctime"] = ival
-				}
-			case HF_RTIME:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["rtime"] = ival
-				}
-			case HF_TTIME:
-				ival, err := strconv.ParseUint(v, 10, 64)
-				if err == nil {
-					fields["ttime"] = ival
+				fields[fieldName] = vi
+			default:
+				vi, err := strconv.ParseUint(v, 10, 64)
+				if err != nil {
+					//TODO log the error. And just once (per column) so we don't spam the log
+					continue
 				}
+				fields[fieldName] = vi
 			}
 		}
 		acc.AddFields("haproxy", fields, tags, now)
diff --git a/plugins/inputs/haproxy/haproxy_test.go b/plugins/inputs/haproxy/haproxy_test.go
index 12be2ed88..27a197304 100644
--- a/plugins/inputs/haproxy/haproxy_test.go
+++ b/plugins/inputs/haproxy/haproxy_test.go
@@ -68,8 +68,9 @@ func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) {
 
 	tags := map[string]string{
 		"server": ts.Listener.Addr().String(),
-		"proxy":  "be_app",
-		"sv":     "host0",
+		"proxy":  "git",
+		"sv":     "www",
+		"type":   "server",
 	}
 
 	fields := HaproxyGetFieldValues()
@@ -80,8 +81,8 @@ func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) {
 		Servers: []string{ts.URL},
 	}
 
-	err = r.Gather(&acc)
-	require.Error(t, err)
+	r.Gather(&acc)
+	require.NotEmpty(t, acc.Errors)
 }
 
 func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) {
@@ -100,9 +101,10 @@ func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) {
 	require.NoError(t, err)
 
 	tags := map[string]string{
-		"proxy":  "be_app",
 		"server": ts.Listener.Addr().String(),
-		"sv":     "host0",
+		"proxy":  "git",
+		"sv":     "www",
+		"type":   "server",
 	}
 
 	fields := HaproxyGetFieldValues()
@@ -144,9 +146,10 @@ func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) {
 
 	for _, sock := range sockets {
 		tags := map[string]string{
-			"proxy":  "be_app",
 			"server": sock.Addr().String(),
-			"sv":     "host0",
+			"proxy":  "git",
+			"sv":     "www",
+			"type":   "server",
 		}
 
 		acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)
@@ -155,8 +158,8 @@ func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) {
 	// This mask should not match any socket
 	r.Servers = []string{_badmask}
 
-	err = r.Gather(&acc)
-	require.Error(t, err)
+	r.Gather(&acc)
+	require.NotEmpty(t, acc.Errors)
 }
 
 //When not passing server config, we default to localhost
@@ -171,59 +174,122 @@ func TestHaproxyDefaultGetFromLocalhost(t *testing.T) {
 	assert.Contains(t, err.Error(), "127.0.0.1:1936/haproxy?stats/;csv")
 }
 
+func TestHaproxyKeepFieldNames(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprint(w, csvOutputSample)
+	}))
+	defer ts.Close()
+
+	r := &haproxy{
+		Servers:        []string{ts.URL},
+		KeepFieldNames: true,
+	}
+
+	var acc testutil.Accumulator
+
+	err := r.Gather(&acc)
+	require.NoError(t, err)
+
+	tags := map[string]string{
+		"server": ts.Listener.Addr().String(),
+		"pxname": "git",
+		"svname": "www",
+		"type":   "server",
+	}
+
+	fields := HaproxyGetFieldValues()
+	fields["act"] = fields["active_servers"]
+	delete(fields, "active_servers")
+	fields["bck"] = fields["backup_servers"]
+	delete(fields, "backup_servers")
+	fields["cli_abrt"] = fields["cli_abort"]
+	delete(fields, "cli_abort")
+	fields["srv_abrt"] = fields["srv_abort"]
+	delete(fields, "srv_abort")
+	fields["hrsp_1xx"] = fields["http_response.1xx"]
+	delete(fields, "http_response.1xx")
+	fields["hrsp_2xx"] = fields["http_response.2xx"]
+	delete(fields, "http_response.2xx")
+	fields["hrsp_3xx"] = fields["http_response.3xx"]
+	delete(fields, "http_response.3xx")
+	fields["hrsp_4xx"] = fields["http_response.4xx"]
+	delete(fields, "http_response.4xx")
+	fields["hrsp_5xx"] = fields["http_response.5xx"]
+	delete(fields, "http_response.5xx")
+	fields["hrsp_other"] = fields["http_response.other"]
+	delete(fields, "http_response.other")
+
+	acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)
+}
+
 func HaproxyGetFieldValues() map[string]interface{} {
 	fields := map[string]interface{}{
-		"active_servers":    uint64(1),
-		"backup_servers":    uint64(0),
-		"bin":               uint64(510913516),
-		"bout":              uint64(2193856571),
-		"check_duration":    uint64(10),
-		"cli_abort":         uint64(73),
-		"ctime":             uint64(2),
-		"downtime":          uint64(0),
-		"dresp":             uint64(0),
-		"econ":              uint64(0),
-		"eresp":             uint64(1),
-		"http_response.1xx": uint64(0),
-		"http_response.2xx": uint64(119534),
-		"http_response.3xx": uint64(48051),
-		"http_response.4xx": uint64(2345),
-		"http_response.5xx": uint64(1056),
-		"lbtot":             uint64(171013),
-		"qcur":              uint64(0),
-		"qmax":              uint64(0),
-		"qtime":             uint64(0),
-		"rate":              uint64(3),
-		"rate_max":          uint64(12),
-		"rtime":             uint64(312),
-		"scur":              uint64(1),
-		"smax":              uint64(32),
-		"slim":              uint64(32),
-		"srv_abort":         uint64(1),
-		"stot":              uint64(171014),
-		"ttime":             uint64(2341),
-		"wredis":            uint64(0),
-		"wretr":             uint64(1),
+		"active_servers":      uint64(1),
+		"backup_servers":      uint64(0),
+		"bin":                 uint64(5228218),
+		"bout":                uint64(303747244),
+		"check_code":          uint64(200),
+		"check_duration":      uint64(3),
+		"check_fall":          uint64(3),
+		"check_health":        uint64(4),
+		"check_rise":          uint64(2),
+		"check_status":        "L7OK",
+		"chkdown":             uint64(84),
+		"chkfail":             uint64(559),
+		"cli_abort":           uint64(690),
+		"ctime":               uint64(1),
+		"downtime":            uint64(3352),
+		"dresp":               uint64(0),
+		"econ":                uint64(0),
+		"eresp":               uint64(21),
+		"http_response.1xx":   uint64(0),
+		"http_response.2xx":   uint64(5668),
+		"http_response.3xx":   uint64(8710),
+		"http_response.4xx":   uint64(140),
+		"http_response.5xx":   uint64(0),
+		"http_response.other": uint64(0),
+		"iid":       uint64(4),
+		"last_chk":  "OK",
+		"lastchg":   uint64(1036557),
+		"lastsess":  int64(1342),
+		"lbtot":     uint64(9481),
+		"mode":      "http",
+		"pid":       uint64(1),
+		"qcur":      uint64(0),
+		"qmax":      uint64(0),
+		"qtime":     uint64(1268),
+		"rate":      uint64(0),
+		"rate_max":  uint64(2),
+		"rtime":     uint64(2908),
+		"sid":       uint64(1),
+		"scur":      uint64(0),
+		"slim":      uint64(2),
+		"smax":      uint64(2),
+		"srv_abort": uint64(0),
+		"status":    "UP",
+		"stot":      uint64(14539),
+		"ttime":     uint64(4500),
+		"weight":    uint64(1),
+		"wredis":    uint64(0),
+		"wretr":     uint64(0),
 	}
 	return fields
 }
 
+// Sample output can be obtained from the official haproxy demo: 'http://demo.haproxy.org/;csv'
 const csvOutputSample = `
-# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,comp_in,comp_out,comp_byp,comp_rsp,lastsess,last_chk,last_agt,qtime,ctime,rtime,ttime,
-fe_app,FRONTEND,,81,288,713,2000,1094063,5557055817,24096715169,1102,80,95740,,,17,19,OPEN,,,,,,,,,2,16,113,13,114,,0,18,0,102,,,,0,1314093,537036,123452,11966,1360,,35,140,1987928,,,0,0,0,0,,,,,,,,
-be_static,host0,0,0,0,3,,3209,1141294,17389596,,0,,0,0,0,0,no check,1,1,0,,,,,,2,17,1,,3209,,2,0,,7,,,,0,218,1497,1494,0,0,0,,,,0,0,,,,,2,,,0,2,23,545,
-be_static,BACKEND,0,0,0,3,200,3209,1141294,17389596,0,0,,0,0,0,0,UP,1,1,0,,0,70698,0,,2,17,0,,3209,,1,0,,7,,,,0,218,1497,1494,0,0,,,,,0,0,0,0,0,0,2,,,0,2,23,545,
-be_static,host0,0,0,0,1,,28,17313,466003,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,1,,28,,2,0,,1,L4OK,,1,0,17,6,5,0,0,0,,,,0,0,,,,,2103,,,0,1,1,36,
-be_static,host4,0,0,0,1,,28,15358,1281073,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,2,,28,,2,0,,1,L4OK,,1,0,20,5,3,0,0,0,,,,0,0,,,,,2076,,,0,1,1,54,
-be_static,host5,0,0,0,1,,28,17547,1970404,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,3,,28,,2,0,,1,L4OK,,0,0,20,5,3,0,0,0,,,,0,0,,,,,1495,,,0,1,1,53,
-be_static,host6,0,0,0,1,,28,14105,1328679,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,4,,28,,2,0,,1,L4OK,,0,0,18,8,2,0,0,0,,,,0,0,,,,,1418,,,0,0,1,49,
-be_static,host7,0,0,0,1,,28,15258,1965185,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,5,,28,,2,0,,1,L4OK,,0,0,17,8,3,0,0,0,,,,0,0,,,,,935,,,0,0,1,28,
-be_static,host8,0,0,0,1,,28,12934,1034779,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,6,,28,,2,0,,1,L4OK,,0,0,17,9,2,0,0,0,,,,0,0,,,,,582,,,0,1,1,66,
-be_static,host9,0,0,0,1,,28,13434,134063,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,7,,28,,2,0,,1,L4OK,,0,0,17,8,3,0,0,0,,,,0,0,,,,,539,,,0,0,1,80,
-be_static,host1,0,0,0,1,,28,7873,1209688,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,8,,28,,2,0,,1,L4OK,,0,0,22,6,0,0,0,0,,,,0,0,,,,,487,,,0,0,1,36,
-be_static,host2,0,0,0,1,,28,13830,1085929,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,9,,28,,2,0,,1,L4OK,,0,0,19,6,3,0,0,0,,,,0,0,,,,,338,,,0,1,1,38,
-be_static,host3,0,0,0,1,,28,17959,1259760,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,10,,28,,2,0,,1,L4OK,,1,0,20,6,2,0,0,0,,,,0,0,,,,,92,,,0,1,1,17,
-be_static,BACKEND,0,0,0,2,200,307,160276,13322728,0,0,,0,0,0,0,UP,11,11,0,,0,70698,0,,2,18,0,,307,,1,0,,4,,,,0,205,73,29,0,0,,,,,0,0,0,0,0,0,92,,,0,1,3,381,
-be_app,host0,0,0,1,32,32,171014,510913516,2193856571,,0,,0,1,1,0,UP,100,1,0,1,0,70698,0,,2,19,1,,171013,,2,3,,12,L7OK,301,10,0,119534,48051,2345,1056,0,0,,,,73,1,,,,,0,Moved Permanently,,0,2,312,2341,
-be_app,host4,0,0,2,29,32,171013,499318742,2195595896,12,34,,0,2,0,0,UP,100,1,0,2,0,70698,0,,2,19,2,,171013,,2,3,,12,L7OK,301,12,0,119572,47882,2441,1088,0,0,,,,84,2,,,,,0,Moved Permanently,,0,2,316,2355,
+# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,comp_in,comp_out,comp_byp,comp_rsp,lastsess,last_chk,last_agt,qtime,ctime,rtime,ttime,agent_status,agent_code,agent_duration,check_desc,agent_desc,check_rise,check_fall,check_health,agent_rise,agent_fall,agent_health,addr,cookie,mode,algo,conn_rate,conn_rate_max,conn_tot,intercepted,dcon,dses,
+http-in,FRONTEND,,,3,100,100,2639994,813557487,65937668635,505252,0,47567,,,,,OPEN,,,,,,,,,1,2,0,,,,0,1,0,157,,,,0,1514640,606647,136264,496535,14948,,1,155,2754255,,,36370569635,17435137766,0,642264,,,,,,,,,,,,,,,,,,,,,http,,1,157,2649922,339471,0,0,
+http-in,IPv4-direct,,,3,41,100,349801,57445827,1503928881,269899,0,287,,,,,OPEN,,,,,,,,,1,2,1,,,,3,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,http,,,,,,0,0,
+http-in,IPv4-cached,,,0,33,100,1786155,644395819,57905460294,60511,0,1,,,,,OPEN,,,,,,,,,1,2,2,,,,3,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,http,,,,,,0,0,
+http-in,IPv6-direct,,,0,100,100,325619,92414745,6205208728,3399,0,47279,,,,,OPEN,,,,,,,,,1,2,3,,,,3,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,http,,,,,,0,0,
+http-in,local,,,0,0,100,0,0,0,0,0,0,,,,,OPEN,,,,,,,,,1,2,4,,,,3,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,http,,,,,,0,0,
+http-in,local-https,,,0,5,100,188347,19301096,323070732,171443,0,0,,,,,OPEN,,,,,,,,,1,2,5,,,,3,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,http,,,,,,0,0,
+www,www,0,0,0,20,20,1719698,672044109,64806076656,,0,,0,5285,22,0,UP,1,1,0,561,84,1036557,3356,,1,3,1,,1715117,,2,0,,45,L7OK,200,5,671,1144889,481714,87038,4,0,,,,,105016,167,,,,,5,OK,,0,5,16,1167,,,,Layer7 check passed,,2,3,4,,,,,,http,,,,,,,,
+www,bck,0,0,0,10,10,1483,537137,7544118,,0,,0,0,0,0,UP,1,0,1,4,0,5218087,0,,1,3,2,,1371,,2,0,,17,L7OK,200,2,0,629,99,755,0,0,,,,,16,0,,,,,1036557,OK,,756,1,13,1184,,,,Layer7 check passed,,2,5,6,,,,,,http,,,,,,,,
+www,BACKEND,0,25,0,46,100,1721835,674684790,64813732170,314,0,,130,5285,22,0,UP,1,1,1,,0,5218087,0,,1,3,0,,1716488,,1,0,,45,,,,0,1145518,481813,88664,5719,121,,,,1721835,105172,167,35669268059,17250148556,0,556042,5,,,0,5,16,1167,,,,,,,,,,,,,,http,,,,,,,,
+git,www,0,0,0,2,2,14539,5228218,303747244,,0,,0,21,0,0,UP,1,1,0,559,84,1036557,3352,,1,4,1,,9481,,2,0,,2,L7OK,200,3,0,5668,8710,140,0,0,,,,,690,0,,,,,1342,OK,,1268,1,2908,4500,,,,Layer7 check passed,,2,3,4,,,,,,http,,,,,,,,
+git,bck,0,0,0,0,2,0,0,0,,0,,0,0,0,0,UP,1,0,1,2,0,5218087,0,,1,4,2,,0,,2,0,,0,L7OK,200,2,0,0,0,0,0,0,,,,,0,0,,,,,-1,OK,,0,0,0,0,,,,Layer7 check passed,,2,3,4,,,,,,http,,,,,,,,
+git,BACKEND,0,6,0,8,2,14541,8082393,303747668,0,0,,2,21,0,0,UP,1,1,1,,0,5218087,0,,1,4,0,,9481,,1,0,,7,,,,0,5668,8710,140,23,0,,,,14541,690,0,133458298,38104818,0,4379,1342,,,1268,1,2908,4500,,,,,,,,,,,,,,http,,,,,,,,
+demo,BACKEND,0,0,1,5,20,24063,7876647,659864417,48,0,,1,0,0,0,UP,0,0,0,,0,5218087,,,1,17,0,,0,,1,1,,26,,,,0,23983,21,0,1,57,,,,24062,111,0,567843278,146884392,0,1083,0,,,2706,0,0,887,,,,,,,,,,,,,,http,,,,,,,,
 `

From 0ce44648cfa6d358f559a90e259dd4fd105a5c30 Mon Sep 17 00:00:00 2001
From: Yaron de Leeuw 
Date: Thu, 2 Feb 2017 09:12:22 -0500
Subject: [PATCH 0112/1302] Procstat: don't cache PIDs (#2206)

* Procstat: don't cache PIDs

Changed the procstat input plugin to not cache PIDs. Solves #1636.
The logic of creating a process by pid was moved from `procstat.go` to
`spec_processor.go`.

* Procstat: go fmt

* procstat: modify changelog for #2206
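For reference, the per-gather flow after this change reduces to the sketch below: look each PID up with gopsutil's `process.NewProcess` on every gather, so PIDs that have exited simply fail the lookup instead of lingering in a cache. The `gatherOnce` helper and its output are illustrative, not the plugin's code.

```go
package main

import (
	"fmt"
	"os"

	"github.com/shirou/gopsutil/process"
)

// gatherOnce sketches the new uncached flow: open each process by PID on
// every gather instead of keeping *process.Process objects between gathers.
func gatherOnce(pids []int32) {
	for _, pid := range pids {
		// NewProcess fails for PIDs that no longer exist, so stale PIDs
		// drop out naturally rather than surviving in a cache.
		proc, err := process.NewProcess(pid)
		if err != nil {
			fmt.Fprintf(os.Stderr, "E! Error: procstat: %s\n", err)
			continue
		}
		name, _ := proc.Name()
		fmt.Printf("pid=%d process_name=%s\n", pid, name)
	}
}

func main() {
	gatherOnce([]int32{int32(os.Getpid())})
}
```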
---
 CHANGELOG.md                              |  1 +
 plugins/inputs/procstat/procstat.go       | 44 +++----------------
 plugins/inputs/procstat/procstat_test.go  |  2 -
 plugins/inputs/procstat/spec_processor.go | 53 +++++++++++++----------
 4 files changed, 39 insertions(+), 61 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2e348ad47..41ef6e488 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -45,6 +45,7 @@ It is highly recommended that all users migrate to the new riemann output plugin
 
 - [#2077](https://github.com/influxdata/telegraf/issues/2077): SQL Server Input - Arithmetic overflow error converting numeric to data type int.
 - [#2262](https://github.com/influxdata/telegraf/issues/2262): Flush jitter can inhibit metric collection.
+- [#1636](https://github.com/influxdata/telegraf/issues/1636): procstat - stop caching PIDs.
 - [#2318](https://github.com/influxdata/telegraf/issues/2318): haproxy input - Add missing fields.
 - [#2287](https://github.com/influxdata/telegraf/issues/2287): Kubernetes input: Handle null startTime for stopped pods.
 - [#2356](https://github.com/influxdata/telegraf/issues/2356): cpu input panic when /proc/stat is empty.
 - [#2356](https://github.com/influxdata/telegraf/issues/2356): cpu input panic when /proc/stat is empty.
diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go
index 929490e4a..565d0ebd1 100644
--- a/plugins/inputs/procstat/procstat.go
+++ b/plugins/inputs/procstat/procstat.go
@@ -8,8 +8,6 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/shirou/gopsutil/process"
-
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
@@ -23,15 +21,12 @@ type Procstat struct {
 	User        string
 	PidTag      bool
 
-	// pidmap maps a pid to a process object, so we don't recreate every gather
-	pidmap map[int32]*process.Process
 	// tagmap maps a pid to a map of tags for that pid
 	tagmap map[int32]map[string]string
 }
 
 func NewProcstat() *Procstat {
 	return &Procstat{
-		pidmap: make(map[int32]*process.Process),
 		tagmap: make(map[int32]map[string]string),
 	}
 }
@@ -67,51 +62,26 @@ func (_ *Procstat) Description() string {
 }
 
 func (p *Procstat) Gather(acc telegraf.Accumulator) error {
-	err := p.createProcesses()
+	pids, err := p.getAllPids()
 	if err != nil {
 		log.Printf("E! Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s",
 			p.Exe, p.PidFile, p.Pattern, p.User, err.Error())
 	} else {
-		for pid, proc := range p.pidmap {
+		for _, pid := range pids {
 			if p.PidTag {
 				p.tagmap[pid]["pid"] = fmt.Sprint(pid)
 			}
-			p := NewSpecProcessor(p.ProcessName, p.Prefix, pid, acc, proc, p.tagmap[pid])
-			p.pushMetrics()
+			p := NewSpecProcessor(p.ProcessName, p.Prefix, pid, acc, p.tagmap[pid])
+			err := p.pushMetrics()
+			if err != nil {
+				log.Printf("E! Error: procstat: %s", err.Error())
+			}
 		}
 	}
 
 	return nil
 }
 
-func (p *Procstat) createProcesses() error {
-	var errstring string
-	var outerr error
-
-	pids, err := p.getAllPids()
-	if err != nil {
-		errstring += err.Error() + " "
-	}
-
-	for _, pid := range pids {
-		_, ok := p.pidmap[pid]
-		if !ok {
-			proc, err := process.NewProcess(pid)
-			if err == nil {
-				p.pidmap[pid] = proc
-			} else {
-				errstring += err.Error() + " "
-			}
-		}
-	}
-
-	if errstring != "" {
-		outerr = fmt.Errorf("%s", errstring)
-	}
-
-	return outerr
-}
-
 func (p *Procstat) getAllPids() ([]int32, error) {
 	var pids []int32
 	var err error
diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go
index ccc72bdbb..001537178 100644
--- a/plugins/inputs/procstat/procstat_test.go
+++ b/plugins/inputs/procstat/procstat_test.go
@@ -6,7 +6,6 @@ import (
 	"strconv"
 	"testing"
 
-	"github.com/shirou/gopsutil/process"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
@@ -24,7 +23,6 @@ func TestGather(t *testing.T) {
 	p := Procstat{
 		PidFile: file.Name(),
 		Prefix:  "foo",
-		pidmap:  make(map[int32]*process.Process),
 		tagmap:  make(map[int32]map[string]string),
 	}
 	p.Gather(&acc)
diff --git a/plugins/inputs/procstat/spec_processor.go b/plugins/inputs/procstat/spec_processor.go
index 3b56fbc3e..1b9f63126 100644
--- a/plugins/inputs/procstat/spec_processor.go
+++ b/plugins/inputs/procstat/spec_processor.go
@@ -1,6 +1,7 @@
 package procstat
 
 import (
+	"fmt"
 	"time"
 
 	"github.com/shirou/gopsutil/process"
@@ -9,12 +10,13 @@ import (
 )
 
 type SpecProcessor struct {
-	Prefix string
-	pid    int32
-	tags   map[string]string
-	fields map[string]interface{}
-	acc    telegraf.Accumulator
-	proc   *process.Process
+	ProcessName string
+	Prefix      string
+	pid         int32
+	tags        map[string]string
+	fields      map[string]interface{}
+	acc         telegraf.Accumulator
+	proc        *process.Process
 }
 
 func NewSpecProcessor(
@@ -22,29 +24,35 @@ func NewSpecProcessor(
 	prefix string,
 	pid int32,
 	acc telegraf.Accumulator,
-	p *process.Process,
 	tags map[string]string,
 ) *SpecProcessor {
-	if processName != "" {
-		tags["process_name"] = processName
-	} else {
-		name, err := p.Name()
-		if err == nil {
-			tags["process_name"] = name
-		}
-	}
 	return &SpecProcessor{
-		Prefix: prefix,
-		pid:    pid,
-		tags:   tags,
-		fields: make(map[string]interface{}),
-		acc:    acc,
-		proc:   p,
+		ProcessName: processName,
+		Prefix:      prefix,
+		pid:         pid,
+		tags:        tags,
+		fields:      make(map[string]interface{}),
+		acc:         acc,
 	}
 }
 
-func (p *SpecProcessor) pushMetrics() {
+func (p *SpecProcessor) pushMetrics() error {
 	var prefix string
+	proc, err := process.NewProcess(p.pid)
+	if err != nil {
+		return fmt.Errorf("Failed to open process with pid '%d'. Error: '%s'",
+			p.pid, err)
+	}
+	p.proc = proc
+	if p.ProcessName != "" {
+		p.tags["process_name"] = p.ProcessName
+	} else {
+		name, err := p.proc.Name()
+		if err == nil {
+			p.tags["process_name"] = name
+		}
+	}
+
 	if p.Prefix != "" {
 		prefix = p.Prefix + "_"
 	}
@@ -107,4 +115,5 @@ func (p *SpecProcessor) pushMetrics() {
 	}
 
 	p.acc.AddFields("procstat", fields, p.tags)
+	return nil
 }

From b3537ef2a8e56f9647c1c281d88e092b651c392c Mon Sep 17 00:00:00 2001
From: Patrick Hemmer 
Date: Thu, 2 Feb 2017 11:24:03 -0500
Subject: [PATCH 0113/1302] add socket listener & writer (#2094)

closes #1516
closes #1711
closes #1721
closes #1526
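As a quick smoke test of the new listener, line protocol can be streamed at it from a few lines of Go. This is a hedged sketch, assuming a socket_listener configured with `service_address = "tcp://:8094"` and the default influx data format; on stream sockets each metric must be newline-terminated.

```go
package main

import (
	"fmt"
	"log"
	"net"
)

func main() {
	// Assumes a socket_listener on tcp://:8094 with data_format = "influx".
	conn, err := net.Dial("tcp", "127.0.0.1:8094")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Stream sockets are newline-delimited, so end each metric with '\n'.
	if _, err := fmt.Fprintf(conn, "test,foo=bar v=1i %d\n", int64(123456789)); err != nil {
		log.Fatal(err)
	}
}
```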
---
 CHANGELOG.md                                  |   1 +
 README.md                                     |   2 +
 plugins/inputs/all/all.go                     |   1 +
 .../inputs/socket_listener/socket_listener.go | 240 ++++++++++++++++++
 .../socket_listener/socket_listener_test.go   | 122 +++++++++
 plugins/outputs/all/all.go                    |   1 +
 .../outputs/socket_writer/socket_writer.go    | 106 ++++++++
 .../socket_writer/socket_writer_test.go       | 187 ++++++++++++++
 testutil/accumulator.go                       |  17 +-
 9 files changed, 675 insertions(+), 2 deletions(-)
 create mode 100644 plugins/inputs/socket_listener/socket_listener.go
 create mode 100644 plugins/inputs/socket_listener/socket_listener_test.go
 create mode 100644 plugins/outputs/socket_writer/socket_writer.go
 create mode 100644 plugins/outputs/socket_writer/socket_writer_test.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 41ef6e488..7ec1b5737 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -110,6 +110,7 @@ plugins, not just statsd.
 - [#1980](https://github.com/influxdata/telegraf/issues/1980): Hide username/password from elasticsearch error log messages.
 - [#2097](https://github.com/influxdata/telegraf/issues/2097): Configurable HTTP timeouts in Jolokia plugin
 - [#2255](https://github.com/influxdata/telegraf/pull/2255): Allow changing jolokia attribute delimiter
+- [#2094](https://github.com/influxdata/telegraf/pull/2094): Add generic socket listener & writer.
 
 ### Bugfixes
 
diff --git a/README.md b/README.md
index f8a465591..9b8a9ddd1 100644
--- a/README.md
+++ b/README.md
@@ -182,6 +182,7 @@ Telegraf can also collect metrics via the following service plugins:
 * [nsq_consumer](./plugins/inputs/nsq_consumer)
 * [logparser](./plugins/inputs/logparser)
 * [statsd](./plugins/inputs/statsd)
+* [socket_listener](./plugins/inputs/socket_listener)
 * [tail](./plugins/inputs/tail)
 * [tcp_listener](./plugins/inputs/tcp_listener)
 * [udp_listener](./plugins/inputs/udp_listener)
@@ -219,6 +220,7 @@ Telegraf can also collect metrics via the following service plugins:
 * [nsq](./plugins/outputs/nsq)
 * [opentsdb](./plugins/outputs/opentsdb)
 * [prometheus](./plugins/outputs/prometheus_client)
+* [socket_writer](./plugins/outputs/socket_writer)
 * [riemann](./plugins/outputs/riemann)
 * [riemann_legacy](./plugins/outputs/riemann_legacy)
 
diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index 7846f8c9a..924dffe3d 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -66,6 +66,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/sensors"
 	_ "github.com/influxdata/telegraf/plugins/inputs/snmp"
 	_ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy"
+	_ "github.com/influxdata/telegraf/plugins/inputs/socket_listener"
 	_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
 	_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
 	_ "github.com/influxdata/telegraf/plugins/inputs/sysstat"
diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go
new file mode 100644
index 000000000..9d3a8e1fe
--- /dev/null
+++ b/plugins/inputs/socket_listener/socket_listener.go
@@ -0,0 +1,240 @@
+package socket_listener
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"strings"
+	"sync"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/inputs"
+	"github.com/influxdata/telegraf/plugins/parsers"
+)
+
+type setReadBufferer interface {
+	SetReadBuffer(bytes int) error
+}
+
+type streamSocketListener struct {
+	net.Listener
+	*SocketListener
+
+	connections    map[string]net.Conn
+	connectionsMtx sync.Mutex
+}
+
+func (ssl *streamSocketListener) listen() {
+	ssl.connections = map[string]net.Conn{}
+
+	for {
+		c, err := ssl.Accept()
+		if err != nil {
+			ssl.AddError(err)
+			break
+		}
+
+		ssl.connectionsMtx.Lock()
+		if ssl.MaxConnections > 0 && len(ssl.connections) >= ssl.MaxConnections {
+			ssl.connectionsMtx.Unlock()
+			c.Close()
+			continue
+		}
+		ssl.connections[c.RemoteAddr().String()] = c
+		ssl.connectionsMtx.Unlock()
+		go ssl.read(c)
+	}
+
+	ssl.connectionsMtx.Lock()
+	for _, c := range ssl.connections {
+		c.Close()
+	}
+	ssl.connectionsMtx.Unlock()
+}
+
+func (ssl *streamSocketListener) removeConnection(c net.Conn) {
+	ssl.connectionsMtx.Lock()
+	delete(ssl.connections, c.RemoteAddr().String())
+	ssl.connectionsMtx.Unlock()
+}
+
+func (ssl *streamSocketListener) read(c net.Conn) {
+	defer ssl.removeConnection(c)
+	defer c.Close()
+
+	scnr := bufio.NewScanner(c)
+	for scnr.Scan() {
+		metrics, err := ssl.Parse(scnr.Bytes())
+		if err != nil {
+			ssl.AddError(fmt.Errorf("unable to parse incoming line"))
+			//TODO rate limit
+			continue
+		}
+		for _, m := range metrics {
+			ssl.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
+		}
+	}
+
+	if err := scnr.Err(); err != nil {
+		ssl.AddError(err)
+	}
+}
+
+type packetSocketListener struct {
+	net.PacketConn
+	*SocketListener
+}
+
+func (psl *packetSocketListener) listen() {
+	buf := make([]byte, 64*1024) // 64kb - maximum size of IP packet
+	for {
+		n, _, err := psl.ReadFrom(buf)
+		if err != nil {
+			psl.AddError(err)
+			break
+		}
+
+		metrics, err := psl.Parse(buf[:n])
+		if err != nil {
+			psl.AddError(fmt.Errorf("unable to parse incoming packet"))
+			//TODO rate limit
+			continue
+		}
+		for _, m := range metrics {
+			psl.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
+		}
+	}
+}
+
+type SocketListener struct {
+	ServiceAddress string
+	MaxConnections int
+	ReadBufferSize int
+
+	parsers.Parser
+	telegraf.Accumulator
+	io.Closer
+}
+
+func (sl *SocketListener) Description() string {
+	return "Generic socket listener capable of handling multiple socket types."
+}
+
+func (sl *SocketListener) SampleConfig() string {
+	return `
+  ## URL to listen on
+  # service_address = "tcp://:8094"
+  # service_address = "tcp://127.0.0.1:http"
+  # service_address = "tcp4://:8094"
+  # service_address = "tcp6://:8094"
+  # service_address = "tcp6://[2001:db8::1]:8094"
+  # service_address = "udp://:8094"
+  # service_address = "udp4://:8094"
+  # service_address = "udp6://:8094"
+  # service_address = "unix:///tmp/telegraf.sock"
+  # service_address = "unixgram:///tmp/telegraf.sock"
+
+  ## Maximum number of concurrent connections.
+  ## Only applies to stream sockets (e.g. TCP).
+  ## 0 (default) is unlimited.
+  # max_connections = 1024
+
+  ## Maximum socket buffer size in bytes.
+  ## For stream sockets, once the buffer fills up, the sender will start backing up.
+  ## For datagram sockets, once the buffer fills up, metrics will start dropping.
+  ## Defaults to the OS default.
+  # read_buffer_size = 65535
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  # data_format = "influx"
+`
+}
+
+func (sl *SocketListener) Gather(_ telegraf.Accumulator) error {
+	return nil
+}
+
+func (sl *SocketListener) SetParser(parser parsers.Parser) {
+	sl.Parser = parser
+}
+
+func (sl *SocketListener) Start(acc telegraf.Accumulator) error {
+	sl.Accumulator = acc
+	spl := strings.SplitN(sl.ServiceAddress, "://", 2)
+	if len(spl) != 2 {
+		return fmt.Errorf("invalid service address: %s", sl.ServiceAddress)
+	}
+
+	switch spl[0] {
+	case "tcp", "tcp4", "tcp6", "unix", "unixpacket":
+		l, err := net.Listen(spl[0], spl[1])
+		if err != nil {
+			return err
+		}
+
+		if sl.ReadBufferSize > 0 {
+			if srb, ok := l.(setReadBufferer); ok {
+				srb.SetReadBuffer(sl.ReadBufferSize)
+			} else {
+				log.Printf("W! Unable to set read buffer on a %s socket", spl[0])
+			}
+		}
+
+		ssl := &streamSocketListener{
+			Listener:       l,
+			SocketListener: sl,
+		}
+
+		sl.Closer = ssl
+		go ssl.listen()
+	case "udp", "udp4", "udp6", "ip", "ip4", "ip6", "unixgram":
+		pc, err := net.ListenPacket(spl[0], spl[1])
+		if err != nil {
+			return err
+		}
+
+		if sl.ReadBufferSize > 0 {
+			if srb, ok := pc.(setReadBufferer); ok {
+				srb.SetReadBuffer(sl.ReadBufferSize)
+			} else {
+				log.Printf("W! Unable to set read buffer on a %s socket", spl[0])
+			}
+		}
+
+		psl := &packetSocketListener{
+			PacketConn:     pc,
+			SocketListener: sl,
+		}
+
+		sl.Closer = psl
+		go psl.listen()
+	default:
+		return fmt.Errorf("unknown protocol '%s' in '%s'", spl[0], sl.ServiceAddress)
+	}
+
+	return nil
+}
+
+func (sl *SocketListener) Stop() {
+	if sl.Closer != nil {
+		sl.Close()
+		sl.Closer = nil
+	}
+}
+
+func newSocketListener() *SocketListener {
+	parser, _ := parsers.NewInfluxParser()
+
+	return &SocketListener{
+		Parser: parser,
+	}
+}
+
+func init() {
+	inputs.Add("socket_listener", func() telegraf.Input { return newSocketListener() })
+}
diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go
new file mode 100644
index 000000000..6764b6d2d
--- /dev/null
+++ b/plugins/inputs/socket_listener/socket_listener_test.go
@@ -0,0 +1,122 @@
+package socket_listener
+
+import (
+	"net"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestSocketListener_tcp(t *testing.T) {
+	sl := newSocketListener()
+	sl.ServiceAddress = "tcp://127.0.0.1:0"
+
+	acc := &testutil.Accumulator{}
+	err := sl.Start(acc)
+	require.NoError(t, err)
+
+	client, err := net.Dial("tcp", sl.Closer.(net.Listener).Addr().String())
+	require.NoError(t, err)
+
+	testSocketListener(t, sl, client)
+}
+
+func TestSocketListener_udp(t *testing.T) {
+	sl := newSocketListener()
+	sl.ServiceAddress = "udp://127.0.0.1:0"
+
+	acc := &testutil.Accumulator{}
+	err := sl.Start(acc)
+	require.NoError(t, err)
+
+	client, err := net.Dial("udp", sl.Closer.(net.PacketConn).LocalAddr().String())
+	require.NoError(t, err)
+
+	testSocketListener(t, sl, client)
+}
+
+func TestSocketListener_unix(t *testing.T) {
+	defer os.Remove("/tmp/telegraf_test.sock")
+	sl := newSocketListener()
+	sl.ServiceAddress = "unix:///tmp/telegraf_test.sock"
+
+	acc := &testutil.Accumulator{}
+	err := sl.Start(acc)
+	require.NoError(t, err)
+
+	client, err := net.Dial("unix", "/tmp/telegraf_test.sock")
+	require.NoError(t, err)
+
+	testSocketListener(t, sl, client)
+}
+
+func TestSocketListener_unixgram(t *testing.T) {
+	defer os.Remove("/tmp/telegraf_test.sock")
+	sl := newSocketListener()
+	sl.ServiceAddress = "unixgram:///tmp/telegraf_test.sock"
+
+	acc := &testutil.Accumulator{}
+	err := sl.Start(acc)
+	require.NoError(t, err)
+
+	client, err := net.Dial("unixgram", "/tmp/telegraf_test.sock")
+	require.NoError(t, err)
+
+	testSocketListener(t, sl, client)
+}
+
+func testSocketListener(t *testing.T, sl *SocketListener, client net.Conn) {
+	mstr12 := "test,foo=bar v=1i 123456789\ntest,foo=baz v=2i 123456790\n"
+	mstr3 := "test,foo=zab v=3i 123456791"
+	client.Write([]byte(mstr12))
+	client.Write([]byte(mstr3))
+	if _, ok := client.(net.Conn); ok {
+		// stream connection. needs trailing newline to terminate mstr3
+		client.Write([]byte{'\n'})
+	}
+
+	acc := sl.Accumulator.(*testutil.Accumulator)
+
+	acc.Lock()
+	if len(acc.Metrics) < 1 {
+		acc.Wait()
+	}
+	require.True(t, len(acc.Metrics) >= 1)
+	m := acc.Metrics[0]
+	acc.Unlock()
+
+	assert.Equal(t, "test", m.Measurement)
+	assert.Equal(t, map[string]string{"foo": "bar"}, m.Tags)
+	assert.Equal(t, map[string]interface{}{"v": int64(1)}, m.Fields)
+	assert.True(t, time.Unix(0, 123456789).Equal(m.Time))
+
+	acc.Lock()
+	if len(acc.Metrics) < 2 {
+		acc.Wait()
+	}
+	require.True(t, len(acc.Metrics) >= 2)
+	m = acc.Metrics[1]
+	acc.Unlock()
+
+	assert.Equal(t, "test", m.Measurement)
+	assert.Equal(t, map[string]string{"foo": "baz"}, m.Tags)
+	assert.Equal(t, map[string]interface{}{"v": int64(2)}, m.Fields)
+	assert.True(t, time.Unix(0, 123456790).Equal(m.Time))
+
+	acc.Lock()
+	if len(acc.Metrics) < 3 {
+		acc.Wait()
+	}
+	require.True(t, len(acc.Metrics) >= 3)
+	m = acc.Metrics[2]
+	acc.Unlock()
+
+	assert.Equal(t, "test", m.Measurement)
+	assert.Equal(t, map[string]string{"foo": "zab"}, m.Tags)
+	assert.Equal(t, map[string]interface{}{"v": int64(3)}, m.Fields)
+	assert.True(t, time.Unix(0, 123456791).Equal(m.Time))
+}
diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go
index c10e00f78..eec2b95e3 100644
--- a/plugins/outputs/all/all.go
+++ b/plugins/outputs/all/all.go
@@ -21,4 +21,5 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/outputs/prometheus_client"
 	_ "github.com/influxdata/telegraf/plugins/outputs/riemann"
 	_ "github.com/influxdata/telegraf/plugins/outputs/riemann_legacy"
+	_ "github.com/influxdata/telegraf/plugins/outputs/socket_writer"
 )
diff --git a/plugins/outputs/socket_writer/socket_writer.go b/plugins/outputs/socket_writer/socket_writer.go
new file mode 100644
index 000000000..2c54bb0bb
--- /dev/null
+++ b/plugins/outputs/socket_writer/socket_writer.go
@@ -0,0 +1,106 @@
+package socket_writer
+
+import (
+	"fmt"
+	"net"
+	"strings"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/outputs"
+	"github.com/influxdata/telegraf/plugins/serializers"
+)
+
+type SocketWriter struct {
+	Address string
+
+	serializers.Serializer
+
+	net.Conn
+}
+
+func (sw *SocketWriter) Description() string {
+	return "Generic socket writer capable of handling multiple socket types."
+}
+
+func (sw *SocketWriter) SampleConfig() string {
+	return `
+  ## URL to connect to
+  # address = "tcp://127.0.0.1:8094"
+  # address = "tcp://example.com:http"
+  # address = "tcp4://127.0.0.1:8094"
+  # address = "tcp6://127.0.0.1:8094"
+  # address = "tcp6://[2001:db8::1]:8094"
+  # address = "udp://127.0.0.1:8094"
+  # address = "udp4://127.0.0.1:8094"
+  # address = "udp6://127.0.0.1:8094"
+  # address = "unix:///tmp/telegraf.sock"
+  # address = "unixgram:///tmp/telegraf.sock"
+
+  ## Data format to generate.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  # data_format = "influx"
+`
+}
+
+func (sw *SocketWriter) SetSerializer(s serializers.Serializer) {
+	sw.Serializer = s
+}
+
+func (sw *SocketWriter) Connect() error {
+	spl := strings.SplitN(sw.Address, "://", 2)
+	if len(spl) != 2 {
+		return fmt.Errorf("invalid address: %s", sw.Address)
+	}
+
+	c, err := net.Dial(spl[0], spl[1])
+	if err != nil {
+		return err
+	}
+
+	sw.Conn = c
+	return nil
+}
+
+// Write writes the given metrics to the destination.
+// If an error is encountered, it is up to the caller to retry the same write again later.
+// Not parallel safe.
+func (sw *SocketWriter) Write(metrics []telegraf.Metric) error {
+	if sw.Conn == nil {
+		// previous write failed with permanent error and socket was closed.
+		if err := sw.Connect(); err != nil {
+			return err
+		}
+	}
+
+	for _, m := range metrics {
+		bs, err := sw.Serialize(m)
+		if err != nil {
+			//TODO log & keep going with remaining metrics
+			return err
+		}
+		if _, err := sw.Conn.Write(bs); err != nil {
+			//TODO log & keep going with remaining strings
+			if err, ok := err.(net.Error); !ok || !err.Temporary() {
+				// permanent error. close the connection
+				sw.Close()
+				sw.Conn = nil
+			}
+			return err
+		}
+	}
+
+	return nil
+}
+
+func newSocketWriter() *SocketWriter {
+	s, _ := serializers.NewInfluxSerializer()
+	return &SocketWriter{
+		Serializer: s,
+	}
+}
+
+func init() {
+	outputs.Add("socket_writer", func() telegraf.Output { return newSocketWriter() })
+}
diff --git a/plugins/outputs/socket_writer/socket_writer_test.go b/plugins/outputs/socket_writer/socket_writer_test.go
new file mode 100644
index 000000000..3ab9d1e34
--- /dev/null
+++ b/plugins/outputs/socket_writer/socket_writer_test.go
@@ -0,0 +1,187 @@
+package socket_writer
+
+import (
+	"bufio"
+	"bytes"
+	"net"
+	"os"
+	"sync"
+	"testing"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestSocketWriter_tcp(t *testing.T) {
+	listener, err := net.Listen("tcp", "127.0.0.1:0")
+	require.NoError(t, err)
+
+	sw := newSocketWriter()
+	sw.Address = "tcp://" + listener.Addr().String()
+
+	err = sw.Connect()
+	require.NoError(t, err)
+
+	lconn, err := listener.Accept()
+	require.NoError(t, err)
+
+	testSocketWriter_stream(t, sw, lconn)
+}
+
+func TestSocketWriter_udp(t *testing.T) {
+	listener, err := net.ListenPacket("udp", "127.0.0.1:0")
+	require.NoError(t, err)
+
+	sw := newSocketWriter()
+	sw.Address = "udp://" + listener.LocalAddr().String()
+
+	err = sw.Connect()
+	require.NoError(t, err)
+
+	testSocketWriter_packet(t, sw, listener)
+}
+
+func TestSocketWriter_unix(t *testing.T) {
+	defer os.Remove("/tmp/telegraf_test.sock")
+	listener, err := net.Listen("unix", "/tmp/telegraf_test.sock")
+	require.NoError(t, err)
+
+	sw := newSocketWriter()
+	sw.Address = "unix:///tmp/telegraf_test.sock"
+
+	err = sw.Connect()
+	require.NoError(t, err)
+
+	lconn, err := listener.Accept()
+	require.NoError(t, err)
+
+	testSocketWriter_stream(t, sw, lconn)
+}
+
+func TestSocketWriter_unixgram(t *testing.T) {
+	defer os.Remove("/tmp/telegraf_test.sock")
+	listener, err := net.ListenPacket("unixgram", "/tmp/telegraf_test.sock")
+	require.NoError(t, err)
+
+	sw := newSocketWriter()
+	sw.Address = "unixgram:///tmp/telegraf_test.sock"
+
+	err = sw.Connect()
+	require.NoError(t, err)
+
+	testSocketWriter_packet(t, sw, listener)
+}
+
+func testSocketWriter_stream(t *testing.T, sw *SocketWriter, lconn net.Conn) {
+	metrics := []telegraf.Metric{}
+	metrics = append(metrics, testutil.TestMetric(1, "test"))
+	mbs1out, _ := sw.Serialize(metrics[0])
+	metrics = append(metrics, testutil.TestMetric(2, "test"))
+	mbs2out, _ := sw.Serialize(metrics[1])
+
+	err := sw.Write(metrics)
+	require.NoError(t, err)
+
+	scnr := bufio.NewScanner(lconn)
+	require.True(t, scnr.Scan())
+	mstr1in := scnr.Text() + "\n"
+	require.True(t, scnr.Scan())
+	mstr2in := scnr.Text() + "\n"
+
+	assert.Equal(t, string(mbs1out), mstr1in)
+	assert.Equal(t, string(mbs2out), mstr2in)
+}
+
+func testSocketWriter_packet(t *testing.T, sw *SocketWriter, lconn net.PacketConn) {
+	metrics := []telegraf.Metric{}
+	metrics = append(metrics, testutil.TestMetric(1, "test"))
+	mbs1out, _ := sw.Serialize(metrics[0])
+	metrics = append(metrics, testutil.TestMetric(2, "test"))
+	mbs2out, _ := sw.Serialize(metrics[1])
+
+	err := sw.Write(metrics)
+	require.NoError(t, err)
+
+	buf := make([]byte, 256)
+	var mstrins []string
+	for len(mstrins) < 2 {
+		n, _, err := lconn.ReadFrom(buf)
+		require.NoError(t, err)
+		for _, bs := range bytes.Split(buf[:n], []byte{'\n'}) {
+			if len(bs) == 0 {
+				continue
+			}
+			mstrins = append(mstrins, string(bs)+"\n")
+		}
+	}
+	require.Len(t, mstrins, 2)
+
+	assert.Equal(t, string(mbs1out), mstrins[0])
+	assert.Equal(t, string(mbs2out), mstrins[1])
+}
+
+func TestSocketWriter_Write_err(t *testing.T) {
+	listener, err := net.Listen("tcp", "127.0.0.1:0")
+	require.NoError(t, err)
+
+	sw := newSocketWriter()
+	sw.Address = "tcp://" + listener.Addr().String()
+
+	err = sw.Connect()
+	require.NoError(t, err)
+	sw.Conn.(*net.TCPConn).SetReadBuffer(256)
+
+	lconn, err := listener.Accept()
+	require.NoError(t, err)
+	lconn.(*net.TCPConn).SetWriteBuffer(256)
+
+	metrics := []telegraf.Metric{testutil.TestMetric(1, "testerr")}
+
+	// close the socket to generate an error
+	lconn.Close()
+	sw.Close()
+	err = sw.Write(metrics)
+	require.Error(t, err)
+	assert.Nil(t, sw.Conn)
+}
+
+func TestSocketWriter_Write_reconnect(t *testing.T) {
+	listener, err := net.Listen("tcp", "127.0.0.1:0")
+	require.NoError(t, err)
+
+	sw := newSocketWriter()
+	sw.Address = "tcp://" + listener.Addr().String()
+
+	err = sw.Connect()
+	require.NoError(t, err)
+	sw.Conn.(*net.TCPConn).SetReadBuffer(256)
+
+	lconn, err := listener.Accept()
+	require.NoError(t, err)
+	lconn.(*net.TCPConn).SetWriteBuffer(256)
+	lconn.Close()
+	sw.Conn = nil
+
+	wg := sync.WaitGroup{}
+	wg.Add(1)
+	var lerr error
+	go func() {
+		lconn, lerr = listener.Accept()
+		wg.Done()
+	}()
+
+	metrics := []telegraf.Metric{testutil.TestMetric(1, "testerr")}
+	err = sw.Write(metrics)
+	require.NoError(t, err)
+
+	wg.Wait()
+	assert.NoError(t, lerr)
+
+	mbsout, _ := sw.Serialize(metrics[0])
+	buf := make([]byte, 256)
+	n, err := lconn.Read(buf)
+	require.NoError(t, err)
+	assert.Equal(t, string(mbsout), string(buf[:n]))
+}
diff --git a/testutil/accumulator.go b/testutil/accumulator.go
index 4f131ec8f..25e60920b 100644
--- a/testutil/accumulator.go
+++ b/testutil/accumulator.go
@@ -29,6 +29,7 @@ func (p *Metric) String() string {
 // Accumulator defines a mocked out accumulator
 type Accumulator struct {
 	sync.Mutex
+	*sync.Cond
 
 	Metrics  []*Metric
 	nMetrics uint64
@@ -56,11 +57,14 @@ func (a *Accumulator) AddFields(
 	timestamp ...time.Time,
 ) {
 	atomic.AddUint64(&a.nMetrics, 1)
+	a.Lock()
+	defer a.Unlock()
+	if a.Cond != nil {
+		a.Cond.Broadcast()
+	}
 	if a.Discard {
 		return
 	}
-	a.Lock()
-	defer a.Unlock()
 	if tags == nil {
 		tags = map[string]string{}
 	}
@@ -171,6 +175,15 @@ func (a *Accumulator) NFields() int {
 	return counter
 }
 
+// Wait waits for a metric to be added to the accumulator.
+// Accumulator must already be locked.
+func (a *Accumulator) Wait() {
+	if a.Cond == nil {
+		a.Cond = sync.NewCond(&a.Mutex)
+	}
+	a.Cond.Wait()
+}
+
 func (a *Accumulator) AssertContainsTaggedFields(
 	t *testing.T,
 	measurement string,

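The `testutil/accumulator.go` change above layers a `sync.Cond` over the accumulator's mutex so tests can block until a metric arrives. Reduced to a standalone sketch (stand-in types, not the telegraf code), the pattern is:

```go
package main

import (
	"fmt"
	"sync"
)

// accumulator sketches testutil.Accumulator's Cond-over-Mutex pattern:
// Wait must be called with the lock held; Broadcast wakes all waiters.
type accumulator struct {
	sync.Mutex
	cond    *sync.Cond
	metrics []string
}

func (a *accumulator) add(m string) {
	a.Lock()
	defer a.Unlock()
	a.metrics = append(a.metrics, m)
	if a.cond != nil {
		a.cond.Broadcast()
	}
}

// wait blocks until at least n metrics have been accumulated.
func (a *accumulator) wait(n int) {
	a.Lock()
	defer a.Unlock()
	if a.cond == nil {
		a.cond = sync.NewCond(&a.Mutex)
	}
	for len(a.metrics) < n {
		a.cond.Wait()
	}
}

func main() {
	a := &accumulator{}
	go a.add("test,foo=bar v=1i")
	a.wait(1)
	fmt.Println(a.metrics)
}
```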
From dfb4038654af897d2213d4d306cba1bf2fae914e Mon Sep 17 00:00:00 2001
From: Nick Irvine 
Date: Fri, 3 Feb 2017 02:02:19 -0800
Subject: [PATCH 0114/1302] Remove pidfile if pidfile was created (#2358)

Also, ensure pidfile perms are 644
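Isolated from the agent, the create/write/defer-remove sequence looks like the sketch below (same flags and 0644 mode as the patch; `writePidfile` and its logging are illustrative). Removal is only scheduled when the file was actually created, so a failed open no longer aborts the agent.

```go
package main

import (
	"fmt"
	"log"
	"os"
)

// writePidfile creates the pidfile with 0644 permissions, writes the PID,
// and returns a cleanup func; the caller defers it so the file is removed
// on shutdown only if it was created in the first place.
func writePidfile(path string) func() {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		log.Printf("E! Unable to create pidfile: %s", err)
		return func() {} // nothing to clean up
	}
	fmt.Fprintf(f, "%d\n", os.Getpid())
	f.Close()
	return func() {
		if err := os.Remove(path); err != nil {
			log.Printf("E! Unable to remove pidfile: %s", err)
		}
	}
}

func main() {
	cleanup := writePidfile("/tmp/example.pid")
	defer cleanup()
}
```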
---
 cmd/telegraf/telegraf.go | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go
index a3631d38a..16f7845d0 100644
--- a/cmd/telegraf/telegraf.go
+++ b/cmd/telegraf/telegraf.go
@@ -194,14 +194,21 @@ func reloadLoop(
 		log.Printf("I! Tags enabled: %s", c.ListTags())
 
 		if *fPidfile != "" {
-			f, err := os.Create(*fPidfile)
+			f, err := os.OpenFile(*fPidfile, os.O_CREATE|os.O_WRONLY, 0644)
 			if err != nil {
-				log.Fatalf("E! Unable to create pidfile: %s", err)
+				log.Printf("E! Unable to create pidfile: %s", err)
+			} else {
+				fmt.Fprintf(f, "%d\n", os.Getpid())
+
+				f.Close()
+
+				defer func() {
+					err := os.Remove(*fPidfile)
+					if err != nil {
+						log.Printf("E! Unable to remove pidfile: %s", err)
+					}
+				}()
 			}
-
-			fmt.Fprintf(f, "%d\n", os.Getpid())
-
-			f.Close()
 		}
 
 		ag.Run(shutdown)

From 1c4673e90086605909b7cf99a2931c82d67c49cb Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Fri, 3 Feb 2017 10:04:50 +0000
Subject: [PATCH 0115/1302] changelog update

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7ec1b5737..62f693c55 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -51,6 +51,7 @@ It is highly recommended that all users migrate to the new riemann output plugin
 - [#2287](https://github.com/influxdata/telegraf/issues/2287): Kubernetes input: Handle null startTime for stopped pods.
 - [#2356](https://github.com/influxdata/telegraf/issues/2356): cpu input panic when /proc/stat is empty.
 - [#2341](https://github.com/influxdata/telegraf/issues/2341): telegraf swallowing panics in --test mode.
+- [#2358](https://github.com/influxdata/telegraf/pull/2358): Create pidfile with 644 permissions & defer file deletion.
 
 ## v1.2.1 [2017-02-01]
 

From b1945c0493a482aff480610fc86071abd2b33ad3 Mon Sep 17 00:00:00 2001
From: Cosmo Petrich 
Date: Mon, 30 Jan 2017 09:28:52 +1100
Subject: [PATCH 0116/1302] Increment gather_errors for all input errors

closes #2339
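Both failure paths, a returned error and an overrun collection interval, now funnel through the accumulator's `AddError`, which is what increments the `gather_errors` counter. The shape of the select loop, as a sketch with channels standing in for the plugin machinery:

```go
package main

import (
	"fmt"
	"time"
)

// gatherWithTimeout sketches the agent loop: a returned error and an
// overrun collection interval are reported through the same addError path.
func gatherWithTimeout(done <-chan error, timeout time.Duration, addError func(error)) {
	ticker := time.NewTicker(timeout)
	defer ticker.Stop()
	for {
		select {
		case err := <-done:
			if err != nil {
				addError(err)
			}
			return
		case <-ticker.C:
			addError(fmt.Errorf("took longer to collect than collection interval (%s)", timeout))
		}
	}
}

func main() {
	done := make(chan error, 1)
	go func() {
		time.Sleep(250 * time.Millisecond)
		done <- nil
	}()
	gatherWithTimeout(done, 100*time.Millisecond, func(err error) {
		fmt.Println("E!", err)
	})
}
```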
---
 CHANGELOG.md   | 1 +
 agent/agent.go | 8 ++++----
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 62f693c55..3cdd544d0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -40,6 +40,7 @@ It is highly recommended that all users migrate to the new riemann output plugin
 - [#2277](https://github.com/influxdata/telegraf/pull/2277): add integer metrics for Consul check health state.
 - [#2201](https://github.com/influxdata/telegraf/pull/2201): Add lock option to the IPtables input plugin.
 - [#2244](https://github.com/influxdata/telegraf/pull/2244): Support ipmi_sensor plugin querying local ipmi sensors.
+- [#2339](https://github.com/influxdata/telegraf/pull/2339): Increment gather_errors for all errors emitted by inputs.
 
 ### Bugfixes
 
diff --git a/agent/agent.go b/agent/agent.go
index a9e42643a..192e8c112 100644
--- a/agent/agent.go
+++ b/agent/agent.go
@@ -157,13 +157,13 @@ func gatherWithTimeout(
 		select {
 		case err := <-done:
 			if err != nil {
-				log.Printf("E! ERROR in input [%s]: %s", input.Name(), err)
+				acc.AddError(err)
 			}
 			return
 		case <-ticker.C:
-			log.Printf("E! ERROR: input [%s] took longer to collect than "+
-				"collection interval (%s)",
-				input.Name(), timeout)
+			err := fmt.Errorf("took longer to collect than collection interval (%s)",
+				timeout)
+			acc.AddError(err)
 			continue
 		case <-shutdown:
 			return

From 694955c87bde9320148544783936947ae337272e Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Fri, 3 Feb 2017 16:41:45 +0000
Subject: [PATCH 0117/1302] Remove metric.Point from metric interface

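Anything that previously round-tripped through `m.Point()` can read the metric's own accessors instead. A hedged sketch of the replacement pattern, using only the `Name`/`Tags`/`Fields`/`Time` accessors from the interface and the `metric.New` constructor seen in the tests below:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
)

// describe shows a consumer reading a metric directly, with no influxdb
// client.Point round trip.
func describe(m telegraf.Metric) string {
	return fmt.Sprintf("%s %v %v %d", m.Name(), m.Tags(), m.Fields(), m.Time().UnixNano())
}

func main() {
	m, err := metric.New("cpu",
		map[string]string{"host": "localhost"},
		map[string]interface{}{"usage_idle": float64(99)},
		time.Now(),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(describe(m))
}
```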
---
 Godeps                |  1 -
 metric.go             |  7 -------
 metric/metric.go      |  8 --------
 metric/metric_test.go | 20 --------------------
 4 files changed, 36 deletions(-)

diff --git a/Godeps b/Godeps
index 5443c1039..d2293e142 100644
--- a/Godeps
+++ b/Godeps
@@ -26,7 +26,6 @@ github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
 github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
 github.com/hpcloud/tail 915e5feba042395f5fda4dbe9c0e99aeab3088b3
 github.com/influxdata/config 8ec4638a81500c20be24855812bc8498ebe2dc92
-github.com/influxdata/influxdb 2fe8ed308439a98a9b01943939b44048ed952c90
 github.com/influxdata/toml ad49a5c2936f96b8f5943c3fdba47630ccf45a0d
 github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
 github.com/jackc/pgx c8080fc4a1bfa44bf90383ad0fdce2f68b7d313c
diff --git a/metric.go b/metric.go
index b1ab1b29f..fc479b51d 100644
--- a/metric.go
+++ b/metric.go
@@ -2,9 +2,6 @@ package telegraf
 
 import (
 	"time"
-
-	// TODO remove
-	"github.com/influxdata/influxdb/client/v2"
 )
 
 // ValueType is an enumeration of metric types that represent a simple value.
@@ -62,8 +59,4 @@ type Metric interface {
 	// aggregator things:
 	SetAggregate(bool)
 	IsAggregate() bool
-
-	// Point returns a influxdb client.Point object
-	// TODO remove this function
-	Point() *client.Point
 }
diff --git a/metric/metric.go b/metric/metric.go
index 936d0907e..0a2ca41b6 100644
--- a/metric/metric.go
+++ b/metric/metric.go
@@ -9,9 +9,6 @@ import (
 	"time"
 
 	"github.com/influxdata/telegraf"
-
-	// TODO remove
-	"github.com/influxdata/influxdb/client/v2"
 )
 
 const MaxInt = int(^uint(0) >> 1)
@@ -137,11 +134,6 @@ type metric struct {
 	nsec   int64
 }
 
-func (m *metric) Point() *client.Point {
-	c, _ := client.NewPoint(m.Name(), m.Tags(), m.Fields(), m.Time())
-	return c
-}
-
 func (m *metric) String() string {
 	return string(m.name) + string(m.tags) + " " + string(m.fields) + " " + string(m.t) + "\n"
 }
diff --git a/metric/metric_test.go b/metric/metric_test.go
index 65b4b0fba..f133a507c 100644
--- a/metric/metric_test.go
+++ b/metric/metric_test.go
@@ -595,26 +595,6 @@ func TestNewMetricAggregate(t *testing.T) {
 	assert.True(t, m.IsAggregate())
 }
 
-func TestNewMetricPoint(t *testing.T) {
-	now := time.Now()
-
-	tags := map[string]string{
-		"host": "localhost",
-	}
-	fields := map[string]interface{}{
-		"usage_idle": float64(99),
-	}
-	m, err := New("cpu", tags, fields, now)
-	assert.NoError(t, err)
-
-	p := m.Point()
-	pfields, _ := p.Fields()
-
-	assert.Equal(t, fields, m.Fields())
-	assert.Equal(t, fields, pfields)
-	assert.Equal(t, "cpu", p.Name())
-}
-
 func TestNewMetricString(t *testing.T) {
 	now := time.Now()
 

From c8cc01ba6a27a68a5d161152d9323d3e801edfbd Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Thu, 2 Feb 2017 17:06:41 +0000
Subject: [PATCH 0118/1302] deprecate udp_listener & tcp_listener

---
 CHANGELOG.md                                |   8 +-
 README.md                                   |   8 +-
 plugins/inputs/socket_listener/README.md    | 112 ++++++++++++++++++++
 plugins/inputs/tcp_listener/README.md       |  30 +-----
 plugins/inputs/tcp_listener/tcp_listener.go |  22 ++--
 plugins/inputs/udp_listener/README.md       |  86 +--------------
 plugins/inputs/udp_listener/udp_listener.go |  23 ++--
 7 files changed, 142 insertions(+), 147 deletions(-)
 create mode 100644 plugins/inputs/socket_listener/README.md

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3cdd544d0..283c5ccba 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -27,8 +27,15 @@ The previous riemann output will still be available using
 `outputs.riemann_legacy` if needed, but that will eventually be deprecated.
 It is highly recommended that all users migrate to the new riemann output plugin.
 
+- Generic [socket_listener](./plugins/inputs/socket_listener) and
+[socket_writer](./plugins/outputs/socket_writer) plugins have been implemented
+for receiving and sending UDP, TCP, unix, & unix-datagram data. These plugins
+will replace udp_listener and tcp_listener, which are still available but will
+be deprecated eventually.
+
 ### Features
 
+- [#2094](https://github.com/influxdata/telegraf/pull/2094): Add generic socket listener & writer.
 - [#2204](https://github.com/influxdata/telegraf/pull/2204): Extend http_response to support searching for a substring in response. Return 1 if found, else 0.
 - [#2137](https://github.com/influxdata/telegraf/pull/2137): Added userstats to mysql input plugin.
 - [#2179](https://github.com/influxdata/telegraf/pull/2179): Added more InnoDB metric to MySQL plugin.
@@ -112,7 +119,6 @@ plugins, not just statsd.
 - [#1980](https://github.com/influxdata/telegraf/issues/1980): Hide username/password from elasticsearch error log messages.
 - [#2097](https://github.com/influxdata/telegraf/issues/2097): Configurable HTTP timeouts in Jolokia plugin
 - [#2255](https://github.com/influxdata/telegraf/pull/2255): Allow changing jolokia attribute delimiter
-- [#2094](https://github.com/influxdata/telegraf/pull/2094): Add generic socket listener & writer.
 
 ### Bugfixes
 
diff --git a/README.md b/README.md
index 9b8a9ddd1..462076dca 100644
--- a/README.md
+++ b/README.md
@@ -184,8 +184,8 @@ Telegraf can also collect metrics via the following service plugins:
 * [statsd](./plugins/inputs/statsd)
 * [socket_listener](./plugins/inputs/socket_listener)
 * [tail](./plugins/inputs/tail)
-* [tcp_listener](./plugins/inputs/tcp_listener)
-* [udp_listener](./plugins/inputs/udp_listener)
+* [tcp_listener](./plugins/inputs/socket_listener)
+* [udp_listener](./plugins/inputs/socket_listener)
 * [webhooks](./plugins/inputs/webhooks)
   * [filestack](./plugins/inputs/webhooks/filestack)
   * [github](./plugins/inputs/webhooks/github)
@@ -220,9 +220,11 @@ Telegraf can also collect metrics via the following service plugins:
 * [nsq](./plugins/outputs/nsq)
 * [opentsdb](./plugins/outputs/opentsdb)
 * [prometheus](./plugins/outputs/prometheus_client)
-* [socket_writer](./plugins/outputs/socket_writer)
 * [riemann](./plugins/outputs/riemann)
 * [riemann_legacy](./plugins/outputs/riemann_legacy)
+* [socket_writer](./plugins/outputs/socket_writer)
+* [tcp](./plugins/outputs/socket_writer)
+* [udp](./plugins/outputs/socket_writer)
 
 ## Contributing
 
diff --git a/plugins/inputs/socket_listener/README.md b/plugins/inputs/socket_listener/README.md
new file mode 100644
index 000000000..e73296804
--- /dev/null
+++ b/plugins/inputs/socket_listener/README.md
@@ -0,0 +1,112 @@
+# Socket listener service input plugin
+
+The Socket Listener is a service input plugin that listens for messages from
+streaming (tcp, unix) or datagram (udp, unixgram) protocols.
+
+The plugin expects messages in the
+[Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).
+
+### Configuration:
+
+This is a sample configuration for the plugin.
+
+```toml
+# Generic socket listener capable of handling multiple socket types.
+[[inputs.socket_listener]]
+  ## URL to listen on
+  # service_address = "tcp://:8094"
+  # service_address = "tcp://127.0.0.1:http"
+  # service_address = "tcp4://:8094"
+  # service_address = "tcp6://:8094"
+  # service_address = "tcp6://[2001:db8::1]:8094"
+  # service_address = "udp://:8094"
+  # service_address = "udp4://:8094"
+  # service_address = "udp6://:8094"
+  # service_address = "unix:///tmp/telegraf.sock"
+  # service_address = "unixgram:///tmp/telegraf.sock"
+
+  ## Maximum number of concurrent connections.
+  ## Only applies to stream sockets (e.g. TCP).
+  ## 0 (default) is unlimited.
+  # max_connections = 1024
+
+  ## Maximum socket buffer size in bytes.
+  ## For stream sockets, once the buffer fills up, the sender will start backing up.
+  ## For datagram sockets, once the buffer fills up, metrics will start dropping.
+  ## Defaults to the OS default.
+  # read_buffer_size = 65535
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  # data_format = "influx"
+```
+
+## A Note on UDP OS Buffer Sizes
+
+The `read_buffer_size` config option can be used to adjust the size of the socket
+buffer, but this number is limited by OS settings. On Linux, `read_buffer_size`
+will default to `rmem_default` and will be capped by `rmem_max`. On BSD systems,
+`read_buffer_size` is capped by `maxsockbuf`, and there is no OS default
+setting.
+
+Instructions on how to adjust these OS settings are available below.
+
+Some OSes (most notably, Linux) place very restrictive limits on the performance
+of UDP protocols. It is _highly_ recommended that you increase these OS limits to
+at least 8MB before trying to run large amounts of UDP traffic to your instance.
+8MB is just a recommendation, and can be adjusted higher.
+
+### Linux
+Check the current UDP/IP receive buffer limit & default by typing the following
+commands:
+
+```
+sysctl net.core.rmem_max
+sysctl net.core.rmem_default
+```
+
+If the values are less than 8388608 bytes you should add the following lines to
+the /etc/sysctl.conf file:
+
+```
+net.core.rmem_max=8388608
+net.core.rmem_default=8388608
+```
+
+Changes to /etc/sysctl.conf do not take effect until reboot.
+To update the values immediately, type the following commands as root:
+
+```
+sysctl -w net.core.rmem_max=8388608
+sysctl -w net.core.rmem_default=8388608
+```
+
+### BSD/Darwin
+
+On BSD/Darwin systems you need to add about 15% padding to the kernel socket
+buffer limit. That is, if you want an 8MB buffer (8388608 bytes) you need to set
+the kernel limit to `8388608*1.15 = 9646900`. This is not documented anywhere,
+but happens
+[in the kernel here.](https://github.com/freebsd/freebsd/blob/master/sys/kern/uipc_sockbuf.c#L63-L64)
+
+Check the current UDP/IP buffer limit by typing the following command:
+
+```
+sysctl kern.ipc.maxsockbuf
+```
+
+If the value is less than 9646900 bytes you should add the following lines
+to the /etc/sysctl.conf file (create it if necessary):
+
+```
+kern.ipc.maxsockbuf=9646900
+```
+
+Changes to /etc/sysctl.conf do not take effect until reboot.
+To update the values immediately, type the following command as root:
+
+```
+sysctl -w kern.ipc.maxsockbuf=9646900
+```
diff --git a/plugins/inputs/tcp_listener/README.md b/plugins/inputs/tcp_listener/README.md
index 0066ea801..f858c7179 100644
--- a/plugins/inputs/tcp_listener/README.md
+++ b/plugins/inputs/tcp_listener/README.md
@@ -1,30 +1,4 @@
 # TCP listener service input plugin
 
-The TCP listener is a service input plugin that listens for messages on a TCP
-socket and adds those messages to InfluxDB.
-The plugin expects messages in the
-[Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).
-
-### Configuration:
-
-This is a sample configuration for the plugin.
-
-```toml
-# Generic TCP listener
-[[inputs.tcp_listener]]
-  ## Address and port to host TCP listener on
-  service_address = ":8094"
-
-  ## Number of TCP messages allowed to queue up. Once filled, the
-  ## TCP listener will start dropping packets.
-  allowed_pending_messages = 10000
-
-  ## Maximum number of concurrent TCP connections to allow
-  max_tcp_connections = 250
-
-  ## Data format to consume.
-  ## Each data format has it's own unique set of configuration options, read
-  ## more about them here:
-  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-  data_format = "influx"
-```
+> DEPRECATED: As of version 1.3 the TCP listener plugin has been deprecated in favor of the
+> [socket_listener plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener)
diff --git a/plugins/inputs/tcp_listener/tcp_listener.go b/plugins/inputs/tcp_listener/tcp_listener.go
index 3ce4d87b4..544f36bd6 100644
--- a/plugins/inputs/tcp_listener/tcp_listener.go
+++ b/plugins/inputs/tcp_listener/tcp_listener.go
@@ -58,21 +58,9 @@ var malformedwarn = "E! tcp_listener has received %d malformed packets" +
 	" thus far."
 
 const sampleConfig = `
-  ## Address and port to host TCP listener on
-  # service_address = ":8094"
-
-  ## Number of TCP messages allowed to queue up. Once filled, the
-  ## TCP listener will start dropping packets.
-  # allowed_pending_messages = 10000
-
-  ## Maximum number of concurrent TCP connections to allow
-  # max_tcp_connections = 250
-
-  ## Data format to consume.
-  ## Each data format has it's own unique set of configuration options, read
-  ## more about them here:
-  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-  data_format = "influx"
+  # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
+  # socket_listener plugin
+  # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
 `
 
 func (t *TcpListener) SampleConfig() string {
@@ -98,6 +86,10 @@ func (t *TcpListener) Start(acc telegraf.Accumulator) error {
 	t.Lock()
 	defer t.Unlock()
 
+	log.Println("W! DEPRECATED: the TCP listener plugin has been deprecated " +
+		"in favor of the socket_listener plugin " +
+		"(https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener)")
+
 	tags := map[string]string{
 		"address": t.ServiceAddress,
 	}
diff --git a/plugins/inputs/udp_listener/README.md b/plugins/inputs/udp_listener/README.md
index ee675f535..6228090b6 100644
--- a/plugins/inputs/udp_listener/README.md
+++ b/plugins/inputs/udp_listener/README.md
@@ -1,86 +1,4 @@
 # UDP listener service input plugin
 
-The UDP listener is a service input plugin that listens for messages on a UDP
-socket and adds those messages to InfluxDB.
-The plugin expects messages in the
-[Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).
-
-### Configuration:
-
-This is a sample configuration for the plugin.
-
-```toml
-[[inputs.udp_listener]]
-  ## Address and port to host UDP listener on
-  service_address = ":8092"
-
-  ## Number of UDP messages allowed to queue up. Once filled, the
-  ## UDP listener will start dropping packets.
-  allowed_pending_messages = 10000
-
-  ## Data format to consume.
-  ## Each data format has it's own unique set of configuration options, read
-  ## more about them here:
-  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-  data_format = "influx"
-```
-
-## A Note on UDP OS Buffer Sizes
-
-Some OSes (most notably, Linux) place very restricive limits on the performance
-of UDP protocols. It is _highly_ recommended that you increase these OS limits to
-at least 8MB before trying to run large amounts of UDP traffic to your instance.
-8MB is just a recommendation, and can be adjusted higher.
-
-### Linux
-Check the current UDP/IP receive buffer limit & default by typing the following
-commands:
-
-```
-sysctl net.core.rmem_max
-sysctl net.core.rmem_default
-```
-
-If the values are less than 8388608 bytes you should add the following lines to
-the /etc/sysctl.conf file:
-
-```
-net.core.rmem_max=8388608
-net.core.rmem_default=8388608
-```
-
-Changes to /etc/sysctl.conf do not take effect until reboot.
-To update the values immediately, type the following commands as root:
-
-```
-sysctl -w net.core.rmem_max=8388608
-sysctl -w net.core.rmem_default=8388608
-```
-
-### BSD/Darwin
-
-On BSD/Darwin systems you need to add about a 15% padding to the kernel limit
-socket buffer. Meaning if you want an 8MB buffer (8388608 bytes) you need to set
-the kernel limit to `8388608*1.15 = 9646900`. This is not documented anywhere but
-happens
-[in the kernel here.](https://github.com/freebsd/freebsd/blob/master/sys/kern/uipc_sockbuf.c#L63-L64)
-
-Check the current UDP/IP buffer limit by typing the following command:
-
-```
-sysctl kern.ipc.maxsockbuf
-```
-
-If the value is less than 9646900 bytes you should add the following lines
-to the /etc/sysctl.conf file (create it if necessary):
-
-```
-kern.ipc.maxsockbuf=9646900
-```
-
-Changes to /etc/sysctl.conf do not take effect until reboot.
-To update the values immediately, type the following commands as root:
-
-```
-sysctl -w kern.ipc.maxsockbuf=9646900
-```
+> DEPRECATED: As of version 1.3 the UDP listener plugin has been deprecated in favor of the
+> [socket_listener plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener)
diff --git a/plugins/inputs/udp_listener/udp_listener.go b/plugins/inputs/udp_listener/udp_listener.go
index 518a3fe48..53c6a72f5 100644
--- a/plugins/inputs/udp_listener/udp_listener.go
+++ b/plugins/inputs/udp_listener/udp_listener.go
@@ -66,22 +66,9 @@ var malformedwarn = "E! udp_listener has received %d malformed packets" +
 	" thus far."
 
 const sampleConfig = `
-  ## Address and port to host UDP listener on
-  # service_address = ":8092"
-
-  ## Number of UDP messages allowed to queue up. Once filled, the
-  ## UDP listener will start dropping packets.
-  # allowed_pending_messages = 10000
-
-  ## Set the buffer size of the UDP connection outside of OS default (in bytes)
-  ## If set to 0, take OS default
-  udp_buffer_size = 16777216
-
-  ## Data format to consume.
-  ## Each data format has it's own unique set of configuration options, read
-  ## more about them here:
-  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-  data_format = "influx"
+  # DEPRECATED: the UDP listener plugin has been deprecated in favor of the
+  # socket_listener plugin
+  # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
 `
 
 func (u *UdpListener) SampleConfig() string {
@@ -106,6 +93,10 @@ func (u *UdpListener) Start(acc telegraf.Accumulator) error {
 	u.Lock()
 	defer u.Unlock()
 
+	log.Println("W! DEPRECATED: the UDP listener plugin has been deprecated " +
+		"in favor of the socket_listener plugin " +
+		"(https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener)")
+
 	tags := map[string]string{
 		"address": u.ServiceAddress,
 	}

From f05fac74cbf6d5c1642838062a82998f17f0f1f0 Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Fri, 10 Feb 2017 17:05:13 +0000
Subject: [PATCH 0119/1302] update naoina/toml to do config validation

---
 Godeps | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Godeps b/Godeps
index d2293e142..69c1ab3a6 100644
--- a/Godeps
+++ b/Godeps
@@ -26,7 +26,7 @@ github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
 github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
 github.com/hpcloud/tail 915e5feba042395f5fda4dbe9c0e99aeab3088b3
 github.com/influxdata/config 8ec4638a81500c20be24855812bc8498ebe2dc92
-github.com/influxdata/toml ad49a5c2936f96b8f5943c3fdba47630ccf45a0d
+github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
 github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
 github.com/jackc/pgx c8080fc4a1bfa44bf90383ad0fdce2f68b7d313c
 github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413

From 21cf79163c3b0d9ee9575b1dca5e458fa9d858b0 Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Fri, 10 Feb 2017 17:27:18 +0000
Subject: [PATCH 0120/1302] don't use influxdata/config, just use
 influxdata/toml

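A hedged sketch of the influxdata/toml calls the config loader now uses directly, based on the usage visible in this diff: `toml.Parse` yields an `*ast.Table`, and `toml.UnmarshalTable` fills a struct from one of its sub-tables. The `agentConfig` struct is an illustrative stand-in for telegraf's agent section.

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/toml"
	"github.com/influxdata/toml/ast"
)

// agentConfig is an illustrative stand-in for telegraf's agent section.
type agentConfig struct {
	Interval string
}

func main() {
	contents := []byte("[agent]\n  interval = \"10s\"\n")

	// Parse the whole document into an AST, as LoadConfig does.
	tbl, err := toml.Parse(contents)
	if err != nil {
		log.Fatal(err)
	}

	// Pick out one sub-table and unmarshal it into a struct.
	sub, ok := tbl.Fields["agent"].(*ast.Table)
	if !ok {
		log.Fatal("invalid configuration: [agent] is not a table")
	}

	var agent agentConfig
	if err := toml.UnmarshalTable(sub, &agent); err != nil {
		log.Fatal(err)
	}
	fmt.Println(agent.Interval)
}
```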
---
 Godeps                    |  1 -
 internal/config/config.go | 17 ++++++++---------
 2 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/Godeps b/Godeps
index 69c1ab3a6..fb8e23860 100644
--- a/Godeps
+++ b/Godeps
@@ -25,7 +25,6 @@ github.com/gorilla/mux 392c28fe23e1c45ddba891b0320b3b5df220beea
 github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
 github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
 github.com/hpcloud/tail 915e5feba042395f5fda4dbe9c0e99aeab3088b3
-github.com/influxdata/config 8ec4638a81500c20be24855812bc8498ebe2dc92
 github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
 github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
 github.com/jackc/pgx c8080fc4a1bfa44bf90383ad0fdce2f68b7d313c
diff --git a/internal/config/config.go b/internal/config/config.go
index 24dec4169..90b158716 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -25,7 +25,6 @@ import (
 	"github.com/influxdata/telegraf/plugins/processors"
 	"github.com/influxdata/telegraf/plugins/serializers"
 
-	"github.com/influxdata/config"
 	"github.com/influxdata/toml"
 	"github.com/influxdata/toml/ast"
 )
@@ -566,7 +565,7 @@ func (c *Config) LoadConfig(path string) error {
 			if !ok {
 				return fmt.Errorf("%s: invalid configuration", path)
 			}
-			if err = config.UnmarshalTable(subTable, c.Tags); err != nil {
+			if err = toml.UnmarshalTable(subTable, c.Tags); err != nil {
 				log.Printf("E! Could not parse [global_tags] config\n")
 				return fmt.Errorf("Error parsing %s, %s", path, err)
 			}
@@ -579,7 +578,7 @@ func (c *Config) LoadConfig(path string) error {
 		if !ok {
 			return fmt.Errorf("%s: invalid configuration", path)
 		}
-		if err = config.UnmarshalTable(subTable, c.Agent); err != nil {
+		if err = toml.UnmarshalTable(subTable, c.Agent); err != nil {
 			log.Printf("E! Could not parse [agent] config\n")
 			return fmt.Errorf("Error parsing %s, %s", path, err)
 		}
@@ -716,7 +715,7 @@ func (c *Config) addAggregator(name string, table *ast.Table) error {
 		return err
 	}
 
-	if err := config.UnmarshalTable(table, aggregator); err != nil {
+	if err := toml.UnmarshalTable(table, aggregator); err != nil {
 		return err
 	}
 
@@ -736,7 +735,7 @@ func (c *Config) addProcessor(name string, table *ast.Table) error {
 		return err
 	}
 
-	if err := config.UnmarshalTable(table, processor); err != nil {
+	if err := toml.UnmarshalTable(table, processor); err != nil {
 		return err
 	}
 
@@ -776,7 +775,7 @@ func (c *Config) addOutput(name string, table *ast.Table) error {
 		return err
 	}
 
-	if err := config.UnmarshalTable(table, output); err != nil {
+	if err := toml.UnmarshalTable(table, output); err != nil {
 		return err
 	}
 
@@ -817,7 +816,7 @@ func (c *Config) addInput(name string, table *ast.Table) error {
 		return err
 	}
 
-	if err := config.UnmarshalTable(table, input); err != nil {
+	if err := toml.UnmarshalTable(table, input); err != nil {
 		return err
 	}
 
@@ -909,7 +908,7 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err
 	conf.Tags = make(map[string]string)
 	if node, ok := tbl.Fields["tags"]; ok {
 		if subtbl, ok := node.(*ast.Table); ok {
-			if err := config.UnmarshalTable(subtbl, conf.Tags); err != nil {
+			if err := toml.UnmarshalTable(subtbl, conf.Tags); err != nil {
 				log.Printf("Could not parse tags for input %s\n", name)
 			}
 		}
@@ -1146,7 +1145,7 @@ func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
 	cp.Tags = make(map[string]string)
 	if node, ok := tbl.Fields["tags"]; ok {
 		if subtbl, ok := node.(*ast.Table); ok {
-			if err := config.UnmarshalTable(subtbl, cp.Tags); err != nil {
+			if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
 				log.Printf("E! Could not parse tags for input %s\n", name)
 			}
 		}

From ff9369f1a1cb69fa44fdd8e664ffffddc7949a13 Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Mon, 13 Feb 2017 10:33:51 +0000
Subject: [PATCH 0121/1302] prepend 'inputs.' to --test output check

---
 agent/agent.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/agent/agent.go b/agent/agent.go
index 192e8c112..11f343632 100644
--- a/agent/agent.go
+++ b/agent/agent.go
@@ -209,7 +209,7 @@ func (a *Agent) Test() error {
 		// Special instructions for some inputs. cpu, for example, needs to be
 		// run twice in order to return cpu usage percentages.
 		switch input.Name() {
-		case "cpu", "mongodb", "procstat":
+		case "inputs.cpu", "inputs.mongodb", "inputs.procstat":
 			time.Sleep(500 * time.Millisecond)
 			fmt.Printf("* Plugin: %s, Collection 2\n", input.Name())
 			if err := input.Input.Gather(acc); err != nil {

From 22243a8354d0c88cf20e853fce1124d421e3c979 Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Mon, 13 Feb 2017 10:40:38 +0000
Subject: [PATCH 0122/1302] Skip service input plugins in test mode

---
 agent/agent.go | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/agent/agent.go b/agent/agent.go
index 11f343632..7909a4c8a 100644
--- a/agent/agent.go
+++ b/agent/agent.go
@@ -191,6 +191,12 @@ func (a *Agent) Test() error {
 	}()
 
 	for _, input := range a.Config.Inputs {
+		if _, ok := input.Input.(telegraf.ServiceInput); ok {
+			fmt.Printf("\nWARNING: skipping plugin [[%s]]: service inputs not supported in --test mode\n",
+				input.Name())
+			continue
+		}
+
 		acc := NewAccumulator(input, metricC)
 		acc.SetPrecision(a.Config.Agent.Precision.Duration,
 			a.Config.Agent.Interval.Duration)
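
Editor's note: the skip hinges on Go's comma-ok type assertion against an interface. A self-contained sketch of the idea, using hypothetical `Input`/`ServiceInput` interfaces standing in for telegraf's:

```go
package main

import "fmt"

type Input interface {
	Gather() error
}

// ServiceInput is a hypothetical stand-in for telegraf.ServiceInput.
type ServiceInput interface {
	Input
	Start() error
	Stop()
}

type statsd struct{} // pretend long-running listener plugin

func (statsd) Gather() error { return nil }
func (statsd) Start() error  { return nil }
func (statsd) Stop()         {}

func main() {
	var in Input = statsd{}
	// The comma-ok assertion reports whether the dynamic type also
	// satisfies ServiceInput, without panicking when it does not.
	if _, ok := in.(ServiceInput); ok {
		fmt.Println("skipping service input in --test mode")
	}
}
```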

From 45a168e4253477b0ecace60ea78af5cab905ddef Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fran=C3=A7ois=20de=20Metz?= 
Date: Mon, 13 Feb 2017 15:30:30 +0000
Subject: [PATCH 0123/1302] Fix setting the username and the password to the
 influxdb output. (#2401)

---
 plugins/outputs/influxdb/influxdb.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go
index 5a5899a60..6c19a35fc 100644
--- a/plugins/outputs/influxdb/influxdb.go
+++ b/plugins/outputs/influxdb/influxdb.go
@@ -112,6 +112,8 @@ func (i *InfluxDB) Connect() error {
 				Timeout:   i.Timeout.Duration,
 				TLSConfig: tlsConfig,
 				UserAgent: i.UserAgent,
+				Username:  i.Username,
+				Password:  i.Password,
 			}
 			wp := client.WriteParams{
 				Database:        i.Database,

From c19fb1535e8b72824622b745c533eea47cb97af3 Mon Sep 17 00:00:00 2001
From: Yaron de Leeuw 
Date: Wed, 15 Feb 2017 12:17:26 -0500
Subject: [PATCH 0124/1302] README: update golang requirement to 1.7 (#2412)

The docker engine-api package we use needs golang 1.7+, see:
https://github.com/docker/engine-api/pull/382#issuecomment-244512952

So telegraf won't compile without Go 1.7 or newer.
---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 462076dca..3c0bedd1a 100644
--- a/README.md
+++ b/README.md
@@ -43,7 +43,7 @@ Ansible role: https://github.com/rossmcdonald/telegraf
 
 Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
 which gets installed via the Makefile
-if you don't have it already. You also must build with golang version 1.5+.
+if you don't have it already. You also must build with golang version 1.7+.
 
 1. [Install Go](https://golang.org/doc/install)
 2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)

From 25c55419dfcb8546e0c38be568cfdf673c254878 Mon Sep 17 00:00:00 2001
From: Priyank Trivedi 
Date: Fri, 17 Feb 2017 00:33:17 +0530
Subject: [PATCH 0125/1302] Fix typo - Default from Defalt (#2417)

---
 plugins/inputs/apache/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/inputs/apache/README.md b/plugins/inputs/apache/README.md
index b700a1b44..ffebbcbf8 100644
--- a/plugins/inputs/apache/README.md
+++ b/plugins/inputs/apache/README.md
@@ -4,7 +4,7 @@
 - **urls** []string: List of apache-status URLs to collect from. Default is "http://localhost/server-status?auto".
 - **username** string: Username for HTTP basic authentication
 - **password** string: Password for HTTP basic authentication
-- **timeout** duration: time that the HTTP connection will remain waiting for response. Defalt 4 seconds ("4s")
+- **timeout** duration: time that the HTTP connection will remain waiting for response. Default 4 seconds ("4s")
 
 ##### Optional SSL Config
 

From 54c9a385d503957bcd61ab951fc992e5db9c351d Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Sat, 21 Jan 2017 15:37:53 -0800
Subject: [PATCH 0126/1302] Fix prometheus_client reload behavior

fixes #2282
---
 CHANGELOG.md                                  |  1 +
 agent/agent.go                                |  1 +
 circle.yml                                    |  6 ++--
 .../prometheus_client/prometheus_client.go    | 30 +++++++++----------
 4 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 283c5ccba..2aaf94adb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -60,6 +60,7 @@ be deprecated eventually.
 - [#2356](https://github.com/influxdata/telegraf/issues/2356): cpu input panic when /proc/stat is empty.
 - [#2341](https://github.com/influxdata/telegraf/issues/2341): telegraf swallowing panics in --test mode.
 - [#2358](https://github.com/influxdata/telegraf/pull/2358): Create pidfile with 644 permissions & defer file deletion.
+- [#2282](https://github.com/influxdata/telegraf/issues/2282): Reloading telegraf freezes prometheus output.
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/agent/agent.go b/agent/agent.go
index 7909a4c8a..e82caf148 100644
--- a/agent/agent.go
+++ b/agent/agent.go
@@ -398,5 +398,6 @@ func (a *Agent) Run(shutdown chan struct{}) error {
 	}
 
 	wg.Wait()
+	a.Close()
 	return nil
 }
diff --git a/circle.yml b/circle.yml
index c237040a2..a7d15368d 100644
--- a/circle.yml
+++ b/circle.yml
@@ -4,9 +4,9 @@ machine:
   post:
     - sudo service zookeeper stop
     - go version
-    - go version | grep 1.7.5 || sudo rm -rf /usr/local/go
-    - wget https://storage.googleapis.com/golang/go1.7.5.linux-amd64.tar.gz
-    - sudo tar -C /usr/local -xzf go1.7.5.linux-amd64.tar.gz
+    - sudo rm -rf /usr/local/go
+    - wget https://storage.googleapis.com/golang/go1.8.linux-amd64.tar.gz
+    - sudo tar -C /usr/local -xzf go1.8.linux-amd64.tar.gz
     - go version
 
 dependencies:
diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go
index e86a0a526..8c52b3358 100644
--- a/plugins/outputs/prometheus_client/prometheus_client.go
+++ b/plugins/outputs/prometheus_client/prometheus_client.go
@@ -1,6 +1,7 @@
 package prometheus_client
 
 import (
+	"context"
 	"fmt"
 	"log"
 	"net/http"
@@ -24,6 +25,7 @@ type MetricWithExpiration struct {
 type PrometheusClient struct {
 	Listen             string
 	ExpirationInterval internal.Duration `toml:"expiration_interval"`
+	server             *http.Server
 
 	metrics map[string]*MetricWithExpiration
 
@@ -41,30 +43,25 @@ var sampleConfig = `
 func (p *PrometheusClient) Start() error {
 	p.metrics = make(map[string]*MetricWithExpiration)
 	prometheus.Register(p)
-	defer func() {
-		if r := recover(); r != nil {
-			// recovering from panic here because there is no way to stop a
-			// running http go server except by a kill signal. Since the server
-			// does not stop on SIGHUP, Start() will panic when the process
-			// is reloaded.
-		}
-	}()
+
 	if p.Listen == "" {
 		p.Listen = "localhost:9126"
 	}
 
-	http.Handle("/metrics", prometheus.Handler())
-	server := &http.Server{
-		Addr: p.Listen,
+	mux := http.NewServeMux()
+	mux.Handle("/metrics", prometheus.Handler())
+
+	p.server = &http.Server{
+		Addr:    p.Listen,
+		Handler: mux,
 	}
 
-	go server.ListenAndServe()
+	go p.server.ListenAndServe()
 	return nil
 }
 
 func (p *PrometheusClient) Stop() {
-	// TODO: Use a listener for http.Server that counts active connections
-	//       that can be stopped and closed gracefully
+	// plugin gets cleaned up in Close() already.
 }
 
 func (p *PrometheusClient) Connect() error {
@@ -73,8 +70,9 @@ func (p *PrometheusClient) Connect() error {
 }
 
 func (p *PrometheusClient) Close() error {
-	// This service output does not need to close any of its connections
-	return nil
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+	defer cancel()
+	return p.server.Shutdown(ctx)
 }
 
 func (p *PrometheusClient) SampleConfig() string {
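
Editor's note: two details of the fix are worth calling out. A private `ServeMux` sidesteps the duplicate `http.Handle("/metrics", ...)` registration on the global mux that forced the old recover-from-panic workaround, and keeping a handle to the `http.Server` allows a graceful stop via `Shutdown`, which is new in Go 1.8 and is why circle.yml moves to the Go 1.8 toolchain. A minimal sketch of the start/stop pattern, with an illustrative handler and address:

```go
package main

import (
	"context"
	"net/http"
	"time"
)

type server struct {
	srv *http.Server
}

func (s *server) Start() error {
	// A private ServeMux avoids http.DefaultServeMux, whose duplicate
	// "/metrics" registration panics when the plugin restarts on SIGHUP.
	mux := http.NewServeMux()
	mux.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok\n"))
	})
	s.srv = &http.Server{Addr: "localhost:9126", Handler: mux}
	go s.srv.ListenAndServe()
	return nil
}

func (s *server) Close() error {
	// Shutdown (new in Go 1.8) stops the listener and drains connections,
	// freeing the port so a reloaded instance can bind it again.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return s.srv.Shutdown(ctx)
}

func main() {
	s := &server{}
	s.Start()
	s.Close()
}
```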

From 5da40d56adba8b416ae45a886918f4c04b56ddeb Mon Sep 17 00:00:00 2001
From: Leandro Piccilli 
Date: Tue, 14 Feb 2017 00:56:48 +0100
Subject: [PATCH 0127/1302] Check if tag value is empty before allocation

closes #2390
closes #2404
---
 CHANGELOG.md                             |  1 +
 metric/metric.go                         |  7 ++++++-
 metric/metric_test.go                    | 23 +++++++++++++++++++++++
 plugins/inputs/prometheus/parser_test.go |  8 +++-----
 plugins/parsers/nagios/parser_test.go    |  2 +-
 5 files changed, 34 insertions(+), 7 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2aaf94adb..a49f5e8f8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -61,6 +61,7 @@ be deprecated eventually.
 - [#2341](https://github.com/influxdata/telegraf/issues/2341): telegraf swallowing panics in --test mode.
 - [#2358](https://github.com/influxdata/telegraf/pull/2358): Create pidfile with 644 permissions & defer file deletion.
 - [#2282](https://github.com/influxdata/telegraf/issues/2282): Reloading telegraf freezes prometheus output.
+- [#2390](https://github.com/influxdata/telegraf/issues/2390): Empty tag value causes error on InfluxDB output.
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/metric/metric.go b/metric/metric.go
index 0a2ca41b6..edf3b7794 100644
--- a/metric/metric.go
+++ b/metric/metric.go
@@ -44,13 +44,18 @@ func New(
 	// pre-allocate exact size of the tags slice
 	taglen := 0
 	for k, v := range tags {
-		// TODO check that length of tag key & value are > 0
+		if len(k) == 0 || len(v) == 0 {
+			continue
+		}
 		taglen += 2 + len(escape(k, "tagkey")) + len(escape(v, "tagval"))
 	}
 	m.tags = make([]byte, taglen)
 
 	i := 0
 	for k, v := range tags {
+		if len(k) == 0 || len(v) == 0 {
+			continue
+		}
 		m.tags[i] = ','
 		i++
 		i += copy(m.tags[i:], escape(k, "tagkey"))
diff --git a/metric/metric_test.go b/metric/metric_test.go
index f133a507c..dd231f8c4 100644
--- a/metric/metric_test.go
+++ b/metric/metric_test.go
@@ -625,3 +625,26 @@ func TestNewMetricFailNaN(t *testing.T) {
 	_, err := New("cpu", tags, fields, now)
 	assert.NoError(t, err)
 }
+
+func TestEmptyTagValueOrKey(t *testing.T) {
+	now := time.Now()
+
+	tags := map[string]string{
+		"host":     "localhost",
+		"emptytag": "",
+		"":         "valuewithoutkey",
+	}
+	fields := map[string]interface{}{
+		"usage_idle": float64(99),
+	}
+	m, err := New("cpu", tags, fields, now)
+
+	assert.True(t, m.HasTag("host"))
+	assert.False(t, m.HasTag("emptytag"))
+	assert.Equal(t,
+		fmt.Sprintf("cpu,host=localhost usage_idle=99 %d\n", now.UnixNano()),
+		m.String())
+
+	assert.NoError(t, err)
+
+}
diff --git a/plugins/inputs/prometheus/parser_test.go b/plugins/inputs/prometheus/parser_test.go
index fcd32ad43..4f2a8516f 100644
--- a/plugins/inputs/prometheus/parser_test.go
+++ b/plugins/inputs/prometheus/parser_test.go
@@ -111,11 +111,9 @@ func TestParseValidPrometheus(t *testing.T) {
 		"gauge": float64(1),
 	}, metrics[0].Fields())
 	assert.Equal(t, map[string]string{
-		"osVersion":        "CentOS Linux 7 (Core)",
-		"dockerVersion":    "1.8.2",
-		"kernelVersion":    "3.10.0-229.20.1.el7.x86_64",
-		"cadvisorRevision": "",
-		"cadvisorVersion":  "",
+		"osVersion":     "CentOS Linux 7 (Core)",
+		"dockerVersion": "1.8.2",
+		"kernelVersion": "3.10.0-229.20.1.el7.x86_64",
 	}, metrics[0].Tags())
 
 	// Counter value
diff --git a/plugins/parsers/nagios/parser_test.go b/plugins/parsers/nagios/parser_test.go
index ee21ea117..b1e3d6fdd 100644
--- a/plugins/parsers/nagios/parser_test.go
+++ b/plugins/parsers/nagios/parser_test.go
@@ -67,7 +67,7 @@ func TestParseValidOutput(t *testing.T) {
 	assert.Equal(t, map[string]interface{}{
 		"value": float64(0.008457),
 	}, metrics[0].Fields())
-	assert.Equal(t, map[string]string{"unit": ""}, metrics[0].Tags())
+	assert.Equal(t, map[string]string{}, metrics[0].Tags())
 }
 
 func TestParseInvalidOutput(t *testing.T) {
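
Editor's note: a standalone sketch of the guard above, using bytes.Buffer for brevity instead of the exact-size preallocation in metric.New. The point is that the skip has to happen in both the sizing pass and the write pass, or the buffer offsets drift:

```go
package main

import (
	"bytes"
	"fmt"
)

func serializeTags(tags map[string]string) string {
	var buf bytes.Buffer
	for k, v := range tags {
		if len(k) == 0 || len(v) == 0 {
			continue // never emit "key=" or "=value" pairs
		}
		fmt.Fprintf(&buf, ",%s=%s", k, v)
	}
	return buf.String()
}

func main() {
	fmt.Println(serializeTags(map[string]string{
		"host":     "localhost",
		"emptytag": "",
		"":         "valuewithoutkey",
	}))
	// Prints: ,host=localhost
}
```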

From 2a3448c8f3b0add54a28da87ce83509d1f83bf2b Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Thu, 16 Feb 2017 23:13:14 +0000
Subject: [PATCH 0128/1302] socket_writer output plugin README

---
 plugins/outputs/socket_writer/README.md | 27 +++++++++++++++++++++++++
 1 file changed, 27 insertions(+)
 create mode 100644 plugins/outputs/socket_writer/README.md

diff --git a/plugins/outputs/socket_writer/README.md b/plugins/outputs/socket_writer/README.md
new file mode 100644
index 000000000..441cdf1f7
--- /dev/null
+++ b/plugins/outputs/socket_writer/README.md
@@ -0,0 +1,27 @@
+# socket_writer Plugin
+
+The socket_writer plugin can write to a UDP, TCP, or unix socket.
+
+It can output data in any of the [supported output formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md).
+
+```toml
+# Generic socket writer capable of handling multiple socket types.
+[[outputs.socket_writer]]
+  ## URL to connect to
+  # address = "tcp://127.0.0.1:8094"
+  # address = "tcp://example.com:http"
+  # address = "tcp4://127.0.0.1:8094"
+  # address = "tcp6://127.0.0.1:8094"
+  # address = "tcp6://[2001:db8::1]:8094"
+  # address = "udp://127.0.0.1:8094"
+  # address = "udp4://127.0.0.1:8094"
+  # address = "udp6://127.0.0.1:8094"
+  # address = "unix:///tmp/telegraf.sock"
+  # address = "unixgram:///tmp/telegraf.sock"
+
+  ## Data format to generate.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  # data_format = "influx"
+```

From 06176ef410c51854580c4340cd0aa7085ad49bd7 Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Thu, 16 Feb 2017 19:37:55 +0000
Subject: [PATCH 0129/1302] Only set the buffer size once

fixes #2380
---
 CHANGELOG.md                      | 1 +
 internal/models/running_output.go | 3 +--
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a49f5e8f8..6896a7ad7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -62,6 +62,7 @@ be deprecated eventually.
 - [#2358](https://github.com/influxdata/telegraf/pull/2358): Create pidfile with 644 permissions & defer file deletion.
 - [#2282](https://github.com/influxdata/telegraf/issues/2282): Reloading telegraf freezes prometheus output.
 - [#2390](https://github.com/influxdata/telegraf/issues/2390): Empty tag value causes error on InfluxDB output.
+- [#2380](https://github.com/influxdata/telegraf/issues/2380): buffer_size field value is negative number from "internal" plugin.
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/internal/models/running_output.go b/internal/models/running_output.go
index 0ce756f47..61f26add1 100644
--- a/internal/models/running_output.go
+++ b/internal/models/running_output.go
@@ -122,9 +122,9 @@ func (ro *RunningOutput) AddMetric(m telegraf.Metric) {
 // Write writes all cached points to this output.
 func (ro *RunningOutput) Write() error {
 	nFails, nMetrics := ro.failMetrics.Len(), ro.metrics.Len()
+	ro.BufferSize.Set(int64(nFails + nMetrics))
 	log.Printf("D! Output [%s] buffer fullness: %d / %d metrics. ",
 		ro.Name, nFails+nMetrics, ro.MetricBufferLimit)
-	ro.BufferSize.Incr(int64(nFails + nMetrics))
 	var err error
 	if !ro.failMetrics.IsEmpty() {
 		// how many batches of failed writes we need to write.
@@ -176,7 +176,6 @@ func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
 		log.Printf("D! Output [%s] wrote batch of %d metrics in %s\n",
 			ro.Name, nMetrics, elapsed)
 		ro.MetricsWritten.Incr(int64(nMetrics))
-		ro.BufferSize.Incr(-int64(nMetrics))
 		ro.WriteTime.Incr(elapsed.Nanoseconds())
 	}
 	return err
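
Editor's note: the underlying reasoning is that buffer fullness is a gauge, a point-in-time level, so it should be overwritten on each flush rather than incremented on write and decremented on success, where any missed path (such as a failed write) leaves it skewed. A toy illustration, with a hypothetical stat type standing in for telegraf's internal one:

```go
package main

import "fmt"

type gauge struct{ v int64 }

func (g *gauge) Set(v int64)  { g.v = v }
func (g *gauge) Incr(d int64) { g.v += d }

func main() {
	// Gauge-style: overwrite with the current fullness on each flush.
	g := &gauge{}
	for _, level := range []int64{12, 7, 0} {
		g.Set(level)
		fmt.Println("buffer_size =", g.v)
	}

	// Counter-style accounting drifts if any decrement path is missed:
	c := &gauge{}
	c.Incr(12) // write attempt
	// ...write fails, no decrement happens...
	c.Incr(12)                             // next flush re-adds the same metrics
	fmt.Println("drifted counter =", c.v) // 24, though only 12 are buffered
}
```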

From e5349393f878bb994990d6b2d33e82e731f50802 Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Thu, 16 Feb 2017 22:24:42 +0000
Subject: [PATCH 0130/1302] Check for errors in user stats & process list

closes #2414
---
 CHANGELOG.md                  |   1 +
 plugins/inputs/mysql/mysql.go | 153 ++++++++++++++++++----------------
 2 files changed, 81 insertions(+), 73 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6896a7ad7..8febc3e50 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -63,6 +63,7 @@ be deprecated eventually.
 - [#2282](https://github.com/influxdata/telegraf/issues/2282): Reloading telegraf freezes prometheus output.
 - [#2390](https://github.com/influxdata/telegraf/issues/2390): Empty tag value causes error on InfluxDB output.
 - [#2380](https://github.com/influxdata/telegraf/issues/2380): buffer_size field value is negative number from "internal" plugin.
+- [#2414](https://github.com/influxdata/telegraf/issues/2414): Missing error handling in the MySQL plugin leads to segmentation violation.
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go
index adc21880b..1ff7c3421 100644
--- a/plugins/inputs/mysql/mysql.go
+++ b/plugins/inputs/mysql/mysql.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"database/sql"
 	"fmt"
+	"log"
 	"strconv"
 	"strings"
 	"sync"
@@ -904,92 +905,98 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum
 	// gather connection metrics from processlist for each user
 	if m.GatherProcessList {
 		conn_rows, err := db.Query("SELECT user, sum(1) FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user")
+		if err != nil {
+			log.Printf("E! MySQL Error gathering process list: %s", err)
+		} else {
+			for conn_rows.Next() {
+				var user string
+				var connections int64
 
-		for conn_rows.Next() {
-			var user string
-			var connections int64
+				err = conn_rows.Scan(&user, &connections)
+				if err != nil {
+					return err
+				}
 
-			err = conn_rows.Scan(&user, &connections)
-			if err != nil {
-				return err
+				tags := map[string]string{"server": servtag, "user": user}
+				fields := make(map[string]interface{})
+
+				if err != nil {
+					return err
+				}
+				fields["connections"] = connections
+				acc.AddFields("mysql_users", fields, tags)
 			}
-
-			tags := map[string]string{"server": servtag, "user": user}
-			fields := make(map[string]interface{})
-
-			if err != nil {
-				return err
-			}
-			fields["connections"] = connections
-			acc.AddFields("mysql_users", fields, tags)
 		}
 	}
 
 	// gather connection metrics from user_statistics for each user
 	if m.GatherUserStatistics {
 		conn_rows, err := db.Query("select user, total_connections, concurrent_connections, connected_time, busy_time, cpu_time, bytes_received, bytes_sent, binlog_bytes_written, rows_fetched, rows_updated, table_rows_read, select_commands, update_commands, other_commands, commit_transactions, rollback_transactions, denied_connections, lost_connections, access_denied, empty_queries, total_ssl_connections FROM INFORMATION_SCHEMA.USER_STATISTICS GROUP BY user")
+		if err != nil {
+			log.Printf("E! MySQL Error gathering user stats: %s", err)
+		} else {
+			for conn_rows.Next() {
+				var user string
+				var total_connections int64
+				var concurrent_connections int64
+				var connected_time int64
+				var busy_time int64
+				var cpu_time int64
+				var bytes_received int64
+				var bytes_sent int64
+				var binlog_bytes_written int64
+				var rows_fetched int64
+				var rows_updated int64
+				var table_rows_read int64
+				var select_commands int64
+				var update_commands int64
+				var other_commands int64
+				var commit_transactions int64
+				var rollback_transactions int64
+				var denied_connections int64
+				var lost_connections int64
+				var access_denied int64
+				var empty_queries int64
+				var total_ssl_connections int64
 
-		for conn_rows.Next() {
-			var user string
-			var total_connections int64
-			var concurrent_connections int64
-			var connected_time int64
-			var busy_time int64
-			var cpu_time int64
-			var bytes_received int64
-			var bytes_sent int64
-			var binlog_bytes_written int64
-			var rows_fetched int64
-			var rows_updated int64
-			var table_rows_read int64
-			var select_commands int64
-			var update_commands int64
-			var other_commands int64
-			var commit_transactions int64
-			var rollback_transactions int64
-			var denied_connections int64
-			var lost_connections int64
-			var access_denied int64
-			var empty_queries int64
-			var total_ssl_connections int64
+				err = conn_rows.Scan(&user, &total_connections, &concurrent_connections,
+					&connected_time, &busy_time, &cpu_time, &bytes_received, &bytes_sent, &binlog_bytes_written,
+					&rows_fetched, &rows_updated, &table_rows_read, &select_commands, &update_commands, &other_commands,
+					&commit_transactions, &rollback_transactions, &denied_connections, &lost_connections, &access_denied,
+					&empty_queries, &total_ssl_connections,
+				)
 
-			err = conn_rows.Scan(&user, &total_connections, &concurrent_connections,
-				&connected_time, &busy_time, &cpu_time, &bytes_received, &bytes_sent, &binlog_bytes_written,
-				&rows_fetched, &rows_updated, &table_rows_read, &select_commands, &update_commands, &other_commands,
-				&commit_transactions, &rollback_transactions, &denied_connections, &lost_connections, &access_denied,
-				&empty_queries, &total_ssl_connections,
-			)
+				if err != nil {
+					return err
+				}
 
-			if err != nil {
-				return err
+				tags := map[string]string{"server": servtag, "user": user}
+				fields := map[string]interface{}{
+					"total_connections":      total_connections,
+					"concurrent_connections": concurrent_connections,
+					"connected_time":         connected_time,
+					"busy_time":              busy_time,
+					"cpu_time":               cpu_time,
+					"bytes_received":         bytes_received,
+					"bytes_sent":             bytes_sent,
+					"binlog_bytes_written":   binlog_bytes_written,
+					"rows_fetched":           rows_fetched,
+					"rows_updated":           rows_updated,
+					"table_rows_read":        table_rows_read,
+					"select_commands":        select_commands,
+					"update_commands":        update_commands,
+					"other_commands":         other_commands,
+					"commit_transactions":    commit_transactions,
+					"rollback_transactions":  rollback_transactions,
+					"denied_connections":     denied_connections,
+					"lost_connections":       lost_connections,
+					"access_denied":          access_denied,
+					"empty_queries":          empty_queries,
+					"total_ssl_connections":  total_ssl_connections,
+				}
+
+				acc.AddFields("mysql_user_stats", fields, tags)
 			}
-
-			tags := map[string]string{"server": servtag, "user": user}
-			fields := map[string]interface{}{
-				"total_connections":      total_connections,
-				"concurrent_connections": concurrent_connections,
-				"connected_time":         connected_time,
-				"busy_time":              busy_time,
-				"cpu_time":               cpu_time,
-				"bytes_received":         bytes_received,
-				"bytes_sent":             bytes_sent,
-				"binlog_bytes_written":   binlog_bytes_written,
-				"rows_fetched":           rows_fetched,
-				"rows_updated":           rows_updated,
-				"table_rows_read":        table_rows_read,
-				"select_commands":        select_commands,
-				"update_commands":        update_commands,
-				"other_commands":         other_commands,
-				"commit_transactions":    commit_transactions,
-				"rollback_transactions":  rollback_transactions,
-				"denied_connections":     denied_connections,
-				"lost_connections":       lost_connections,
-				"access_denied":          access_denied,
-				"empty_queries":          empty_queries,
-				"total_ssl_connections":  total_ssl_connections,
-			}
-
-			acc.AddFields("mysql_user_stats", fields, tags)
 		}
 	}
 

From e17164d3f09a292078b528cb96c75456b2297c28 Mon Sep 17 00:00:00 2001
From: Carlos 
Date: Mon, 20 Feb 2017 11:50:39 +0100
Subject: [PATCH 0131/1302] Added default config to file output plugin's README
 (#2426)

---
 plugins/outputs/file/README.md | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/plugins/outputs/file/README.md b/plugins/outputs/file/README.md
index 6f3b7f513..a75297bb2 100644
--- a/plugins/outputs/file/README.md
+++ b/plugins/outputs/file/README.md
@@ -1 +1,16 @@
 # file Output Plugin
+
+This plugin writes telegraf metrics to files.
+
+### Configuration
+```
+[[outputs.file]]
+  ## Files to write to, "stdout" is a specially handled file.
+  files = ["stdout", "/tmp/metrics.out"]
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "influx"
+```

From c4212d69c942ec07a87fdd6ddfb6795d50112f0c Mon Sep 17 00:00:00 2001
From: Rickard von Essen 
Date: Tue, 21 Feb 2017 22:13:22 +0100
Subject: [PATCH 0132/1302] Updated readme, now requires Go 1.8 (#2455)

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 3c0bedd1a..258464237 100644
--- a/README.md
+++ b/README.md
@@ -43,7 +43,7 @@ Ansible role: https://github.com/rossmcdonald/telegraf
 
 Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
 which gets installed via the Makefile
-if you don't have it already. You also must build with golang version 1.7+.
+if you don't have it already. You also must build with golang version 1.8+.
 
 1. [Install Go](https://golang.org/doc/install)
 2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)

From 81408f9da7f124e8fa811e90297d5b2870443acd Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Fri, 17 Feb 2017 19:36:44 +0000
Subject: [PATCH 0133/1302] switch out deprecated docker client library

closes #2071
---
 CHANGELOG.md                         |   1 +
 Godeps                               |   5 +-
 plugins/inputs/docker/README.md      |  16 ++-
 plugins/inputs/docker/docker.go      |  63 ++++++++---
 plugins/inputs/docker/docker_test.go | 150 ++-------------------------
 plugins/inputs/docker/fake_client.go | 143 +++++++++++++++++++++++++
 6 files changed, 210 insertions(+), 168 deletions(-)
 create mode 100644 plugins/inputs/docker/fake_client.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8febc3e50..509d6f2f1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -48,6 +48,7 @@ be deprecated eventually.
 - [#2201](https://github.com/influxdata/telegraf/pull/2201): Add lock option to the IPtables input plugin.
 - [#2244](https://github.com/influxdata/telegraf/pull/2244): Support ipmi_sensor plugin querying local ipmi sensors.
 - [#2339](https://github.com/influxdata/telegraf/pull/2339): Increment gather_errors for all errors emitted by inputs.
+- [#2071](https://github.com/influxdata/telegraf/issues/2071): Use official docker SDK.
 
 ### Bugfixes
 
diff --git a/Godeps b/Godeps
index fb8e23860..de326cb19 100644
--- a/Godeps
+++ b/Godeps
@@ -9,10 +9,7 @@ github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
 github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29
 github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
 github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
-github.com/docker/distribution fb0bebc4b64e3881cc52a2478d749845ed76d2a8
-github.com/docker/engine-api 4290f40c056686fcaa5c9caf02eac1dde9315adf
-github.com/docker/go-connections 9670439d95da2651d9dfc7acc5d2ed92d3f25ee6
-github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
+github.com/docker/docker b89aff1afa1f61993ab2ba18fd62d9375a195f5d
 github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
 github.com/eapache/go-xerial-snappy bb955e01b9346ac19dc29eb16586c90ded99a98c
 github.com/eapache/queue 44cc805cf13205b55f69e14bcb69867d1ae92f98
diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md
index 5e8910677..94965213f 100644
--- a/plugins/inputs/docker/README.md
+++ b/plugins/inputs/docker/README.md
@@ -16,12 +16,20 @@ for the stat structure can be found
 ```
 # Read metrics about docker containers
 [[inputs.docker]]
-  # Docker Endpoint
-  #   To use TCP, set endpoint = "tcp://[ip]:[port]"
-  #   To use environment variables (ie, docker-machine), set endpoint = "ENV"
+  ## Docker Endpoint
+  ##   To use TCP, set endpoint = "tcp://[ip]:[port]"
+  ##   To use environment variables (ie, docker-machine), set endpoint = "ENV"
   endpoint = "unix:///var/run/docker.sock"
-  # Only collect metrics for these containers, collect all if empty
+  ## Only collect metrics for these containers, collect all if empty
   container_names = []
+  ## Timeout for docker list, info, and stats commands
+  timeout = "5s"
+
+  ## Whether to report for each container per-device blkio (8:0, 8:1...) and
+  ## network (eth0, eth1, ...) stats or not
+  perdevice = true
+  ## Whether to report for each container total blkio and network stats or not
+  total = false
 ```
 
 ### Measurements & Fields:
diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go
index 82a3791b6..ec192efd5 100644
--- a/plugins/inputs/docker/docker.go
+++ b/plugins/inputs/docker/docker.go
@@ -1,6 +1,7 @@
-package system
+package docker
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -11,10 +12,9 @@ import (
 	"sync"
 	"time"
 
-	"golang.org/x/net/context"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/client"
 
-	"github.com/docker/engine-api/client"
-	"github.com/docker/engine-api/types"
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/plugins/inputs"
@@ -28,15 +28,46 @@ type Docker struct {
 	PerDevice      bool `toml:"perdevice"`
 	Total          bool `toml:"total"`
 
-	client      DockerClient
+	client      *client.Client
 	engine_host string
+
+	testing bool
 }
 
-// DockerClient interface, useful for testing
-type DockerClient interface {
-	Info(ctx context.Context) (types.Info, error)
-	ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
-	ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error)
+// infoWrapper wraps client.Client.Info for testing.
+func infoWrapper(c *client.Client, ctx context.Context) (types.Info, error) {
+	if c != nil {
+		return c.Info(ctx)
+	}
+	fc := FakeDockerClient{}
+	return fc.Info(ctx)
+}
+
+// listWrapper wraps client.Client.ContainerList for testing.
+func listWrapper(
+	c *client.Client,
+	ctx context.Context,
+	options types.ContainerListOptions,
+) ([]types.Container, error) {
+	if c != nil {
+		return c.ContainerList(ctx, options)
+	}
+	fc := FakeDockerClient{}
+	return fc.ContainerList(ctx, options)
+}
+
+// statsWrapper wraps client.Client.ContainerStats for testing.
+func statsWrapper(
+	c *client.Client,
+	ctx context.Context,
+	containerID string,
+	stream bool,
+) (types.ContainerStats, error) {
+	if c != nil {
+		return c.ContainerStats(ctx, containerID, stream)
+	}
+	fc := FakeDockerClient{}
+	return fc.ContainerStats(ctx, containerID, stream)
 }
 
 // KB, MB, GB, TB, PB...human friendly
@@ -80,7 +111,7 @@ func (d *Docker) SampleConfig() string { return sampleConfig }
 
 // Gather starts stats collection
 func (d *Docker) Gather(acc telegraf.Accumulator) error {
-	if d.client == nil {
+	if d.client == nil && !d.testing {
 		var c *client.Client
 		var err error
 		defaultHeaders := map[string]string{"User-Agent": "engine-api-cli-1.0"}
@@ -113,7 +144,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
 	opts := types.ContainerListOptions{}
 	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
 	defer cancel()
-	containers, err := d.client.ContainerList(ctx, opts)
+	containers, err := listWrapper(d.client, ctx, opts)
 	if err != nil {
 		return err
 	}
@@ -144,7 +175,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
 	// Get info from docker daemon
 	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
 	defer cancel()
-	info, err := d.client.Info(ctx)
+	info, err := infoWrapper(d.client, ctx)
 	if err != nil {
 		return err
 	}
@@ -247,12 +278,12 @@ func (d *Docker) gatherContainer(
 
 	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
 	defer cancel()
-	r, err := d.client.ContainerStats(ctx, container.ID, false)
+	r, err := statsWrapper(d.client, ctx, container.ID, false)
 	if err != nil {
 		return fmt.Errorf("Error getting docker stats: %s", err.Error())
 	}
-	defer r.Close()
-	dec := json.NewDecoder(r)
+	defer r.Body.Close()
+	dec := json.NewDecoder(r.Body)
 	if err = dec.Decode(&v); err != nil {
 		if err == io.EOF {
 			return nil
diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go
index cc0ada3c4..f0add03ea 100644
--- a/plugins/inputs/docker/docker_test.go
+++ b/plugins/inputs/docker/docker_test.go
@@ -1,18 +1,12 @@
-package system
+package docker
 
 import (
-	"io"
-	"io/ioutil"
-	"strings"
 	"testing"
 	"time"
 
-	"golang.org/x/net/context"
-
-	"github.com/docker/engine-api/types"
-	"github.com/docker/engine-api/types/registry"
 	"github.com/influxdata/telegraf/testutil"
 
+	"github.com/docker/docker/api/types"
 	"github.com/stretchr/testify/require"
 )
 
@@ -250,146 +244,14 @@ func testStats() *types.StatsJSON {
 	return stats
 }
 
-type FakeDockerClient struct {
-}
-
-func (d FakeDockerClient) Info(ctx context.Context) (types.Info, error) {
-	env := types.Info{
-		Containers:         108,
-		ContainersRunning:  98,
-		ContainersStopped:  6,
-		ContainersPaused:   3,
-		OomKillDisable:     false,
-		SystemTime:         "2016-02-24T00:55:09.15073105-05:00",
-		NEventsListener:    0,
-		ID:                 "5WQQ:TFWR:FDNG:OKQ3:37Y4:FJWG:QIKK:623T:R3ME:QTKB:A7F7:OLHD",
-		Debug:              false,
-		LoggingDriver:      "json-file",
-		KernelVersion:      "4.3.0-1-amd64",
-		IndexServerAddress: "https://index.docker.io/v1/",
-		MemTotal:           3840757760,
-		Images:             199,
-		CPUCfsQuota:        true,
-		Name:               "absol",
-		SwapLimit:          false,
-		IPv4Forwarding:     true,
-		ExperimentalBuild:  false,
-		CPUCfsPeriod:       true,
-		RegistryConfig: &registry.ServiceConfig{
-			IndexConfigs: map[string]*registry.IndexInfo{
-				"docker.io": {
-					Name:     "docker.io",
-					Mirrors:  []string{},
-					Official: true,
-					Secure:   true,
-				},
-			}, InsecureRegistryCIDRs: []*registry.NetIPNet{{IP: []byte{127, 0, 0, 0}, Mask: []byte{255, 0, 0, 0}}}, Mirrors: []string{}},
-		OperatingSystem:   "Linux Mint LMDE (containerized)",
-		BridgeNfIptables:  true,
-		HTTPSProxy:        "",
-		Labels:            []string{},
-		MemoryLimit:       false,
-		DriverStatus:      [][2]string{{"Pool Name", "docker-8:1-1182287-pool"}, {"Pool Blocksize", "65.54 kB"}, {"Backing Filesystem", "extfs"}, {"Data file", "/dev/loop0"}, {"Metadata file", "/dev/loop1"}, {"Data Space Used", "17.3 GB"}, {"Data Space Total", "107.4 GB"}, {"Data Space Available", "36.53 GB"}, {"Metadata Space Used", "20.97 MB"}, {"Metadata Space Total", "2.147 GB"}, {"Metadata Space Available", "2.127 GB"}, {"Udev Sync Supported", "true"}, {"Deferred Removal Enabled", "false"}, {"Data loop file", "/var/lib/docker/devicemapper/devicemapper/data"}, {"Metadata loop file", "/var/lib/docker/devicemapper/devicemapper/metadata"}, {"Library Version", "1.02.115 (2016-01-25)"}},
-		NFd:               19,
-		HTTPProxy:         "",
-		Driver:            "devicemapper",
-		NGoroutines:       39,
-		NCPU:              4,
-		DockerRootDir:     "/var/lib/docker",
-		NoProxy:           "",
-		BridgeNfIP6tables: true,
-	}
-	return env, nil
-}
-
-func (d FakeDockerClient) ContainerList(octx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
-	container1 := types.Container{
-		ID:      "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb",
-		Names:   []string{"/etcd"},
-		Image:   "quay.io/coreos/etcd:v2.2.2",
-		Command: "/etcd -name etcd0 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
-		Created: 1455941930,
-		Status:  "Up 4 hours",
-		Ports: []types.Port{
-			types.Port{
-				PrivatePort: 7001,
-				PublicPort:  0,
-				Type:        "tcp",
-			},
-			types.Port{
-				PrivatePort: 4001,
-				PublicPort:  0,
-				Type:        "tcp",
-			},
-			types.Port{
-				PrivatePort: 2380,
-				PublicPort:  0,
-				Type:        "tcp",
-			},
-			types.Port{
-				PrivatePort: 2379,
-				PublicPort:  2379,
-				Type:        "tcp",
-				IP:          "0.0.0.0",
-			},
-		},
-		SizeRw:     0,
-		SizeRootFs: 0,
-	}
-	container2 := types.Container{
-		ID:      "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
-		Names:   []string{"/etcd2"},
-		Image:   "quay.io:4443/coreos/etcd:v2.2.2",
-		Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
-		Created: 1455941933,
-		Status:  "Up 4 hours",
-		Ports: []types.Port{
-			types.Port{
-				PrivatePort: 7002,
-				PublicPort:  0,
-				Type:        "tcp",
-			},
-			types.Port{
-				PrivatePort: 4002,
-				PublicPort:  0,
-				Type:        "tcp",
-			},
-			types.Port{
-				PrivatePort: 2381,
-				PublicPort:  0,
-				Type:        "tcp",
-			},
-			types.Port{
-				PrivatePort: 2382,
-				PublicPort:  2382,
-				Type:        "tcp",
-				IP:          "0.0.0.0",
-			},
-		},
-		SizeRw:     0,
-		SizeRootFs: 0,
-	}
-
-	containers := []types.Container{container1, container2}
-	return containers, nil
-
-	//#{e6a96c84ca91a5258b7cb752579fb68826b68b49ff957487695cd4d13c343b44 titilambert/snmpsim /bin/sh -c 'snmpsimd --agent-udpv4-endpoint=0.0.0.0:31161 --process-user=root --process-group=user' 1455724831 Up 4 hours [{31161 31161 udp 0.0.0.0}] 0 0 [/snmp] map[]}]2016/02/24 01:05:01 Gathered metrics, (3s interval), from 1 inputs in 1.233836656s
-}
-
-func (d FakeDockerClient) ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error) {
-	var stat io.ReadCloser
-	jsonStat := `{"read":"2016-02-24T11:42:27.472459608-05:00","memory_stats":{"stats":{},"limit":18935443456},"blkio_stats":{"io_service_bytes_recursive":[{"major":252,"minor":1,"op":"Read","value":753664},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":753664},{"major":252,"minor":1,"op":"Total","value":753664}],"io_serviced_recursive":[{"major":252,"minor":1,"op":"Read","value":26},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":26},{"major":252,"minor":1,"op":"Total","value":26}]},"cpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052607520000000,"throttling_data":{}},"precpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052599550000000,"throttling_data":{}}}`
-	stat = ioutil.NopCloser(strings.NewReader(jsonStat))
-	return stat, nil
-}
-
 func TestDockerGatherInfo(t *testing.T) {
 	var acc testutil.Accumulator
-	client := FakeDockerClient{}
-	d := Docker{client: client}
+	d := Docker{
+		client:  nil,
+		testing: true,
+	}
 
 	err := d.Gather(&acc)
-
 	require.NoError(t, err)
 
 	acc.AssertContainsTaggedFields(t,
diff --git a/plugins/inputs/docker/fake_client.go b/plugins/inputs/docker/fake_client.go
new file mode 100644
index 000000000..03da23198
--- /dev/null
+++ b/plugins/inputs/docker/fake_client.go
@@ -0,0 +1,143 @@
+package docker
+
+import (
+	"context"
+	"io/ioutil"
+	"strings"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/registry"
+)
+
+type FakeDockerClient struct {
+}
+
+func (d FakeDockerClient) Info(ctx context.Context) (types.Info, error) {
+	env := types.Info{
+		Containers:         108,
+		ContainersRunning:  98,
+		ContainersStopped:  6,
+		ContainersPaused:   3,
+		OomKillDisable:     false,
+		SystemTime:         "2016-02-24T00:55:09.15073105-05:00",
+		NEventsListener:    0,
+		ID:                 "5WQQ:TFWR:FDNG:OKQ3:37Y4:FJWG:QIKK:623T:R3ME:QTKB:A7F7:OLHD",
+		Debug:              false,
+		LoggingDriver:      "json-file",
+		KernelVersion:      "4.3.0-1-amd64",
+		IndexServerAddress: "https://index.docker.io/v1/",
+		MemTotal:           3840757760,
+		Images:             199,
+		CPUCfsQuota:        true,
+		Name:               "absol",
+		SwapLimit:          false,
+		IPv4Forwarding:     true,
+		ExperimentalBuild:  false,
+		CPUCfsPeriod:       true,
+		RegistryConfig: &registry.ServiceConfig{
+			IndexConfigs: map[string]*registry.IndexInfo{
+				"docker.io": {
+					Name:     "docker.io",
+					Mirrors:  []string{},
+					Official: true,
+					Secure:   true,
+				},
+			}, InsecureRegistryCIDRs: []*registry.NetIPNet{{IP: []byte{127, 0, 0, 0}, Mask: []byte{255, 0, 0, 0}}}, Mirrors: []string{}},
+		OperatingSystem:   "Linux Mint LMDE (containerized)",
+		BridgeNfIptables:  true,
+		HTTPSProxy:        "",
+		Labels:            []string{},
+		MemoryLimit:       false,
+		DriverStatus:      [][2]string{{"Pool Name", "docker-8:1-1182287-pool"}, {"Pool Blocksize", "65.54 kB"}, {"Backing Filesystem", "extfs"}, {"Data file", "/dev/loop0"}, {"Metadata file", "/dev/loop1"}, {"Data Space Used", "17.3 GB"}, {"Data Space Total", "107.4 GB"}, {"Data Space Available", "36.53 GB"}, {"Metadata Space Used", "20.97 MB"}, {"Metadata Space Total", "2.147 GB"}, {"Metadata Space Available", "2.127 GB"}, {"Udev Sync Supported", "true"}, {"Deferred Removal Enabled", "false"}, {"Data loop file", "/var/lib/docker/devicemapper/devicemapper/data"}, {"Metadata loop file", "/var/lib/docker/devicemapper/devicemapper/metadata"}, {"Library Version", "1.02.115 (2016-01-25)"}},
+		NFd:               19,
+		HTTPProxy:         "",
+		Driver:            "devicemapper",
+		NGoroutines:       39,
+		NCPU:              4,
+		DockerRootDir:     "/var/lib/docker",
+		NoProxy:           "",
+		BridgeNfIP6tables: true,
+	}
+	return env, nil
+}
+
+func (d FakeDockerClient) ContainerList(octx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
+	container1 := types.Container{
+		ID:      "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb",
+		Names:   []string{"/etcd"},
+		Image:   "quay.io/coreos/etcd:v2.2.2",
+		Command: "/etcd -name etcd0 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
+		Created: 1455941930,
+		Status:  "Up 4 hours",
+		Ports: []types.Port{
+			types.Port{
+				PrivatePort: 7001,
+				PublicPort:  0,
+				Type:        "tcp",
+			},
+			types.Port{
+				PrivatePort: 4001,
+				PublicPort:  0,
+				Type:        "tcp",
+			},
+			types.Port{
+				PrivatePort: 2380,
+				PublicPort:  0,
+				Type:        "tcp",
+			},
+			types.Port{
+				PrivatePort: 2379,
+				PublicPort:  2379,
+				Type:        "tcp",
+				IP:          "0.0.0.0",
+			},
+		},
+		SizeRw:     0,
+		SizeRootFs: 0,
+	}
+	container2 := types.Container{
+		ID:      "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
+		Names:   []string{"/etcd2"},
+		Image:   "quay.io:4443/coreos/etcd:v2.2.2",
+		Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
+		Created: 1455941933,
+		Status:  "Up 4 hours",
+		Ports: []types.Port{
+			types.Port{
+				PrivatePort: 7002,
+				PublicPort:  0,
+				Type:        "tcp",
+			},
+			types.Port{
+				PrivatePort: 4002,
+				PublicPort:  0,
+				Type:        "tcp",
+			},
+			types.Port{
+				PrivatePort: 2381,
+				PublicPort:  0,
+				Type:        "tcp",
+			},
+			types.Port{
+				PrivatePort: 2382,
+				PublicPort:  2382,
+				Type:        "tcp",
+				IP:          "0.0.0.0",
+			},
+		},
+		SizeRw:     0,
+		SizeRootFs: 0,
+	}
+
+	containers := []types.Container{container1, container2}
+	return containers, nil
+
+	//#{e6a96c84ca91a5258b7cb752579fb68826b68b49ff957487695cd4d13c343b44 titilambert/snmpsim /bin/sh -c 'snmpsimd --agent-udpv4-endpoint=0.0.0.0:31161 --process-user=root --process-group=user' 1455724831 Up 4 hours [{31161 31161 udp 0.0.0.0}] 0 0 [/snmp] map[]}]2016/02/24 01:05:01 Gathered metrics, (3s interval), from 1 inputs in 1.233836656s
+}
+
+func (d FakeDockerClient) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
+	var stat types.ContainerStats
+	jsonStat := `{"read":"2016-02-24T11:42:27.472459608-05:00","memory_stats":{"stats":{},"limit":18935443456},"blkio_stats":{"io_service_bytes_recursive":[{"major":252,"minor":1,"op":"Read","value":753664},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":753664},{"major":252,"minor":1,"op":"Total","value":753664}],"io_serviced_recursive":[{"major":252,"minor":1,"op":"Read","value":26},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":26},{"major":252,"minor":1,"op":"Total","value":26}]},"cpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052607520000000,"throttling_data":{}},"precpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052599550000000,"throttling_data":{}}}`
+	stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat))
+	return stat, nil
+}
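
Editor's note: the interface-based `DockerClient` seam is replaced here by wrapper functions that fall back to `FakeDockerClient` when the concrete client is nil and the plugin is in testing mode. A compact sketch of that shape, with stand-in types for the docker SDK's:

```go
package main

import (
	"context"
	"fmt"
)

type realClient struct{}

func (c *realClient) Info(ctx context.Context) (string, error) { return "real info", nil }

type fakeClient struct{}

func (fakeClient) Info(ctx context.Context) (string, error) { return "fake info", nil }

// infoWrapper routes to the real client when one exists, else to the fake.
func infoWrapper(c *realClient, ctx context.Context) (string, error) {
	if c != nil {
		return c.Info(ctx)
	}
	return fakeClient{}.Info(ctx)
}

func main() {
	out, _ := infoWrapper(nil, context.Background()) // nil client, as in the tests
	fmt.Println(out)
}
```

Whether this beats keeping the interface is debatable: the wrappers avoid redeclaring the SDK's full client surface, at the cost of test-only branches living in production code.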

From 42a41d33cc537058396e702908fb62337ef109fe Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Fri, 24 Feb 2017 09:43:22 +0000
Subject: [PATCH 0134/1302] add cgroup plugin to README

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 258464237..3dd06e93a 100644
--- a/README.md
+++ b/README.md
@@ -103,6 +103,7 @@ configuration options.
 * [bcache](./plugins/inputs/bcache)
 * [cassandra](./plugins/inputs/cassandra)
 * [ceph](./plugins/inputs/ceph)
+* [cgroup](./plugins/inputs/cgroup)
 * [chrony](./plugins/inputs/chrony)
 * [consul](./plugins/inputs/consul)
 * [conntrack](./plugins/inputs/conntrack)

From 6f2eeae49822782b85caf7639067685b812bc325 Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Tue, 28 Feb 2017 12:46:27 +0000
Subject: [PATCH 0135/1302] Remove sleep from riemann test

---
 plugins/outputs/riemann/riemann_test.go | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/plugins/outputs/riemann/riemann_test.go b/plugins/outputs/riemann/riemann_test.go
index e03d720ce..67a161be5 100644
--- a/plugins/outputs/riemann/riemann_test.go
+++ b/plugins/outputs/riemann/riemann_test.go
@@ -193,7 +193,16 @@ func TestConnectAndWrite(t *testing.T) {
 	err = r.Write(metrics)
 	require.NoError(t, err)
 
-	time.Sleep(200 * time.Millisecond)
+	start := time.Now()
+	for {
+		events, _ := r.client.Query(`tagged "docker"`)
+		if len(events) > 0 {
+			break
+		}
+		if time.Since(start) > time.Second {
+			break
+		}
+	}
 
 	// are there any "docker" tagged events in Riemann?
 	events, err := r.client.Query(`tagged "docker"`)
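
Editor's note: the fixed `time.Sleep(200 * time.Millisecond)` is replaced by polling with a one-second deadline. Generalized, the pattern looks like the sketch below; the 10ms backoff is an addition, since the loop in the patch re-queries immediately:

```go
package main

import (
	"fmt"
	"time"
)

// waitFor polls cond until it succeeds or timeout elapses.
func waitFor(cond func() bool, timeout time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for {
		if cond() {
			return true
		}
		if time.Now().After(deadline) {
			return false
		}
		time.Sleep(10 * time.Millisecond) // brief backoff between polls
	}
}

func main() {
	start := time.Now()
	ok := waitFor(func() bool { return time.Since(start) > 50*time.Millisecond }, time.Second)
	fmt.Println("condition met:", ok)
}
```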

From b9457a109268ef1c055f4aa5b4f92a42afc21c24 Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Tue, 21 Feb 2017 19:50:10 +0100
Subject: [PATCH 0136/1302] log error message when invalid regex is used

closes #2178
---
 CHANGELOG.md                               |  1 +
 plugins/inputs/logparser/grok/grok_test.go | 37 ++++++++++++++++++++++
 plugins/inputs/logparser/logparser.go      |  2 ++
 3 files changed, 40 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 509d6f2f1..2a43e844f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -65,6 +65,7 @@ be deprecated eventually.
 - [#2390](https://github.com/influxdata/telegraf/issues/2390): Empty tag value causes error on InfluxDB output.
 - [#2380](https://github.com/influxdata/telegraf/issues/2380): buffer_size field value is negative number from "internal" plugin.
 - [#2414](https://github.com/influxdata/telegraf/issues/2414): Missing error handling in the MySQL plugin leads to segmentation violation.
+- [#2178](https://github.com/influxdata/telegraf/issues/2178): logparser: regexp with lookahead.
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/plugins/inputs/logparser/grok/grok_test.go b/plugins/inputs/logparser/grok/grok_test.go
index 1344896b8..4e0ead6e9 100644
--- a/plugins/inputs/logparser/grok/grok_test.go
+++ b/plugins/inputs/logparser/grok/grok_test.go
@@ -57,6 +57,43 @@ func Benchmark_ParseLine_CustomPattern(b *testing.B) {
 	benchM = m
 }
 
+// Test a very simple parse pattern.
+func TestSimpleParse(t *testing.T) {
+	p := &Parser{
+		Patterns: []string{"%{TESTLOG}"},
+		CustomPatterns: `
+			TESTLOG %{NUMBER:num:int} %{WORD:client}
+		`,
+	}
+	assert.NoError(t, p.Compile())
+
+	m, err := p.ParseLine(`142 bot`)
+	assert.NoError(t, err)
+	require.NotNil(t, m)
+
+	assert.Equal(t,
+		map[string]interface{}{
+			"num":    int64(142),
+			"client": "bot",
+		},
+		m.Fields())
+}
+
+// Verify that patterns with a regex lookahead compile but error at parse time.
+func TestParsePatternsWithLookahead(t *testing.T) {
+	p := &Parser{
+		Patterns: []string{"%{MYLOG}"},
+		CustomPatterns: `
+			NOBOT ((?!bot|crawl).)*
+			MYLOG %{NUMBER:num:int} %{NOBOT:client}
+		`,
+	}
+	assert.NoError(t, p.Compile())
+
+	_, err := p.ParseLine(`1466004605359052000 bot`)
+	assert.Error(t, err)
+}
+
 func TestMeasurementName(t *testing.T) {
 	p := &Parser{
 		Measurement: "my_web_log",
diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go
index 8ec328358..a22832277 100644
--- a/plugins/inputs/logparser/logparser.go
+++ b/plugins/inputs/logparser/logparser.go
@@ -226,6 +226,8 @@ func (l *LogParserPlugin) parser() {
 				if m != nil {
 					l.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
 				}
+			} else {
+				log.Println("E! Error parsing log line: " + err.Error())
 			}
 		}
 	}

From 9e810ac46369e723f14fb3c51cbada859b4d0f8c Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Thu, 23 Feb 2017 13:45:36 +0000
Subject: [PATCH 0137/1302] Handle nil os.FileInfo in filepath.Walk

closes #2466
---
 CHANGELOG.md              | 1 +
 internal/config/config.go | 4 ++++
 2 files changed, 5 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2a43e844f..622243458 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -66,6 +66,7 @@ be deprecated eventually.
 - [#2380](https://github.com/influxdata/telegraf/issues/2380): buffer_size field value is negative number from "internal" plugin.
 - [#2414](https://github.com/influxdata/telegraf/issues/2414): Missing error handling in the MySQL plugin leads to segmentation violation.
 - [#2178](https://github.com/influxdata/telegraf/issues/2178): logparser: regexp with lookahead.
+- [#2466](https://github.com/influxdata/telegraf/issues/2466): Telegraf can crash in LoadDirectory on 0600 files.
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/internal/config/config.go b/internal/config/config.go
index 90b158716..651c4e9ef 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -505,6 +505,10 @@ func PrintOutputConfig(name string) error {
 
 func (c *Config) LoadDirectory(path string) error {
 	walkfn := func(thispath string, info os.FileInfo, _ error) error {
+		if info == nil {
+			log.Printf("W! Telegraf is not permitted to read %s", thispath)
+			return nil
+		}
 		if info.IsDir() {
 			return nil
 		}
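
Editor's note: `filepath.Walk` invokes the walk function with a nil `os.FileInfo` and a non-nil error when a path cannot be visited; the original closure discarded the error argument (`_ error`) and dereferenced `info`, hence the crash. A self-contained sketch of the guard, with an illustrative directory path:

```go
package main

import (
	"log"
	"os"
	"path/filepath"
)

func main() {
	walkfn := func(path string, info os.FileInfo, err error) error {
		if info == nil {
			// Walk passes a nil FileInfo together with an error when a
			// path cannot be visited; bail out instead of dereferencing.
			log.Printf("W! Telegraf is not permitted to read %s: %v", path, err)
			return nil
		}
		if info.IsDir() {
			return nil
		}
		log.Printf("loading config file %s", path)
		return nil
	}
	if err := filepath.Walk("/etc/telegraf/telegraf.d", walkfn); err != nil {
		log.Fatal(err)
	}
}
```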

From a251adb838cbcdc24f678a59d6551cc3ec94fea2 Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Wed, 1 Mar 2017 11:22:42 +0000
Subject: [PATCH 0138/1302] Fix type conflict on windows ping plugin (#2462)

closes #1433
---
 CHANGELOG.md                             |  7 +++++++
 plugins/inputs/ping/ping_windows.go      | 12 ++++++------
 plugins/inputs/ping/ping_windows_test.go |  6 +++---
 3 files changed, 16 insertions(+), 9 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 622243458..d8dc382d7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,12 @@
 
 ### Release Notes
 
+- Users of the windows `ping` plugin will need to drop or migrate their
+measurements in order to continue using the plugin. The reason for this is that
+the windows plugin was outputting a different type than the linux plugin. This
+made it impossible to use the `ping` plugin for both windows and linux
+machines.
+
 - Ceph: the `ceph_pgmap_state` metric content has been modified to use a unique field `count`, with each state expressed as a `state` tag.
 
 Telegraf < 1.3:
@@ -65,6 +71,7 @@ be deprecated eventually.
 - [#2390](https://github.com/influxdata/telegraf/issues/2390): Empty tag value causes error on InfluxDB output.
 - [#2380](https://github.com/influxdata/telegraf/issues/2380): buffer_size field value is negative number from "internal" plugin.
 - [#2414](https://github.com/influxdata/telegraf/issues/2414): Missing error handling in the MySQL plugin leads to segmentation violation.
+- [#2462](https://github.com/influxdata/telegraf/pull/2462): Fix type conflict in windows ping plugin.
 - [#2178](https://github.com/influxdata/telegraf/issues/2178): logparser: regexp with lookahead.
 - [#2466](https://github.com/influxdata/telegraf/issues/2466): Telegraf can crash in LoadDirectory on 0600 files.
 
diff --git a/plugins/inputs/ping/ping_windows.go b/plugins/inputs/ping/ping_windows.go
index 7fb112810..b1212eaaa 100644
--- a/plugins/inputs/ping/ping_windows.go
+++ b/plugins/inputs/ping/ping_windows.go
@@ -40,10 +40,10 @@ func (s *Ping) Description() string {
 const sampleConfig = `
 	## urls to ping
 	urls = ["www.google.com"] # required
-	
+
 	## number of pings to send per collection (ping -n )
 	count = 4 # required
-	
+
 	## Ping timeout, in seconds. 0 means default timeout (ping -w )
 	Timeout = 0
 `
@@ -64,7 +64,7 @@ func hostPinger(timeout float64, args ...string) (string, error) {
 }
 
 // processPingOutput takes in a string output from the ping command
-// based on linux implementation but using regex ( multilanguage support ) ( shouldn't affect the performance of the program )
+// based on linux implementation but using regex ( multilanguage support )
 // It returns (, , , , , )
 func processPingOutput(out string) (int, int, int, int, int, int, error) {
 	// So find a line contain 3 numbers except reply lines
@@ -189,13 +189,13 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error {
 				"percent_reply_loss":  lossReply,
 			}
 			if avg > 0 {
-				fields["average_response_ms"] = avg
+				fields["average_response_ms"] = float64(avg)
 			}
 			if min > 0 {
-				fields["minimum_response_ms"] = min
+				fields["minimum_response_ms"] = float64(min)
 			}
 			if max > 0 {
-				fields["maximum_response_ms"] = max
+				fields["maximum_response_ms"] = float64(max)
 			}
 			acc.AddFields("ping", fields, tags)
 		}(url)
diff --git a/plugins/inputs/ping/ping_windows_test.go b/plugins/inputs/ping/ping_windows_test.go
index 34428b814..b55b7955b 100644
--- a/plugins/inputs/ping/ping_windows_test.go
+++ b/plugins/inputs/ping/ping_windows_test.go
@@ -77,9 +77,9 @@ func TestPingGather(t *testing.T) {
 		"reply_received":      4,
 		"percent_packet_loss": 0.0,
 		"percent_reply_loss":  0.0,
-		"average_response_ms": 50,
-		"minimum_response_ms": 50,
-		"maximum_response_ms": 52,
+		"average_response_ms": 50.0,
+		"minimum_response_ms": 50.0,
+		"maximum_response_ms": 52.0,
 	}
 	acc.AssertContainsTaggedFields(t, "ping", fields, tags)
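
Editor's note: for context, InfluxDB rejects a write when a field's type conflicts with the type already stored for that field, which is why mixed integer (windows) and float (linux) response times broke dual-platform use of the measurement, and why the release notes above call for dropping or migrating existing data. A tiny illustration of the normalized field map:

```go
package main

import "fmt"

// fields converts parsed response times to float64 so the windows plugin
// emits the same field types as the linux implementation.
func fields(avg, min, max int) map[string]interface{} {
	return map[string]interface{}{
		"average_response_ms": float64(avg),
		"minimum_response_ms": float64(min),
		"maximum_response_ms": float64(max),
	}
}

func main() {
	for k, v := range fields(50, 50, 52) {
		fmt.Printf("%s=%v (%T)\n", k, v, v)
	}
}
```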
 

From 96185159264e5e1a5259cf6329c71ac3a3944ca8 Mon Sep 17 00:00:00 2001
From: Chris Koehnke 
Date: Thu, 2 Mar 2017 03:43:33 -0500
Subject: [PATCH 0139/1302] Disk counter array newline (#2481)

Tweak formatting of `LogicalDisk` counter array to have one entry per
line.
---
 etc/telegraf_windows.conf | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf
index 7380ab8a3..535b0f414 100644
--- a/etc/telegraf_windows.conf
+++ b/etc/telegraf_windows.conf
@@ -117,7 +117,8 @@
     Instances = ["*"]
     Counters = [
       "% Idle Time",
-      "% Disk Time","% Disk Read Time",
+      "% Disk Time",
+      "% Disk Read Time",
       "% Disk Write Time",
       "Current Disk Queue Length",
       "% Free Space",

From 1873abd2484987280b29322cfb03b29f471fca7b Mon Sep 17 00:00:00 2001
From: Charles-Henri 
Date: Thu, 2 Mar 2017 10:58:26 +0100
Subject: [PATCH 0140/1302] Iptables input: document better the ignored rules
 behavior (#2482)

During issue #2215 it was highlighted that the current behavior, where
rules without a comment are ignored, is confusing for several users.

This commit improves the documentation and adds a NOTE to the sample
config to clarify the behavior for new users.
---
 CHANGELOG.md                        | 1 +
 plugins/inputs/iptables/README.md   | 6 +++++-
 plugins/inputs/iptables/iptables.go | 6 ++++--
 3 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d8dc382d7..fe5631767 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -74,6 +74,7 @@ be deprecated eventually.
 - [#2462](https://github.com/influxdata/telegraf/pull/2462): Fix type conflict in windows ping plugin.
 - [#2178](https://github.com/influxdata/telegraf/issues/2178): logparser: regexp with lookahead.
 - [#2466](https://github.com/influxdata/telegraf/issues/2466): Telegraf can crash in LoadDirectory on 0600 files.
+- [#2215](https://github.com/influxdata/telegraf/issues/2215): Iptables input: document better that rules without a comment are ignored.
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/plugins/inputs/iptables/README.md b/plugins/inputs/iptables/README.md
index a711f1d4e..2b226b9fe 100644
--- a/plugins/inputs/iptables/README.md
+++ b/plugins/inputs/iptables/README.md
@@ -2,7 +2,11 @@
 
 The iptables plugin gathers packets and bytes counters for rules within a set of table and chain from the Linux's iptables firewall.
 
-Rules are identified through associated comment. Rules without comment are ignored.
+Rules are identified through their associated comment. **Rules without a comment are ignored**.
+The comment is used as a unique ID for the rule, since rule numbers are not constant: they may change when rules are inserted or deleted at start-up or by automatic tools (interactive firewalls, fail2ban, ...).
+Also, when the rule set grows large (hundreds of lines), most people are interested in monitoring only a small part of it.
+
+Before using this plugin **you must ensure that the rules you want to monitor are named with a unique comment**. Comments are added using the `-m comment --comment "my comment"` iptables option.
 
 The iptables command requires CAP_NET_ADMIN and CAP_NET_RAW capabilities. You have several options to grant telegraf to run iptables:
 
diff --git a/plugins/inputs/iptables/iptables.go b/plugins/inputs/iptables/iptables.go
index 31b049d9f..eab33bf9f 100644
--- a/plugins/inputs/iptables/iptables.go
+++ b/plugins/inputs/iptables/iptables.go
@@ -33,14 +33,16 @@ func (ipt *Iptables) SampleConfig() string {
   ## iptables require root access on most systems.
   ## Setting 'use_sudo' to true will make use of sudo to run iptables.
   ## Users must configure sudo to allow telegraf user to run iptables with no password.
-  ## iptables can be restricted to only list command "iptables -nvL"
+  ## iptables can be restricted to only list command "iptables -nvL".
   use_sudo = false
   ## Setting 'use_lock' to true runs iptables with the "-w" option.
   ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl")
   use_lock = false
   ## defines the table to monitor:
   table = "filter"
-  ## defines the chains to monitor:
+  ## defines the chains to monitor.
+  ## NOTE: iptables rules without a comment will not be monitored.
+  ## Read the plugin documentation for more information.
   chains = [ "INPUT" ]
 `
 }

From 10744646dbdd52dea349dd7773bcd7115e4a838d Mon Sep 17 00:00:00 2001
From: Jack Zampolin 
Date: Fri, 3 Mar 2017 10:24:50 -0800
Subject: [PATCH 0141/1302] AMQP Consumer plugin (#1678)

---
 CHANGELOG.md                                  |   1 +
 README.md                                     |   3 +-
 plugins/inputs/all/all.go                     |   1 +
 plugins/inputs/amqp_consumer/README.md        |  47 +++
 plugins/inputs/amqp_consumer/amqp_consumer.go | 280 ++++++++++++++++++
 plugins/outputs/amqp/README.md                |  11 +-
 plugins/outputs/amqp/amqp.go                  |  20 +-
 7 files changed, 357 insertions(+), 6 deletions(-)
 create mode 100644 plugins/inputs/amqp_consumer/README.md
 create mode 100644 plugins/inputs/amqp_consumer/amqp_consumer.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index fe5631767..323b23915 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -55,6 +55,7 @@ be deprecated eventually.
 - [#2244](https://github.com/influxdata/telegraf/pull/2244): Support ipmi_sensor plugin querying local ipmi sensors.
 - [#2339](https://github.com/influxdata/telegraf/pull/2339): Increment gather_errors for all errors emitted by inputs.
 - [#2071](https://github.com/influxdata/telegraf/issues/2071): Use official docker SDK.
+- [#1678](https://github.com/influxdata/telegraf/pull/1678): Add AMQP consumer input plugin
 
 ### Bugfixes
 
diff --git a/README.md b/README.md
index 3dd06e93a..915c7b761 100644
--- a/README.md
+++ b/README.md
@@ -97,9 +97,10 @@ configuration options.
 
 ## Input Plugins
 
-* [aws cloudwatch](./plugins/inputs/cloudwatch)
 * [aerospike](./plugins/inputs/aerospike)
+* [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq)
 * [apache](./plugins/inputs/apache)
+* [aws cloudwatch](./plugins/inputs/cloudwatch)
 * [bcache](./plugins/inputs/bcache)
 * [cassandra](./plugins/inputs/cassandra)
 * [ceph](./plugins/inputs/ceph)
diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index 924dffe3d..a9147c53e 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -2,6 +2,7 @@ package all
 
 import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
+	_ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer"
 	_ "github.com/influxdata/telegraf/plugins/inputs/apache"
 	_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
 	_ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
diff --git a/plugins/inputs/amqp_consumer/README.md b/plugins/inputs/amqp_consumer/README.md
new file mode 100644
index 000000000..85780700f
--- /dev/null
+++ b/plugins/inputs/amqp_consumer/README.md
@@ -0,0 +1,47 @@
+# AMQP Consumer Input Plugin
+
+This plugin provides a consumer for use with AMQP 0-9-1, a prominent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/).
+
+Metrics are read from a topic exchange using the configured queue and binding_key.
+
+Message payload should be formatted in one of the [Telegraf Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).
+
+For an introduction to AMQP see:
+- https://www.rabbitmq.com/tutorials/amqp-concepts.html
+- https://www.rabbitmq.com/getstarted.html
+
+The following defaults are known to work with RabbitMQ:
+
+```toml
+# AMQP consumer plugin
+[[inputs.amqp_consumer]]
+  ## AMQP url
+  url = "amqp://localhost:5672/influxdb"
+  ## AMQP exchange
+  exchange = "telegraf"
+  ## AMQP queue name
+  queue = "telegraf"
+  ## Binding Key
+  binding_key = "#"
+
+  ## Controls how many messages the server will try to keep on the network
+  ## for consumers before receiving delivery acks.
+  # prefetch_count = 50
+
+  ## Auth method. PLAIN and EXTERNAL are supported.
+  ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
+  ## described here: https://www.rabbitmq.com/plugins.html
+  # auth_method = "PLAIN"
+  ## Optional SSL Config
+  # ssl_ca = "/etc/telegraf/ca.pem"
+  # ssl_cert = "/etc/telegraf/cert.pem"
+  # ssl_key = "/etc/telegraf/key.pem"
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "influx"
+```
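+
+As a quick end-to-end check, a minimal Go publisher can push one metric in
+line protocol to the exchange above. This is a sketch only; it assumes the
+defaults from the sample config and uses the same
+[streadway/amqp](https://github.com/streadway/amqp) client as the plugin:
+
+```go
+package main
+
+import "github.com/streadway/amqp"
+
+func main() {
+	conn, err := amqp.Dial("amqp://localhost:5672/influxdb")
+	if err != nil {
+		panic(err)
+	}
+	defer conn.Close()
+
+	ch, err := conn.Channel()
+	if err != nil {
+		panic(err)
+	}
+	defer ch.Close()
+
+	// The consumer binds its queue to the "telegraf" topic exchange with
+	// binding key "#", which matches any routing key.
+	err = ch.Publish("telegraf", "cpu.metrics", false, false, amqp.Publishing{
+		ContentType: "text/plain",
+		Body:        []byte("cpu,host=example usage_idle=99\n"),
+	})
+	if err != nil {
+		panic(err)
+	}
+}
+```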
diff --git a/plugins/inputs/amqp_consumer/amqp_consumer.go b/plugins/inputs/amqp_consumer/amqp_consumer.go
new file mode 100644
index 000000000..6f12244aa
--- /dev/null
+++ b/plugins/inputs/amqp_consumer/amqp_consumer.go
@@ -0,0 +1,280 @@
+package amqp_consumer
+
+import (
+	"fmt"
+	"log"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/streadway/amqp"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/plugins/inputs"
+	"github.com/influxdata/telegraf/plugins/parsers"
+)
+
+// AMQPConsumer is the top level struct for this plugin
+type AMQPConsumer struct {
+	URL string
+	// AMQP exchange
+	Exchange string
+	// Queue Name
+	Queue string
+	// Binding Key
+	BindingKey string `toml:"binding_key"`
+
+	// Controls how many messages the server will try to keep on the network
+	// for consumers before receiving delivery acks.
+	PrefetchCount int
+
+	// AMQP Auth method
+	AuthMethod string
+	// Path to CA file
+	SSLCA string `toml:"ssl_ca"`
+	// Path to host cert file
+	SSLCert string `toml:"ssl_cert"`
+	// Path to cert key file
+	SSLKey string `toml:"ssl_key"`
+	// Use SSL but skip chain & host verification
+	InsecureSkipVerify bool
+
+	parser parsers.Parser
+	conn   *amqp.Connection
+	wg     *sync.WaitGroup
+}
+
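+// externalAuth implements amqp.Authentication for the EXTERNAL SASL
+// mechanism: the response carries no credentials (a single NUL byte);
+// with RabbitMQ the identity is taken from the client TLS certificate.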
+type externalAuth struct{}
+
+func (a *externalAuth) Mechanism() string {
+	return "EXTERNAL"
+}
+func (a *externalAuth) Response() string {
+	return "\000"
+}
+
+const (
+	DefaultAuthMethod    = "PLAIN"
+	DefaultPrefetchCount = 50
+)
+
+func (a *AMQPConsumer) SampleConfig() string {
+	return `
+  ## AMQP url
+  url = "amqp://localhost:5672/influxdb"
+  ## AMQP exchange
+  exchange = "telegraf"
+  ## AMQP queue name
+  queue = "telegraf"
+  ## Binding Key
+  binding_key = "#"
+
+  ## Maximum number of messages the server should give to the worker.
+  prefetch_count = 50
+
+  ## Auth method. PLAIN and EXTERNAL are supported
+  ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
+  ## described here: https://www.rabbitmq.com/plugins.html
+  # auth_method = "PLAIN"
+
+  ## Optional SSL Config
+  # ssl_ca = "/etc/telegraf/ca.pem"
+  # ssl_cert = "/etc/telegraf/cert.pem"
+  # ssl_key = "/etc/telegraf/key.pem"
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "influx"
+`
+}
+
+func (a *AMQPConsumer) Description() string {
+	return "AMQP consumer plugin"
+}
+
+func (a *AMQPConsumer) SetParser(parser parsers.Parser) {
+	a.parser = parser
+}
+
+// All gathering is done in the Start function
+func (a *AMQPConsumer) Gather(_ telegraf.Accumulator) error {
+	return nil
+}
+
+func (a *AMQPConsumer) createConfig() (*amqp.Config, error) {
+	// make new tls config
+	tls, err := internal.GetTLSConfig(
+		a.SSLCert, a.SSLKey, a.SSLCA, a.InsecureSkipVerify)
+	if err != nil {
+		return nil, err
+	}
+
+	// parse auth method
+	var sasl []amqp.Authentication // nil by default
+
+	if strings.ToUpper(a.AuthMethod) == "EXTERNAL" {
+		sasl = []amqp.Authentication{&externalAuth{}}
+	}
+
+	config := amqp.Config{
+		TLSClientConfig: tls,
+		SASL:            sasl, // if nil, it will be PLAIN
+	}
+	return &config, nil
+}
+
+// Start satisfies the telegraf.ServiceInput interface
+func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error {
+	amqpConf, err := a.createConfig()
+	if err != nil {
+		return err
+	}
+
+	msgs, err := a.connect(amqpConf)
+	if err != nil {
+		return err
+	}
+
+	a.wg = &sync.WaitGroup{}
+	a.wg.Add(1)
+	go a.process(msgs, acc)
+
+	go func() {
+		err := <-a.conn.NotifyClose(make(chan *amqp.Error))
+		if err == nil {
+			return
+		}
+
+		log.Printf("I! AMQP consumer connection closed: %s; trying to reconnect", err)
+		for {
+			msgs, err := a.connect(amqpConf)
+			if err != nil {
+				log.Printf("E! AMQP connection failed: %s", err)
+				time.Sleep(10 * time.Second)
+				continue
+			}
+
+			a.wg.Add(1)
+			go a.process(msgs, acc)
+			break
+		}
+	}()
+
+	return nil
+}
+
+func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, error) {
+	conn, err := amqp.DialConfig(a.URL, *amqpConf)
+	if err != nil {
+		return nil, err
+	}
+	a.conn = conn
+
+	ch, err := conn.Channel()
+	if err != nil {
+		return nil, fmt.Errorf("Failed to open a channel: %s", err)
+	}
+
+	err = ch.ExchangeDeclare(
+		a.Exchange, // name
+		"topic",    // type
+		true,       // durable
+		false,      // auto-deleted
+		false,      // internal
+		false,      // no-wait
+		nil,        // arguments
+	)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to declare an exchange: %s", err)
+	}
+
+	q, err := ch.QueueDeclare(
+		a.Queue, // queue
+		true,    // durable
+		false,   // delete when unused
+		false,   // exclusive
+		false,   // no-wait
+		nil,     // arguments
+	)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to declare a queue: %s", err)
+	}
+
+	err = ch.QueueBind(
+		q.Name,       // queue
+		a.BindingKey, // binding-key
+		a.Exchange,   // exchange
+		false,
+		nil,
+	)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to bind a queue: %s", err)
+	}
+
+	err = ch.Qos(
+		a.PrefetchCount,
+		0,     // prefetch-size
+		false, // global
+	)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to set QoS: %s", err)
+	}
+
+	msgs, err := ch.Consume(
+		q.Name, // queue
+		"",     // consumer
+		false,  // auto-ack
+		false,  // exclusive
+		false,  // no-local
+		false,  // no-wait
+		nil,    // arguments
+	)
+	if err != nil {
+		return nil, fmt.Errorf("Failed establishing connection to queue: %s", err)
+	}
+
+	log.Println("I! Started AMQP consumer")
+	return msgs, err
+}
+
+// Read messages from queue and add them to the Accumulator
+func (a *AMQPConsumer) process(msgs <-chan amqp.Delivery, acc telegraf.Accumulator) {
+	defer a.wg.Done()
+	for d := range msgs {
+		metrics, err := a.parser.Parse(d.Body)
+		if err != nil {
+			log.Printf("E! %v: error parsing metric - %v", err, string(d.Body))
+		} else {
+			for _, m := range metrics {
+				acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
+			}
+		}
+
+		d.Ack(false)
+	}
+	log.Printf("I! AMQP consumer queue closed")
+}
+
+func (a *AMQPConsumer) Stop() {
+	err := a.conn.Close()
+	if err != nil && err != amqp.ErrClosed {
+		log.Printf("E! Error closing AMQP connection: %s", err)
+		return
+	}
+	a.wg.Wait()
+	log.Println("I! Stopped AMQP service")
+}
+
+func init() {
+	inputs.Add("amqp_consumer", func() telegraf.Input {
+		return &AMQPConsumer{
+			AuthMethod:    DefaultAuthMethod,
+			PrefetchCount: DefaultPrefetchCount,
+		}
+	})
+}
diff --git a/plugins/outputs/amqp/README.md b/plugins/outputs/amqp/README.md
index d49c507b8..208ae934c 100644
--- a/plugins/outputs/amqp/README.md
+++ b/plugins/outputs/amqp/README.md
@@ -1,13 +1,18 @@
 # AMQP Output Plugin
 
-This plugin writes to a AMQP exchange using tag, defined in configuration file
-as RoutingTag, as a routing key.
+This plugin writes to an AMQP 0-9-1 exchange, a prominent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/).
+
+Metrics are written to a topic exchange, using the tag named by RoutingTag in the configuration file as the routing key.
 
 If RoutingTag is empty, then empty routing key will be used.
 Metrics are grouped in batches by RoutingTag.
 
 This plugin doesn't bind exchange to a queue, so it should be done by consumer.
 
+For an introduction to AMQP see:
+- https://www.rabbitmq.com/tutorials/amqp-concepts.html
+- https://www.rabbitmq.com/getstarted.html
+
 ### Configuration:
 
 ```
@@ -18,6 +23,8 @@ This plugin doesn't bind exchange to a queue, so it should be done by consumer.
   ## AMQP exchange
   exchange = "telegraf"
   ## Auth method. PLAIN and EXTERNAL are supported
+  ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
+  ## described here: https://www.rabbitmq.com/plugins.html
   # auth_method = "PLAIN"
   ## Telegraf tag to use as a routing key
   ##  ie, if this tag exists, it's value will be used as the routing key
diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go
index d86cac596..7b4c7d4c9 100644
--- a/plugins/outputs/amqp/amqp.go
+++ b/plugins/outputs/amqp/amqp.go
@@ -40,6 +40,7 @@ type AMQP struct {
 	// Use SSL but skip chain & host verification
 	InsecureSkipVerify bool
 
+	conn    *amqp.Connection
 	channel *amqp.Channel
 	sync.Mutex
 	headers amqp.Table
@@ -68,6 +69,8 @@ var sampleConfig = `
   ## AMQP exchange
   exchange = "telegraf"
   ## Auth method. PLAIN and EXTERNAL are supported
+  ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
+  ## described here: https://www.rabbitmq.com/plugins.html
   # auth_method = "PLAIN"
   ## Telegraf tag to use as a routing key
   ##  ie, if this tag exists, it's value will be used as the routing key
@@ -129,6 +132,8 @@ func (q *AMQP) Connect() error {
 	if err != nil {
 		return err
 	}
+	q.conn = connection
+
 	channel, err := connection.Channel()
 	if err != nil {
 		return fmt.Errorf("Failed to open a channel: %s", err)
@@ -148,7 +153,11 @@ func (q *AMQP) Connect() error {
 	}
 	q.channel = channel
 	go func() {
-		log.Printf("I! Closing: %s", <-connection.NotifyClose(make(chan *amqp.Error)))
+		err := <-connection.NotifyClose(make(chan *amqp.Error))
+		if err == nil {
+			return
+		}
+		log.Printf("I! Closing: %s", err)
 		log.Printf("I! Trying to reconnect")
 		for err := q.Connect(); err != nil; err = q.Connect() {
 			log.Println("E! ", err.Error())
@@ -160,7 +169,12 @@ func (q *AMQP) Connect() error {
 }
 
 func (q *AMQP) Close() error {
-	return q.channel.Close()
+	err := q.conn.Close()
+	if err != nil && err != amqp.ErrClosed {
+		log.Printf("E! Error closing AMQP connection: %s", err)
+		return err
+	}
+	return nil
 }
 
 func (q *AMQP) SampleConfig() string {
@@ -207,7 +221,7 @@ func (q *AMQP) Write(metrics []telegraf.Metric) error {
 				Body:        buf,
 			})
 		if err != nil {
-			return fmt.Errorf("FAILED to send amqp message: %s", err)
+			return fmt.Errorf("Failed to send AMQP message: %s", err)
 		}
 	}
 	return nil

From 76bcdecd21e7aa63734564715563392fa64ad0a6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fran=C3=A7ois=20de=20Metz?= 
Date: Mon, 6 Mar 2017 12:34:41 +0000
Subject: [PATCH 0142/1302] Respond 200 when receiving a ping event. (#2492)

---
 plugins/inputs/webhooks/github/github_webhooks.go      | 9 ++++++---
 plugins/inputs/webhooks/github/github_webhooks_test.go | 4 ++++
 2 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/plugins/inputs/webhooks/github/github_webhooks.go b/plugins/inputs/webhooks/github/github_webhooks.go
index 139c76971..a31c6fdf2 100644
--- a/plugins/inputs/webhooks/github/github_webhooks.go
+++ b/plugins/inputs/webhooks/github/github_webhooks.go
@@ -34,9 +34,10 @@ func (gh *GithubWebhook) eventHandler(w http.ResponseWriter, r *http.Request) {
 		w.WriteHeader(http.StatusBadRequest)
 		return
 	}
-
-	p := e.NewMetric()
-	gh.acc.AddFields("github_webhooks", p.Fields(), p.Tags(), p.Time())
+	if e != nil {
+		p := e.NewMetric()
+		gh.acc.AddFields("github_webhooks", p.Fields(), p.Tags(), p.Time())
+	}
 
 	w.WriteHeader(http.StatusOK)
 }
@@ -84,6 +85,8 @@ func NewEvent(data []byte, name string) (Event, error) {
 		return generateEvent(data, &MembershipEvent{})
 	case "page_build":
 		return generateEvent(data, &PageBuildEvent{})
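+	// A "ping" event is GitHub's webhook handshake; it carries no metric
+	// payload, so return a nil Event and let the handler reply 200 OK.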
+	case "ping":
+		return nil, nil
 	case "public":
 		return generateEvent(data, &PublicEvent{})
 	case "pull_request":
diff --git a/plugins/inputs/webhooks/github/github_webhooks_test.go b/plugins/inputs/webhooks/github/github_webhooks_test.go
index 7bee5372d..0ec991726 100644
--- a/plugins/inputs/webhooks/github/github_webhooks_test.go
+++ b/plugins/inputs/webhooks/github/github_webhooks_test.go
@@ -25,6 +25,10 @@ func TestCommitCommentEvent(t *testing.T) {
 	GithubWebhookRequest("commit_comment", CommitCommentEventJSON(), t)
 }
 
+func TestPingEvent(t *testing.T) {
+	GithubWebhookRequest("ping", "", t)
+}
+
 func TestDeleteEvent(t *testing.T) {
 	GithubWebhookRequest("delete", DeleteEventJSON(), t)
 }

From 7a8e8217318236b9fc0e5b306cbe91b1142a7472 Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Mon, 6 Mar 2017 15:59:36 +0000
Subject: [PATCH 0143/1302] Revert "Procstat: don't cache PIDs" (#2479)

---
 CHANGELOG.md                              |  2 -
 plugins/inputs/procstat/procstat.go       | 44 ++++++++++++++++---
 plugins/inputs/procstat/procstat_test.go  |  2 +
 plugins/inputs/procstat/spec_processor.go | 53 ++++++++++-------------
 4 files changed, 61 insertions(+), 40 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 323b23915..5773179b6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -61,8 +61,6 @@ be deprecated eventually.
 
 - [#2077](https://github.com/influxdata/telegraf/issues/2077): SQL Server Input - Arithmetic overflow error converting numeric to data type int.
 - [#2262](https://github.com/influxdata/telegraf/issues/2262): Flush jitter can inhibit metric collection.
-- [#2287](https://github.com/influxdata/telegraf/issues/2287): Kubernetes input: Handle null startTime for stopped pods
-- [#1636](https://github.com/influxdata/telegraf/issues/1636): procstat - stop caching PIDs.
 - [#2318](https://github.com/influxdata/telegraf/issues/2318): haproxy input - Add missing fields.
 - [#2287](https://github.com/influxdata/telegraf/issues/2287): Kubernetes input: Handle null startTime for stopped pods.
 - [#2356](https://github.com/influxdata/telegraf/issues/2356): cpu input panic when /proc/stat is empty.
diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go
index 565d0ebd1..929490e4a 100644
--- a/plugins/inputs/procstat/procstat.go
+++ b/plugins/inputs/procstat/procstat.go
@@ -8,6 +8,8 @@ import (
 	"strconv"
 	"strings"
 
+	"github.com/shirou/gopsutil/process"
+
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
@@ -21,12 +23,15 @@ type Procstat struct {
 	User        string
 	PidTag      bool
 
+	// pidmap maps a pid to a process object, so we don't recreate every gather
+	pidmap map[int32]*process.Process
 	// tagmap maps a pid to a map of tags for that pid
 	tagmap map[int32]map[string]string
 }
 
 func NewProcstat() *Procstat {
 	return &Procstat{
+		pidmap: make(map[int32]*process.Process),
 		tagmap: make(map[int32]map[string]string),
 	}
 }
@@ -62,26 +67,51 @@ func (_ *Procstat) Description() string {
 }
 
 func (p *Procstat) Gather(acc telegraf.Accumulator) error {
-	pids, err := p.getAllPids()
+	err := p.createProcesses()
 	if err != nil {
 		log.Printf("E! Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s",
 			p.Exe, p.PidFile, p.Pattern, p.User, err.Error())
 	} else {
-		for _, pid := range pids {
+		for pid, proc := range p.pidmap {
 			if p.PidTag {
 				p.tagmap[pid]["pid"] = fmt.Sprint(pid)
 			}
-			p := NewSpecProcessor(p.ProcessName, p.Prefix, pid, acc, p.tagmap[pid])
-			err := p.pushMetrics()
-			if err != nil {
-				log.Printf("E! Error: procstat: %s", err.Error())
-			}
+			p := NewSpecProcessor(p.ProcessName, p.Prefix, pid, acc, proc, p.tagmap[pid])
+			p.pushMetrics()
 		}
 	}
 
 	return nil
 }
 
+func (p *Procstat) createProcesses() error {
+	var errstring string
+	var outerr error
+
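+	// Accumulate per-PID errors into a single message so one bad PID
+	// does not abort collection for the remaining processes.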
+	pids, err := p.getAllPids()
+	if err != nil {
+		errstring += err.Error() + " "
+	}
+
+	for _, pid := range pids {
+		_, ok := p.pidmap[pid]
+		if !ok {
+			proc, err := process.NewProcess(pid)
+			if err == nil {
+				p.pidmap[pid] = proc
+			} else {
+				errstring += err.Error() + " "
+			}
+		}
+	}
+
+	if errstring != "" {
+		outerr = fmt.Errorf("%s", errstring)
+	}
+
+	return outerr
+}
+
 func (p *Procstat) getAllPids() ([]int32, error) {
 	var pids []int32
 	var err error
diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go
index 001537178..ccc72bdbb 100644
--- a/plugins/inputs/procstat/procstat_test.go
+++ b/plugins/inputs/procstat/procstat_test.go
@@ -6,6 +6,7 @@ import (
 	"strconv"
 	"testing"
 
+	"github.com/shirou/gopsutil/process"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
@@ -23,6 +24,7 @@ func TestGather(t *testing.T) {
 	p := Procstat{
 		PidFile: file.Name(),
 		Prefix:  "foo",
+		pidmap:  make(map[int32]*process.Process),
 		tagmap:  make(map[int32]map[string]string),
 	}
 	p.Gather(&acc)
diff --git a/plugins/inputs/procstat/spec_processor.go b/plugins/inputs/procstat/spec_processor.go
index 1b9f63126..3b56fbc3e 100644
--- a/plugins/inputs/procstat/spec_processor.go
+++ b/plugins/inputs/procstat/spec_processor.go
@@ -1,7 +1,6 @@
 package procstat
 
 import (
-	"fmt"
 	"time"
 
 	"github.com/shirou/gopsutil/process"
@@ -10,13 +9,12 @@ import (
 )
 
 type SpecProcessor struct {
-	ProcessName string
-	Prefix      string
-	pid         int32
-	tags        map[string]string
-	fields      map[string]interface{}
-	acc         telegraf.Accumulator
-	proc        *process.Process
+	Prefix string
+	pid    int32
+	tags   map[string]string
+	fields map[string]interface{}
+	acc    telegraf.Accumulator
+	proc   *process.Process
 }
 
 func NewSpecProcessor(
@@ -24,35 +22,29 @@ func NewSpecProcessor(
 	prefix string,
 	pid int32,
 	acc telegraf.Accumulator,
+	p *process.Process,
 	tags map[string]string,
 ) *SpecProcessor {
+	if processName != "" {
+		tags["process_name"] = processName
+	} else {
+		name, err := p.Name()
+		if err == nil {
+			tags["process_name"] = name
+		}
+	}
 	return &SpecProcessor{
-		ProcessName: processName,
-		Prefix:      prefix,
-		pid:         pid,
-		tags:        tags,
-		fields:      make(map[string]interface{}),
-		acc:         acc,
+		Prefix: prefix,
+		pid:    pid,
+		tags:   tags,
+		fields: make(map[string]interface{}),
+		acc:    acc,
+		proc:   p,
 	}
 }
 
-func (p *SpecProcessor) pushMetrics() error {
+func (p *SpecProcessor) pushMetrics() {
 	var prefix string
-	proc, err := process.NewProcess(p.pid)
-	if err != nil {
-		return fmt.Errorf("Failed to open process with pid '%d'. Error: '%s'",
-			p.pid, err)
-	}
-	p.proc = proc
-	if p.ProcessName != "" {
-		p.tags["process_name"] = p.ProcessName
-	} else {
-		name, err := p.proc.Name()
-		if err == nil {
-			p.tags["process_name"] = name
-		}
-	}
-
 	if p.Prefix != "" {
 		prefix = p.Prefix + "_"
 	}
@@ -115,5 +107,4 @@ func (p *SpecProcessor) pushMetrics() error {
 	}
 
 	p.acc.AddFields("procstat", fields, p.tags)
-	return nil
 }

From ceb36adac7b459d1b286ac20c411ea288c1558b4 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Mon, 6 Mar 2017 11:20:53 -0800
Subject: [PATCH 0144/1302] Update issue template

---
 .github/ISSUE_TEMPLATE.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index b59da651a..f4190e3ec 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -1,7 +1,7 @@
 ## Directions
 
 GitHub Issues are reserved for actionable bug reports and feature requests.
-General questions should be sent to the [InfluxDB mailing list](https://groups.google.com/forum/#!forum/influxdb).
+General questions should be asked at the [InfluxData Community](https://community.influxdata.com) site.
 
 Before opening an issue, search for similar bug reports or feature requests on GitHub Issues.
 If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below.

From 9df2974a0fa227c39725cc6bc2f9a4176343f996 Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Tue, 28 Feb 2017 12:24:41 +0000
Subject: [PATCH 0145/1302] update gopsutil for file close fixes

hopefully this will fix #2472
---
 Godeps | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Godeps b/Godeps
index de326cb19..2d0419ef6 100644
--- a/Godeps
+++ b/Godeps
@@ -44,7 +44,7 @@ github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4
 github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
 github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
 github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
-github.com/shirou/gopsutil 77b5d0080adb6f028e457906f1944d9fcca34442
+github.com/shirou/gopsutil d371ba1293cb48fedc6850526ea48b3846c54f2c
 github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
 github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
 github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987

From 7513fcac4e3fc811549bdc132e35380c4668a88a Mon Sep 17 00:00:00 2001
From: vvvkamper 
Date: Thu, 2 Mar 2017 19:15:33 +0700
Subject: [PATCH 0146/1302] Fix part 2 of #1291

Added the PDH_FMT_NOCAP100 format option so that formatted counter values
are no longer capped at 100.

closes #2483
---
 CHANGELOG.md                            | 1 +
 plugins/inputs/win_perf_counters/pdh.go | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5773179b6..8418ffd21 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -74,6 +74,7 @@ be deprecated eventually.
 - [#2178](https://github.com/influxdata/telegraf/issues/2178): logparser: regexp with lookahead.
 - [#2466](https://github.com/influxdata/telegraf/issues/2466): Telegraf can crash in LoadDirectory on 0600 files.
 - [#2215](https://github.com/influxdata/telegraf/issues/2215): Iptables input: document better that rules without a comment are ignored.
+- [#2483](https://github.com/influxdata/telegraf/pull/2483): Fix win_perf_counters capping values at 100.
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/plugins/inputs/win_perf_counters/pdh.go b/plugins/inputs/win_perf_counters/pdh.go
index 36563d6b7..fa00e0603 100644
--- a/plugins/inputs/win_perf_counters/pdh.go
+++ b/plugins/inputs/win_perf_counters/pdh.go
@@ -331,7 +331,7 @@ func PdhCollectQueryData(hQuery PDH_HQUERY) uint32 {
 func PdhGetFormattedCounterValueDouble(hCounter PDH_HCOUNTER, lpdwType *uint32, pValue *PDH_FMT_COUNTERVALUE_DOUBLE) uint32 {
 	ret, _, _ := pdh_GetFormattedCounterValue.Call(
 		uintptr(hCounter),
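+		// PDH_FMT_NOCAP100 disables PDH's default capping of percentage
+		// counters at 100 (see #1291).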
-		uintptr(PDH_FMT_DOUBLE),
+		uintptr(PDH_FMT_DOUBLE|PDH_FMT_NOCAP100),
 		uintptr(unsafe.Pointer(lpdwType)),
 		uintptr(unsafe.Pointer(pValue)))
 
@@ -378,7 +378,7 @@ func PdhGetFormattedCounterValueDouble(hCounter PDH_HCOUNTER, lpdwType *uint32,
 func PdhGetFormattedCounterArrayDouble(hCounter PDH_HCOUNTER, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *PDH_FMT_COUNTERVALUE_ITEM_DOUBLE) uint32 {
 	ret, _, _ := pdh_GetFormattedCounterArrayW.Call(
 		uintptr(hCounter),
-		uintptr(PDH_FMT_DOUBLE),
+		uintptr(PDH_FMT_DOUBLE|PDH_FMT_NOCAP100),
 		uintptr(unsafe.Pointer(lpdwBufferSize)),
 		uintptr(unsafe.Pointer(lpdwBufferCount)),
 		uintptr(unsafe.Pointer(itemBuffer)))

From 56aa89e5c86f38e566c6ecadb568c68cd9dcaea7 Mon Sep 17 00:00:00 2001
From: Robpol86 
Date: Wed, 8 Mar 2017 08:38:36 -0800
Subject: [PATCH 0147/1302] Exporting Ipmi.Path to be set by config. (#2498)

* Exporting Ipmi.Path to be set by config.

Currently "path" is not exported, giving this error when users try to
override the variable via telegraf.conf as per the sample config:

`field corresponding to `path' is not defined in `*ipmi_sensor.Ipmi'`

Exporting the variable solves the problem.

* Updating changelog.
---
 CHANGELOG.md                            | 1 +
 plugins/inputs/ipmi_sensor/ipmi.go      | 8 ++++----
 plugins/inputs/ipmi_sensor/ipmi_test.go | 4 ++--
 3 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8418ffd21..3a8e586f1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -75,6 +75,7 @@ be deprecated eventually.
 - [#2466](https://github.com/influxdata/telegraf/issues/2466): Telegraf can crash in LoadDirectory on 0600 files.
 - [#2215](https://github.com/influxdata/telegraf/issues/2215): Iptables input: document better that rules without a comment are ignored.
 - [#2483](https://github.com/influxdata/telegraf/pull/2483): Fix win_perf_counters capping values at 100.
+- [#2498](https://github.com/influxdata/telegraf/pull/2498): Exporting Ipmi.Path to be set by config.
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go
index b2389a675..0114812d3 100644
--- a/plugins/inputs/ipmi_sensor/ipmi.go
+++ b/plugins/inputs/ipmi_sensor/ipmi.go
@@ -17,7 +17,7 @@ var (
 )
 
 type Ipmi struct {
-	path    string
+	Path    string
 	Servers []string
 }
 
@@ -44,7 +44,7 @@ func (m *Ipmi) Description() string {
 }
 
 func (m *Ipmi) Gather(acc telegraf.Accumulator) error {
-	if len(m.path) == 0 {
+	if len(m.Path) == 0 {
 		return fmt.Errorf("ipmitool not found: verify that ipmitool is installed and that ipmitool is in your PATH")
 	}
 
@@ -76,7 +76,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error {
 	}
 
 	opts = append(opts, "sdr")
-	cmd := execCommand(m.path, opts...)
+	cmd := execCommand(m.Path, opts...)
 	out, err := internal.CombinedOutputTimeout(cmd, time.Second*5)
 	if err != nil {
 		return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
@@ -149,7 +149,7 @@ func init() {
 	m := Ipmi{}
 	path, _ := exec.LookPath("ipmitool")
 	if len(path) > 0 {
-		m.path = path
+		m.Path = path
 	}
 	inputs.Add("ipmi_sensor", func() telegraf.Input {
 		return &m
diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go
index 94dc066c8..84bcdcac0 100644
--- a/plugins/inputs/ipmi_sensor/ipmi_test.go
+++ b/plugins/inputs/ipmi_sensor/ipmi_test.go
@@ -14,7 +14,7 @@ import (
 func TestGather(t *testing.T) {
 	i := &Ipmi{
 		Servers: []string{"USERID:PASSW0RD@lan(192.168.1.1)"},
-		path:    "ipmitool",
+		Path:    "ipmitool",
 	}
 	// overwriting exec commands with mock commands
 	execCommand = fakeExecCommand
@@ -118,7 +118,7 @@ func TestGather(t *testing.T) {
 	}
 
 	i = &Ipmi{
-		path: "ipmitool",
+		Path: "ipmitool",
 	}
 
 	err = i.Gather(&acc)

From ae6a5d2255f7eeadc910c424632a665275c8b598 Mon Sep 17 00:00:00 2001
From: jeremydenoun 
Date: Wed, 8 Mar 2017 23:08:55 +0100
Subject: [PATCH 0148/1302] Remove warning if parse empty content (#2500)

closes #2448
---
 CHANGELOG.md                     |  1 +
 metric/parse.go                  |  3 +++
 plugins/inputs/exec/exec_test.go | 29 +++++++++++++++++++++++++++++
 3 files changed, 33 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3a8e586f1..01eeb8bef 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -76,6 +76,7 @@ be deprecated eventually.
 - [#2215](https://github.com/influxdata/telegraf/issues/2215): Iptables input: document better that rules without a comment are ignored.
 - [#2483](https://github.com/influxdata/telegraf/pull/2483): Fix win_perf_counters capping values at 100.
 - [#2498](https://github.com/influxdata/telegraf/pull/2498): Exporting Ipmi.Path to be set by config.
+- [#2500](https://github.com/influxdata/telegraf/pull/2500): Remove warning if parse empty content
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/metric/parse.go b/metric/parse.go
index fe2cffdc1..15b88e552 100644
--- a/metric/parse.go
+++ b/metric/parse.go
@@ -44,6 +44,9 @@ func Parse(buf []byte) ([]telegraf.Metric, error) {
 }
 
 func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
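+	// Empty input is valid (e.g. an exec command that produced no
+	// output); return no metrics instead of a "buffer too short" error.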
+	if len(buf) == 0 {
+		return []telegraf.Metric{}, nil
+	}
 	if len(buf) <= 6 {
 		return []telegraf.Metric{}, makeError("buffer too short", buf, 0)
 	}
diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go
index 71d33fb71..975eb9642 100644
--- a/plugins/inputs/exec/exec_test.go
+++ b/plugins/inputs/exec/exec_test.go
@@ -37,6 +37,8 @@ const malformedJson = `
 `
 
 const lineProtocol = "cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1\n"
+const lineProtocolEmpty = ""
+const lineProtocolShort = "ab"
 
 const lineProtocolMulti = `
 cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
@@ -167,6 +169,33 @@ func TestLineProtocolParse(t *testing.T) {
 	acc.AssertContainsTaggedFields(t, "cpu", fields, tags)
 }
 
+func TestLineProtocolEmptyParse(t *testing.T) {
+	parser, _ := parsers.NewInfluxParser()
+	e := &Exec{
+		runner:   newRunnerMock([]byte(lineProtocolEmpty), nil),
+		Commands: []string{"line-protocol"},
+		parser:   parser,
+	}
+
+	var acc testutil.Accumulator
+	err := e.Gather(&acc)
+	require.NoError(t, err)
+}
+
+func TestLineProtocolShortParse(t *testing.T) {
+	parser, _ := parsers.NewInfluxParser()
+	e := &Exec{
+		runner:   newRunnerMock([]byte(lineProtocolShort), nil),
+		Commands: []string{"line-protocol"},
+		parser:   parser,
+	}
+
+	var acc testutil.Accumulator
+	err := e.Gather(&acc)
+	require.Error(t, err)
+	assert.Contains(t, err.Error(), "buffer too short", "A buffer too short error was expected")
+}
+
 func TestLineProtocolParseMultiple(t *testing.T) {
 	parser, _ := parsers.NewInfluxParser()
 	e := &Exec{

From d243d69a09dd27b67c960fcead800669957fa90b Mon Sep 17 00:00:00 2001
From: Dennis Dryden 
Date: Thu, 9 Mar 2017 11:19:03 +0000
Subject: [PATCH 0149/1302] Add configuration docs to Postgresql input plugin
 (#2515)

* Add configuration docs to Postgresql input plugin

Add configuration docs to the PostgreSQL input plugin README (mostly from the source code), though I've not included the configuration example that seems to use all the connections on the database[1].

[1] https://github.com/influxdata/telegraf/issues/2410

* Fix typo in readme and sampleConfig string.
---
 plugins/inputs/postgresql/README.md     | 22 ++++++++++++++++++++++
 plugins/inputs/postgresql/postgresql.go |  2 +-
 2 files changed, 23 insertions(+), 1 deletion(-)

diff --git a/plugins/inputs/postgresql/README.md b/plugins/inputs/postgresql/README.md
index e309aa80f..aed041bc6 100644
--- a/plugins/inputs/postgresql/README.md
+++ b/plugins/inputs/postgresql/README.md
@@ -29,3 +29,25 @@ _* value ignored and therefore not recorded._
 
 
 More information about the meaning of these metrics can be found in the [PostgreSQL Documentation](http://www.postgresql.org/docs/9.2/static/monitoring-stats.html#PG-STAT-DATABASE-VIEW)
+
+## Configruation
+Specify address via a url matching:
+
+  `postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]`
+
+All connection parameters are optional. Without the dbname parameter, the driver will default to a database with the same name as the user. This dbname is just for instantiating a connection with the server and doesn't restrict the databases we are trying to grab metrics for.
+  
+A  list of databases to explicitly ignore.  If not specified, metrics for all databases are gathered.  Do NOT use with the 'databases' option.
+
+  `ignored_databases = ["postgres", "template0", "template1"]`
+  
+A list of databases to pull metrics about. If not specified, metrics for all databases are gathered.  Do NOT use with the 'ignored_databases' option.
+
+  `databases = ["app_production", "testing"]`
+  
+### Configuration example
+```
+[[inputs.postgresql]]
+  address = "postgres://telegraf@localhost/someDB"
+  ignored_databases = ["template0", "template1"]
+```
diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go
index 7019762ed..7c854dfd3 100644
--- a/plugins/inputs/postgresql/postgresql.go
+++ b/plugins/inputs/postgresql/postgresql.go
@@ -43,7 +43,7 @@ var sampleConfig = `
   # ignored_databases = ["postgres", "template0", "template1"]
 
   ## A list of databases to pull metrics about. If not specified, metrics for all
-  ## databases are gathered.  Do NOT use with the 'ignore_databases' option.
+  ## databases are gathered.  Do NOT use with the 'ignored_databases' option.
   # databases = ["app_production", "testing"]
 `
 

From 49c212337f49a50729f80f7d3577c5905da638df Mon Sep 17 00:00:00 2001
From: Timothy 
Date: Thu, 9 Mar 2017 06:21:03 -0500
Subject: [PATCH 0150/1302] Update CONFIGURATION.md (#2516)

Add information about default configuration file locations.  Also mention that the config directory option is available.
---
 docs/CONFIGURATION.md | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md
index 9b2eb99d8..ff4814b82 100644
--- a/docs/CONFIGURATION.md
+++ b/docs/CONFIGURATION.md
@@ -24,6 +24,16 @@ Environment variables can be used anywhere in the config file, simply prepend
 them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
 for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
 
+## Configuration file locations
+
+The location of the configuration file can be set via the `--config` command
+line flag. Telegraf will also pick up all files matching the pattern `*.conf` if
+the `--config-directory` command line flag is used.
+
+On most systems, the default locations are `/etc/telegraf/telegraf.conf` for
+the main configuration file and `/etc/telegraf/telegraf.d` for the directory of
+configuration files.
+
 # Global Tags
 
 Global tags can be specified in the `[global_tags]` section of the config file
@@ -351,4 +361,4 @@ to the system load metrics due to the `namepass` parameter.
 
 [[outputs.file]]
   files = ["stdout"]
-```
\ No newline at end of file
+```

From e811e2600d16872869e09f9fb769e3c536743d45 Mon Sep 17 00:00:00 2001
From: Cameron Sparr 
Date: Wed, 8 Mar 2017 15:26:33 +0000
Subject: [PATCH 0151/1302] create telegraf.d directory in tarball

closes #2513
---
 CHANGELOG.md     | 1 +
 scripts/build.py | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 01eeb8bef..f6053bd66 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -77,6 +77,7 @@ be deprecated eventually.
 - [#2483](https://github.com/influxdata/telegraf/pull/2483): Fix win_perf_counters capping values at 100.
 - [#2498](https://github.com/influxdata/telegraf/pull/2498): Exporting Ipmi.Path to be set by config.
 - [#2500](https://github.com/influxdata/telegraf/pull/2500): Remove warning if parse empty content
+- [#2513](https://github.com/influxdata/telegraf/issues/2513): create /etc/telegraf/telegraf.d directory in tarball.
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/scripts/build.py b/scripts/build.py
index 57208bf7f..aeaa04fd3 100755
--- a/scripts/build.py
+++ b/scripts/build.py
@@ -22,6 +22,7 @@ INSTALL_ROOT_DIR = "/usr/bin"
 LOG_DIR = "/var/log/telegraf"
 SCRIPT_DIR = "/usr/lib/telegraf/scripts"
 CONFIG_DIR = "/etc/telegraf"
+CONFIG_DIR_D = "/etc/telegraf/telegraf.d"
 LOGROTATE_DIR = "/etc/logrotate.d"
 
 INIT_SCRIPT = "scripts/init.sh"
@@ -115,7 +116,7 @@ def create_package_fs(build_root):
     logging.debug("Creating a filesystem hierarchy from directory: {}".format(build_root))
     # Using [1:] for the path names due to them being absolute
     # (will overwrite previous paths, per 'os.path.join' documentation)
-    dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:] ]
+    dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:], CONFIG_DIR_D[1:] ]
     for d in dirs:
         os.makedirs(os.path.join(build_root, d))
         os.chmod(os.path.join(build_root, d), 0o755)

From ea6e0b82595ffcd53f955496ec90d2e1377d0e07 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Thu, 9 Mar 2017 10:13:31 -0800
Subject: [PATCH 0152/1302] Fix typo in postgresql README

---
 plugins/inputs/postgresql/README.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/plugins/inputs/postgresql/README.md b/plugins/inputs/postgresql/README.md
index aed041bc6..e31fcff69 100644
--- a/plugins/inputs/postgresql/README.md
+++ b/plugins/inputs/postgresql/README.md
@@ -30,21 +30,21 @@ _* value ignored and therefore not recorded._
 
 More information about the meaning of these metrics can be found in the [PostgreSQL Documentation](http://www.postgresql.org/docs/9.2/static/monitoring-stats.html#PG-STAT-DATABASE-VIEW)
 
-## Configruation
+## Configuration
 Specify address via a url matching:
 
   `postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]`
 
 All connection parameters are optional. Without the dbname parameter, the driver will default to a database with the same name as the user. This dbname is just for instantiating a connection with the server and doesn't restrict the databases we are trying to grab metrics for.
-  
+
 A  list of databases to explicitly ignore.  If not specified, metrics for all databases are gathered.  Do NOT use with the 'databases' option.
 
   `ignored_databases = ["postgres", "template0", "template1"]`
-  
+
 A list of databases to pull metrics about. If not specified, metrics for all databases are gathered.  Do NOT use with the 'ignored_databases' option.
 
   `databases = ["app_production", "testing"]`
-  
+
 ### Configuration example
 ```
 [[inputs.postgresql]]

From 13f314a5076a47b208aeef5bdab12470837e26c1 Mon Sep 17 00:00:00 2001
From: jeremydenoun 
Date: Thu, 9 Mar 2017 20:28:54 +0100
Subject: [PATCH 0153/1302] Report DEAD (X) State Process (#2501)

Report count of processes in dead (X) process state from the processes input.  This process state is only valid on Linux.
---
 CHANGELOG.md                              | 1 +
 plugins/inputs/system/PROCESSES_README.md | 4 +++-
 plugins/inputs/system/processes.go        | 5 +++++
 3 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f6053bd66..a11752b5c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -56,6 +56,7 @@ be deprecated eventually.
 - [#2339](https://github.com/influxdata/telegraf/pull/2339): Increment gather_errors for all errors emitted by inputs.
 - [#2071](https://github.com/influxdata/telegraf/issues/2071): Use official docker SDK.
 - [#1678](https://github.com/influxdata/telegraf/pull/1678): Add AMQP consumer input plugin
+- [#2501](https://github.com/influxdata/telegraf/pull/2501): Support DEAD(X) state in system input plugin.
 
 ### Bugfixes
 
diff --git a/plugins/inputs/system/PROCESSES_README.md b/plugins/inputs/system/PROCESSES_README.md
index 006e043fb..aaeb279f8 100644
--- a/plugins/inputs/system/PROCESSES_README.md
+++ b/plugins/inputs/system/PROCESSES_README.md
@@ -23,6 +23,7 @@ it requires access to execute `ps`.
     - stopped
     - total
     - zombie
+    - dead
     - wait (freebsd only)
     - idle (bsd only)
     - paging (linux only)
@@ -39,6 +40,7 @@ Linux  FreeBSD  Darwin  meaning
   R       R       R     running
   S       S       S     sleeping
   Z       Z       Z     zombie
+  X      none    none   dead
   T       T       T     stopped
  none     I       I     idle (sleeping for longer than about 20 seconds)
   D      D,L      U     blocked (waiting in uninterruptible sleep, or locked)
@@ -54,5 +56,5 @@ None
 ```
 $ telegraf -config ~/ws/telegraf.conf -input-filter processes -test
 * Plugin: processes, Collection 1
-> processes blocked=8i,running=1i,sleeping=265i,stopped=0i,total=274i,zombie=0i,paging=0i,total_threads=687i 1457478636980905042
+> processes blocked=8i,running=1i,sleeping=265i,stopped=0i,total=274i,zombie=0i,dead=0i,paging=0i,total_threads=687i 1457478636980905042
 ```
diff --git a/plugins/inputs/system/processes.go b/plugins/inputs/system/processes.go
index 0950323fd..202bdf058 100644
--- a/plugins/inputs/system/processes.go
+++ b/plugins/inputs/system/processes.go
@@ -81,6 +81,7 @@ func getEmptyFields() map[string]interface{} {
 	case "openbsd":
 		fields["idle"] = int64(0)
 	case "linux":
+		fields["dead"] = int64(0)
 		fields["paging"] = int64(0)
 		fields["total_threads"] = int64(0)
 	}
@@ -107,6 +108,8 @@ func (p *Processes) gatherFromPS(fields map[string]interface{}) error {
 			fields["blocked"] = fields["blocked"].(int64) + int64(1)
 		case 'Z':
 			fields["zombies"] = fields["zombies"].(int64) + int64(1)
+		case 'X':
+			fields["dead"] = fields["dead"].(int64) + int64(1)
 		case 'T':
 			fields["stopped"] = fields["stopped"].(int64) + int64(1)
 		case 'R':
@@ -164,6 +167,8 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error {
 			fields["blocked"] = fields["blocked"].(int64) + int64(1)
 		case 'Z':
 			fields["zombies"] = fields["zombies"].(int64) + int64(1)
+		case 'X':
+			fields["dead"] = fields["dead"].(int64) + int64(1)
 		case 'T', 't':
 			fields["stopped"] = fields["stopped"].(int64) + int64(1)
 		case 'W':

From 7a5d8578467ac6a58575bd6a661a1c16f383455c Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Fri, 10 Mar 2017 11:27:55 -0800
Subject: [PATCH 0154/1302] Add support for new SSL configuration to mongodb
 (#2522)

closes #2519
---
 CHANGELOG.md                      |  1 +
 plugins/inputs/mongodb/README.md  |  9 ++++++++-
 plugins/inputs/mongodb/mongodb.go | 29 ++++++++++++++++++++++++++++-
 3 files changed, 37 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a11752b5c..20a036555 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -57,6 +57,7 @@ be deprecated eventually.
 - [#2071](https://github.com/influxdata/telegraf/issues/2071): Use official docker SDK.
 - [#1678](https://github.com/influxdata/telegraf/pull/1678): Add AMQP consumer input plugin
 - [#2501](https://github.com/influxdata/telegraf/pull/2501): Support DEAD(X) state in system input plugin.
+- [#2522](https://github.com/influxdata/telegraf/pull/2522): Add support for mongodb client certificates.
 
 ### Bugfixes
 
diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md
index 72f87feb8..678fe0777 100644
--- a/plugins/inputs/mongodb/README.md
+++ b/plugins/inputs/mongodb/README.md
@@ -11,9 +11,16 @@
   ##   10.0.0.1:10000, etc.
   servers = ["127.0.0.1:27017"]
   gather_perdb_stats = false
+
+  ## Optional SSL Config
+  # ssl_ca = "/etc/telegraf/ca.pem"
+  # ssl_cert = "/etc/telegraf/cert.pem"
+  # ssl_key = "/etc/telegraf/key.pem"
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
 ```
 
-For authenticated mongodb istances use connection mongdb connection URI
+For authenticated mongodb instances use the `mongodb://` connection URI:
 
 ```toml
 [[inputs.mongodb]]
diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go
index 0bf822a4c..a80b94690 100644
--- a/plugins/inputs/mongodb/mongodb.go
+++ b/plugins/inputs/mongodb/mongodb.go
@@ -10,6 +10,7 @@ import (
 	"time"
 
 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/internal/errchan"
 	"github.com/influxdata/telegraf/plugins/inputs"
 	"gopkg.in/mgo.v2"
@@ -20,6 +21,15 @@ type MongoDB struct {
 	Ssl              Ssl
 	mongos           map[string]*Server
 	GatherPerdbStats bool
+
+	// Path to CA file
+	SSLCA string `toml:"ssl_ca"`
+	// Path to host cert file
+	SSLCert string `toml:"ssl_cert"`
+	// Path to cert key file
+	SSLKey string `toml:"ssl_key"`
+	// Use SSL but skip chain & host verification
+	InsecureSkipVerify bool
 }
 
 type Ssl struct {
@@ -35,6 +45,13 @@ var sampleConfig = `
   ##   10.0.0.1:10000, etc.
   servers = ["127.0.0.1:27017"]
   gather_perdb_stats = false
+
+  ## Optional SSL Config
+  # ssl_ca = "/etc/telegraf/ca.pem"
+  # ssl_cert = "/etc/telegraf/cert.pem"
+  # ssl_key = "/etc/telegraf/key.pem"
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
 `
 
 func (m *MongoDB) SampleConfig() string {
@@ -105,8 +122,11 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error {
 		dialInfo.Direct = true
 		dialInfo.Timeout = 5 * time.Second
 
+		var tlsConfig *tls.Config
+
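+		// Use the deprecated [inputs.mongodb.ssl] section when it is
+		// enabled; otherwise build the TLS config from the newer
+		// top-level ssl_* options.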
 		if m.Ssl.Enabled {
-			tlsConfig := &tls.Config{}
+			// Deprecated SSL config
+			tlsConfig = &tls.Config{}
 			if len(m.Ssl.CaCerts) > 0 {
 				roots := x509.NewCertPool()
 				for _, caCert := range m.Ssl.CaCerts {
@@ -119,6 +139,13 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error {
 			} else {
 				tlsConfig.InsecureSkipVerify = true
 			}
+		} else {
+			tlsConfig, err = internal.GetTLSConfig(
+				m.SSLCert, m.SSLKey, m.SSLCA, m.InsecureSkipVerify)
+			if err != nil {
+				return err
+			}
+		}
+
+		// If configured to use TLS, add a dial function
+		if tlsConfig != nil {
 			dialInfo.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) {
 				conn, err := tls.Dial("tcp", addr.String(), tlsConfig)
 				if err != nil {

From 426182b81a3b78164212e3b3e9ebab0d89023934 Mon Sep 17 00:00:00 2001
From: Antoine Augusti 
Date: Wed, 15 Mar 2017 23:20:18 +0100
Subject: [PATCH 0155/1302] Update default value for Cloudwatch rate limit
 (#2520)

---
 CHANGELOG.md                                 | 1 +
 plugins/inputs/cloudwatch/README.md          | 7 ++++---
 plugins/inputs/cloudwatch/cloudwatch.go      | 9 +++++----
 plugins/inputs/cloudwatch/cloudwatch_test.go | 4 ++--
 4 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 20a036555..ea1ccca4d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -79,6 +79,7 @@ be deprecated eventually.
 - [#2483](https://github.com/influxdata/telegraf/pull/2483): Fix win_perf_counters capping values at 100.
 - [#2498](https://github.com/influxdata/telegraf/pull/2498): Exporting Ipmi.Path to be set by config.
 - [#2500](https://github.com/influxdata/telegraf/pull/2500): Remove warning if parse empty content
+- [#2520](https://github.com/influxdata/telegraf/pull/2520): Update default value for Cloudwatch rate limit
 - [#2513](https://github.com/influxdata/telegraf/issues/2513): create /etc/telegraf/telegraf.d directory in tarball.
 
 ## v1.2.1 [2017-02-01]
diff --git a/plugins/inputs/cloudwatch/README.md b/plugins/inputs/cloudwatch/README.md
index 643e18c3b..3a3c708a2 100644
--- a/plugins/inputs/cloudwatch/README.md
+++ b/plugins/inputs/cloudwatch/README.md
@@ -42,9 +42,10 @@ API endpoint. In the following order the plugin will attempt to authenticate.
   namespace = "AWS/ELB"
 
   ## Maximum requests per second. Note that the global default AWS rate limit is
-  ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
-  ## maximum of 10. Optional - default value is 10.
-  ratelimit = 10
+  ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
+  ## maximum of 400. Optional - default value is 200.
+  ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
+  ratelimit = 200
 
   ## Metrics to Pull (optional)
   ## Defaults to all Metrics in Namespace if nothing is provided
diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go
index a812c1265..f0a067001 100644
--- a/plugins/inputs/cloudwatch/cloudwatch.go
+++ b/plugins/inputs/cloudwatch/cloudwatch.go
@@ -105,9 +105,10 @@ func (c *CloudWatch) SampleConfig() string {
   namespace = "AWS/ELB"
 
   ## Maximum requests per second. Note that the global default AWS rate limit is
-  ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
-  ## maximum of 10. Optional - default value is 10.
-  ratelimit = 10
+  ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
+  ## maximum of 400. Optional - default value is 200.
+  ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
+  ratelimit = 200
 
   ## Metrics to Pull (optional)
   ## Defaults to all Metrics in Namespace if nothing is provided
@@ -214,7 +215,7 @@ func init() {
 		ttl, _ := time.ParseDuration("1hr")
 		return &CloudWatch{
 			CacheTTL:  internal.Duration{Duration: ttl},
-			RateLimit: 10,
+			RateLimit: 200,
 		}
 	})
 }
diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go
index a1bd7464b..f2d58a00c 100644
--- a/plugins/inputs/cloudwatch/cloudwatch_test.go
+++ b/plugins/inputs/cloudwatch/cloudwatch_test.go
@@ -58,7 +58,7 @@ func TestGather(t *testing.T) {
 		Namespace: "AWS/ELB",
 		Delay:     internalDuration,
 		Period:    internalDuration,
-		RateLimit: 10,
+		RateLimit: 200,
 	}
 
 	var acc testutil.Accumulator
@@ -146,7 +146,7 @@ func TestSelectMetrics(t *testing.T) {
 		Namespace: "AWS/ELB",
 		Delay:     internalDuration,
 		Period:    internalDuration,
-		RateLimit: 10,
+		RateLimit: 200,
 		Metrics: []*Metric{
 			&Metric{
 				MetricNames: []string{"Latency", "RequestCount"},

From 8514acdc3cbdcedce660017b6a55068743710d2e Mon Sep 17 00:00:00 2001
From: Patrick Hemmer 
Date: Fri, 17 Mar 2017 13:14:03 -0400
Subject: [PATCH 0156/1302] return error on unsupported serializer data format
 (#2542)

---
 CHANGELOG.md                    | 1 +
 plugins/serializers/registry.go | 4 ++++
 2 files changed, 5 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index ea1ccca4d..5ef7f580b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -81,6 +81,7 @@ be deprecated eventually.
 - [#2500](https://github.com/influxdata/telegraf/pull/2500): Remove warning if parse empty content
 - [#2520](https://github.com/influxdata/telegraf/pull/2520): Update default value for Cloudwatch rate limit
 - [#2513](https://github.com/influxdata/telegraf/issues/2513): create /etc/telegraf/telegraf.d directory in tarball.
+- [#2541](https://github.com/influxdata/telegraf/issues/2541): Return error on unsupported serializer data format.
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go
index 83be4900b..cb1e03b46 100644
--- a/plugins/serializers/registry.go
+++ b/plugins/serializers/registry.go
@@ -1,6 +1,8 @@
 package serializers
 
 import (
+	"fmt"
+
 	"github.com/influxdata/telegraf"
 
 	"github.com/influxdata/telegraf/plugins/serializers/graphite"
@@ -49,6 +51,8 @@ func NewSerializer(config *Config) (Serializer, error) {
 		serializer, err = NewGraphiteSerializer(config.Prefix, config.Template)
 	case "json":
 		serializer, err = NewJsonSerializer()
+	default:
+		err = fmt.Errorf("Invalid data format: %s", config.DataFormat)
 	}
 	return serializer, err
 }

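With the new `default` case, a misspelled `data_format` surfaces as an error instead of a nil serializer. A minimal sketch of the new behavior, using the `Config.DataFormat` field referenced in the switch above:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/plugins/serializers"
)

func main() {
	// "jsn" is a deliberate typo; NewSerializer now reports it.
	_, err := serializers.NewSerializer(&serializers.Config{DataFormat: "jsn"})
	if err != nil {
		fmt.Println(err) // Invalid data format: jsn
	}
}
```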
From a962e958ebf64118cdd48fd3d6ff1583a56c7702 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Fri, 17 Mar 2017 16:49:11 -0700
Subject: [PATCH 0157/1302] Refactor procstat input (#2540)

fixes #1636
fixes #2315
---
 plugins/inputs/procstat/pgrep.go          |  91 +++++++
 plugins/inputs/procstat/process.go        |  60 +++++
 plugins/inputs/procstat/procstat.go       | 312 +++++++++++-----------
 plugins/inputs/procstat/procstat_test.go  | 299 +++++++++++++++++++--
 plugins/inputs/procstat/spec_processor.go | 110 --------
 testutil/accumulator.go                   |  23 ++
 6 files changed, 608 insertions(+), 287 deletions(-)
 create mode 100644 plugins/inputs/procstat/pgrep.go
 create mode 100644 plugins/inputs/procstat/process.go
 delete mode 100644 plugins/inputs/procstat/spec_processor.go

diff --git a/plugins/inputs/procstat/pgrep.go b/plugins/inputs/procstat/pgrep.go
new file mode 100644
index 000000000..bae5161e4
--- /dev/null
+++ b/plugins/inputs/procstat/pgrep.go
@@ -0,0 +1,91 @@
+package procstat
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os/exec"
+	"strconv"
+	"strings"
+)
+
+type PIDFinder interface {
+	PidFile(path string) ([]PID, error)
+	Pattern(pattern string) ([]PID, error)
+	Uid(user string) ([]PID, error)
+	FullPattern(path string) ([]PID, error)
+}
+
+// Implementation of PIDFinder that execs pgrep to find processes
+type Pgrep struct {
+	path string
+}
+
+func NewPgrep() (PIDFinder, error) {
+	path, err := exec.LookPath("pgrep")
+	if err != nil {
+		return nil, fmt.Errorf("Could not find pgrep binary: %s", err)
+	}
+	return &Pgrep{path}, nil
+}
+
+func (pg *Pgrep) PidFile(path string) ([]PID, error) {
+	var pids []PID
+	pidString, err := ioutil.ReadFile(path)
+	if err != nil {
+		return pids, fmt.Errorf("Failed to read pidfile '%s'. Error: '%s'",
+			path, err)
+	}
+	pid, err := strconv.Atoi(strings.TrimSpace(string(pidString)))
+	if err != nil {
+		return pids, err
+	}
+	pids = append(pids, PID(pid))
+	return pids, nil
+}
+
+func (pg *Pgrep) Pattern(pattern string) ([]PID, error) {
+	args := []string{pattern}
+	return find(pg.path, args)
+}
+
+func (pg *Pgrep) Uid(user string) ([]PID, error) {
+	args := []string{"-u", user}
+	return find(pg.path, args)
+}
+
+func (pg *Pgrep) FullPattern(pattern string) ([]PID, error) {
+	args := []string{"-f", pattern}
+	return find(pg.path, args)
+}
+
+func find(path string, args []string) ([]PID, error) {
+	out, err := run(path, args)
+	if err != nil {
+		return nil, err
+	}
+
+	return parseOutput(out)
+}
+
+func run(path string, args []string) (string, error) {
+	out, err := exec.Command(path, args...).Output()
+	if err != nil {
+		return "", fmt.Errorf("Error running %s: %s", path, err)
+	}
+	return string(out), err
+}
+
+func parseOutput(out string) ([]PID, error) {
+	pids := []PID{}
+	fields := strings.Fields(out)
+	for _, field := range fields {
+		pid, err := strconv.Atoi(field)
+		if err != nil {
+			return nil, err
+		}
+		pids = append(pids, PID(pid))
+	}
+	return pids, nil
+}
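A minimal sketch of driving the new finder directly; the pattern string is an assumption, and note that pgrep exits non-zero when nothing matches, which `run` surfaces as an error:

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/telegraf/plugins/inputs/procstat"
)

func main() {
	finder, err := procstat.NewPgrep()
	if err != nil {
		log.Fatal(err) // pgrep binary not on PATH
	}
	// FullPattern matches the full command line, mirroring the plugin's
	// `pattern` option.
	pids, err := finder.FullPattern("nginx: worker")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pids)
}
```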
diff --git a/plugins/inputs/procstat/process.go b/plugins/inputs/procstat/process.go
new file mode 100644
index 000000000..ec2363f6e
--- /dev/null
+++ b/plugins/inputs/procstat/process.go
@@ -0,0 +1,60 @@
+package procstat
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/shirou/gopsutil/cpu"
+	"github.com/shirou/gopsutil/process"
+)
+
+type Process interface {
+	PID() PID
+	Tags() map[string]string
+
+	IOCounters() (*process.IOCountersStat, error)
+	MemoryInfo() (*process.MemoryInfoStat, error)
+	Name() (string, error)
+	NumCtxSwitches() (*process.NumCtxSwitchesStat, error)
+	NumFDs() (int32, error)
+	NumThreads() (int32, error)
+	Percent(interval time.Duration) (float64, error)
+	Times() (*cpu.TimesStat, error)
+}
+
+type Proc struct {
+	hasCPUTimes bool
+	tags        map[string]string
+	*process.Process
+}
+
+func NewProc(pid PID) (Process, error) {
+	process, err := process.NewProcess(int32(pid))
+	if err != nil {
+		return nil, err
+	}
+
+	proc := &Proc{
+		Process:     process,
+		hasCPUTimes: false,
+		tags:        make(map[string]string),
+	}
+	return proc, nil
+}
+
+func (p *Proc) Tags() map[string]string {
+	return p.tags
+}
+
+func (p *Proc) PID() PID {
+	return PID(p.Process.Pid)
+}
+
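+// Percent reports CPU usage computed from the delta between two samples.
+// The first call only primes the counters, so it returns an error rather
+// than a misleading zero; cpu_usage appears from the second gather onwards.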
+func (p *Proc) Percent(interval time.Duration) (float64, error) {
+	cpu_perc, err := p.Process.Percent(time.Duration(0))
+	if !p.hasCPUTimes && err == nil {
+		p.hasCPUTimes = true
+		return 0, fmt.Errorf("Must call Percent twice to compute percent cpu.")
+	}
+	return cpu_perc, err
+}
diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go
index 929490e4a..46b88fbcf 100644
--- a/plugins/inputs/procstat/procstat.go
+++ b/plugins/inputs/procstat/procstat.go
@@ -2,18 +2,20 @@ package procstat
 
 import (
 	"fmt"
-	"io/ioutil"
-	"log"
-	"os/exec"
 	"strconv"
-	"strings"
-
-	"github.com/shirou/gopsutil/process"
+	"time"
 
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
 
+var (
+	defaultPIDFinder = NewPgrep
+	defaultProcess   = NewProc
+)
+
+type PID int32
+
 type Procstat struct {
 	PidFile     string `toml:"pid_file"`
 	Exe         string
@@ -23,17 +25,10 @@ type Procstat struct {
 	User        string
 	PidTag      bool
 
-	// pidmap maps a pid to a process object, so we don't recreate every gather
-	pidmap map[int32]*process.Process
-	// tagmap maps a pid to a map of tags for that pid
-	tagmap map[int32]map[string]string
-}
-
-func NewProcstat() *Procstat {
-	return &Procstat{
-		pidmap: make(map[int32]*process.Process),
-		tagmap: make(map[int32]map[string]string),
-	}
+	pidFinder       PIDFinder
+	createPIDFinder func() (PIDFinder, error)
+	procs           map[PID]Process
+	createProcess   func(PID) (Process, error)
 }
 
 var sampleConfig = `
@@ -67,174 +62,179 @@ func (_ *Procstat) Description() string {
 }
 
 func (p *Procstat) Gather(acc telegraf.Accumulator) error {
-	err := p.createProcesses()
+	procs, err := p.updateProcesses(p.procs)
 	if err != nil {
-		log.Printf("E! Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s",
+		return fmt.Errorf(
+			"E! Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s",
 			p.Exe, p.PidFile, p.Pattern, p.User, err.Error())
-	} else {
-		for pid, proc := range p.pidmap {
-			if p.PidTag {
-				p.tagmap[pid]["pid"] = fmt.Sprint(pid)
-			}
-			p := NewSpecProcessor(p.ProcessName, p.Prefix, pid, acc, proc, p.tagmap[pid])
-			p.pushMetrics()
-		}
+	}
+	p.procs = procs
+
+	for _, proc := range p.procs {
+		p.addMetrics(proc, acc)
 	}
 
 	return nil
 }
 
-func (p *Procstat) createProcesses() error {
-	var errstring string
-	var outerr error
-
-	pids, err := p.getAllPids()
-	if err != nil {
-		errstring += err.Error() + " "
+// Add metrics for a single Process
+func (p *Procstat) addMetrics(proc Process, acc telegraf.Accumulator) {
+	var prefix string
+	if p.Prefix != "" {
+		prefix = p.Prefix + "_"
 	}
 
-	for _, pid := range pids {
-		_, ok := p.pidmap[pid]
-		if !ok {
-			proc, err := process.NewProcess(pid)
-			if err == nil {
-				p.pidmap[pid] = proc
-			} else {
-				errstring += err.Error() + " "
-			}
+	fields := map[string]interface{}{}
+
+	// If the process_name tag is not already set, set it to the actual name
+	if _, nameInTags := proc.Tags()["process_name"]; !nameInTags {
+		name, err := proc.Name()
+		if err == nil {
+			proc.Tags()["process_name"] = name
 		}
 	}
 
-	if errstring != "" {
-		outerr = fmt.Errorf("%s", errstring)
+	// If pid is not present as a tag, include it as a field.
+	if _, pidInTags := proc.Tags()["pid"]; !pidInTags {
+		fields["pid"] = int32(proc.PID())
 	}
 
-	return outerr
+	numThreads, err := proc.NumThreads()
+	if err == nil {
+		fields[prefix+"num_threads"] = numThreads
+	}
+
+	fds, err := proc.NumFDs()
+	if err == nil {
+		fields[prefix+"num_fds"] = fds
+	}
+
+	ctx, err := proc.NumCtxSwitches()
+	if err == nil {
+		fields[prefix+"voluntary_context_switches"] = ctx.Voluntary
+		fields[prefix+"involuntary_context_switches"] = ctx.Involuntary
+	}
+
+	io, err := proc.IOCounters()
+	if err == nil {
+		fields[prefix+"read_count"] = io.ReadCount
+		fields[prefix+"write_count"] = io.WriteCount
+		fields[prefix+"read_bytes"] = io.ReadBytes
+		fields[prefix+"write_bytes"] = io.WriteBytes
+	}
+
+	cpu_time, err := proc.Times()
+	if err == nil {
+		fields[prefix+"cpu_time_user"] = cpu_time.User
+		fields[prefix+"cpu_time_system"] = cpu_time.System
+		fields[prefix+"cpu_time_idle"] = cpu_time.Idle
+		fields[prefix+"cpu_time_nice"] = cpu_time.Nice
+		fields[prefix+"cpu_time_iowait"] = cpu_time.Iowait
+		fields[prefix+"cpu_time_irq"] = cpu_time.Irq
+		fields[prefix+"cpu_time_soft_irq"] = cpu_time.Softirq
+		fields[prefix+"cpu_time_steal"] = cpu_time.Steal
+		fields[prefix+"cpu_time_stolen"] = cpu_time.Stolen
+		fields[prefix+"cpu_time_guest"] = cpu_time.Guest
+		fields[prefix+"cpu_time_guest_nice"] = cpu_time.GuestNice
+	}
+
+	cpu_perc, err := proc.Percent(time.Duration(0))
+	if err == nil {
+		fields[prefix+"cpu_usage"] = cpu_perc
+	}
+
+	mem, err := proc.MemoryInfo()
+	if err == nil {
+		fields[prefix+"memory_rss"] = mem.RSS
+		fields[prefix+"memory_vms"] = mem.VMS
+		fields[prefix+"memory_swap"] = mem.Swap
+	}
+
+	acc.AddFields("procstat", fields, proc.Tags())
 }
 
-func (p *Procstat) getAllPids() ([]int32, error) {
-	var pids []int32
+// Update monitored Processes
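+// Processes seen in the previous gather are reused so per-process state
+// (e.g. the prior CPU sample consumed by Percent) carries over between
+// intervals; only new PIDs get fresh Process objects and initial tags.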
+func (p *Procstat) updateProcesses(prevInfo map[PID]Process) (map[PID]Process, error) {
+	pids, tags, err := p.findPids()
+	if err != nil {
+		return nil, err
+	}
+
+	procs := make(map[PID]Process, len(prevInfo))
+
+	for _, pid := range pids {
+		info, ok := prevInfo[pid]
+		if ok {
+			procs[pid] = info
+		} else {
+			proc, err := p.createProcess(pid)
+			if err != nil {
+				// No problem; process may have ended after we found it
+				continue
+			}
+			procs[pid] = proc
+
+			// Add initial tags
+			for k, v := range tags {
+				proc.Tags()[k] = v
+			}
+
+			// Add pid tag if needed
+			if p.PidTag {
+				proc.Tags()["pid"] = strconv.Itoa(int(pid))
+			}
+			if p.ProcessName != "" {
+				proc.Tags()["process_name"] = p.ProcessName
+			}
+		}
+	}
+	return procs, nil
+}
+
+// Create and return the PIDFinder lazily
+func (p *Procstat) getPIDFinder() (PIDFinder, error) {
+	if p.pidFinder == nil {
+		f, err := p.createPIDFinder()
+		if err != nil {
+			return nil, err
+		}
+		p.pidFinder = f
+	}
+	return p.pidFinder, nil
+}
+
+// Get matching PIDs and their initial tags
+func (p *Procstat) findPids() ([]PID, map[string]string, error) {
+	var pids []PID
+	var tags map[string]string
 	var err error
 
+	f, err := p.getPIDFinder()
+	if err != nil {
+		return nil, nil, err
+	}
+
 	if p.PidFile != "" {
-		pids, err = p.pidsFromFile()
+		pids, err = f.PidFile(p.PidFile)
+		tags = map[string]string{"pidfile": p.PidFile}
 	} else if p.Exe != "" {
-		pids, err = p.pidsFromExe()
+		pids, err = f.Pattern(p.Exe)
+		tags = map[string]string{"exe": p.Exe}
 	} else if p.Pattern != "" {
-		pids, err = p.pidsFromPattern()
+		pids, err = f.FullPattern(p.Pattern)
+		tags = map[string]string{"pattern": p.Pattern}
 	} else if p.User != "" {
-		pids, err = p.pidsFromUser()
+		pids, err = f.Uid(p.User)
+		tags = map[string]string{"user": p.User}
 	} else {
 		err = fmt.Errorf("Either exe, pid_file, user, or pattern has to be specified")
 	}
 
-	return pids, err
-}
-
-func (p *Procstat) pidsFromFile() ([]int32, error) {
-	var out []int32
-	var outerr error
-	pidString, err := ioutil.ReadFile(p.PidFile)
-	if err != nil {
-		outerr = fmt.Errorf("Failed to read pidfile '%s'. Error: '%s'",
-			p.PidFile, err)
-	} else {
-		pid, err := strconv.Atoi(strings.TrimSpace(string(pidString)))
-		if err != nil {
-			outerr = err
-		} else {
-			out = append(out, int32(pid))
-			p.tagmap[int32(pid)] = map[string]string{
-				"pidfile": p.PidFile,
-			}
-		}
-	}
-	return out, outerr
-}
-
-func (p *Procstat) pidsFromExe() ([]int32, error) {
-	var out []int32
-	var outerr error
-	bin, err := exec.LookPath("pgrep")
-	if err != nil {
-		return out, fmt.Errorf("Couldn't find pgrep binary: %s", err)
-	}
-	pgrep, err := exec.Command(bin, p.Exe).Output()
-	if err != nil {
-		return out, fmt.Errorf("Failed to execute %s. Error: '%s'", bin, err)
-	} else {
-		pids := strings.Fields(string(pgrep))
-		for _, pid := range pids {
-			ipid, err := strconv.Atoi(pid)
-			if err == nil {
-				out = append(out, int32(ipid))
-				p.tagmap[int32(ipid)] = map[string]string{
-					"exe": p.Exe,
-				}
-			} else {
-				outerr = err
-			}
-		}
-	}
-	return out, outerr
-}
-
-func (p *Procstat) pidsFromPattern() ([]int32, error) {
-	var out []int32
-	var outerr error
-	bin, err := exec.LookPath("pgrep")
-	if err != nil {
-		return out, fmt.Errorf("Couldn't find pgrep binary: %s", err)
-	}
-	pgrep, err := exec.Command(bin, "-f", p.Pattern).Output()
-	if err != nil {
-		return out, fmt.Errorf("Failed to execute %s. Error: '%s'", bin, err)
-	} else {
-		pids := strings.Fields(string(pgrep))
-		for _, pid := range pids {
-			ipid, err := strconv.Atoi(pid)
-			if err == nil {
-				out = append(out, int32(ipid))
-				p.tagmap[int32(ipid)] = map[string]string{
-					"pattern": p.Pattern,
-				}
-			} else {
-				outerr = err
-			}
-		}
-	}
-	return out, outerr
-}
-
-func (p *Procstat) pidsFromUser() ([]int32, error) {
-	var out []int32
-	var outerr error
-	bin, err := exec.LookPath("pgrep")
-	if err != nil {
-		return out, fmt.Errorf("Couldn't find pgrep binary: %s", err)
-	}
-	pgrep, err := exec.Command(bin, "-u", p.User).Output()
-	if err != nil {
-		return out, fmt.Errorf("Failed to execute %s. Error: '%s'", bin, err)
-	} else {
-		pids := strings.Fields(string(pgrep))
-		for _, pid := range pids {
-			ipid, err := strconv.Atoi(pid)
-			if err == nil {
-				out = append(out, int32(ipid))
-				p.tagmap[int32(ipid)] = map[string]string{
-					"user": p.User,
-				}
-			} else {
-				outerr = err
-			}
-		}
-	}
-	return out, outerr
+	return pids, tags, err
 }
 
 func init() {
 	inputs.Add("procstat", func() telegraf.Input {
-		return NewProcstat()
+		return &Procstat{}
 	})
 }
diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go
index ccc72bdbb..1f6f27642 100644
--- a/plugins/inputs/procstat/procstat_test.go
+++ b/plugins/inputs/procstat/procstat_test.go
@@ -1,33 +1,290 @@
 package procstat
 
 import (
-	"io/ioutil"
+	"fmt"
 	"os"
-	"strconv"
 	"testing"
+	"time"
 
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/shirou/gopsutil/cpu"
 	"github.com/shirou/gopsutil/process"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-
-	"github.com/influxdata/telegraf/testutil"
 )
 
-func TestGather(t *testing.T) {
-	var acc testutil.Accumulator
-	pid := os.Getpid()
-	file, err := ioutil.TempFile(os.TempDir(), "telegraf")
-	require.NoError(t, err)
-	file.Write([]byte(strconv.Itoa(pid)))
-	file.Close()
-	defer os.Remove(file.Name())
-	p := Procstat{
-		PidFile: file.Name(),
-		Prefix:  "foo",
-		pidmap:  make(map[int32]*process.Process),
-		tagmap:  make(map[int32]map[string]string),
-	}
-	p.Gather(&acc)
-	assert.True(t, acc.HasFloatField("procstat", "foo_cpu_time_user"))
-	assert.True(t, acc.HasUIntField("procstat", "foo_memory_vms"))
+type testPgrep struct {
+	pids []PID
+	err  error
+}
+
+func pidFinder(pids []PID, err error) func() (PIDFinder, error) {
+	return func() (PIDFinder, error) {
+		return &testPgrep{
+			pids: pids,
+			err:  err,
+		}, nil
+	}
+}
+
+func (pg *testPgrep) PidFile(path string) ([]PID, error) {
+	return pg.pids, pg.err
+}
+
+func (pg *testPgrep) Pattern(pattern string) ([]PID, error) {
+	return pg.pids, pg.err
+}
+
+func (pg *testPgrep) Uid(user string) ([]PID, error) {
+	return pg.pids, pg.err
+}
+
+func (pg *testPgrep) FullPattern(pattern string) ([]PID, error) {
+	return pg.pids, pg.err
+}
+
+type testProc struct {
+	pid  PID
+	tags map[string]string
+}
+
+func newTestProc(pid PID) (Process, error) {
+	proc := &testProc{
+		pid:  pid,
+		tags: make(map[string]string),
+	}
+	return proc, nil
+}
+
+func (p *testProc) PID() PID {
+	return p.pid
+}
+
+func (p *testProc) Tags() map[string]string {
+	return p.tags
+}
+
+func (p *testProc) IOCounters() (*process.IOCountersStat, error) {
+	return &process.IOCountersStat{}, nil
+}
+
+func (p *testProc) MemoryInfo() (*process.MemoryInfoStat, error) {
+	return &process.MemoryInfoStat{}, nil
+}
+
+func (p *testProc) Name() (string, error) {
+	return "test_proc", nil
+}
+
+func (p *testProc) NumCtxSwitches() (*process.NumCtxSwitchesStat, error) {
+	return &process.NumCtxSwitchesStat{}, nil
+}
+
+func (p *testProc) NumFDs() (int32, error) {
+	return 0, nil
+}
+
+func (p *testProc) NumThreads() (int32, error) {
+	return 0, nil
+}
+
+func (p *testProc) Percent(interval time.Duration) (float64, error) {
+	return 0, nil
+}
+
+func (p *testProc) Times() (*cpu.TimesStat, error) {
+	return &cpu.TimesStat{}, nil
+}
+
+var pid = PID(42)
+var exe = "foo"
+
+func TestGather_CreateProcessErrorOk(t *testing.T) {
+	var acc testutil.Accumulator
+
+	p := Procstat{
+		Exe:             exe,
+		createPIDFinder: pidFinder([]PID{pid}, nil),
+		createProcess: func(PID) (Process, error) {
+			return nil, fmt.Errorf("createProcess error")
+		},
+	}
+	require.NoError(t, p.Gather(&acc))
+}
+
+func TestGather_CreatePIDFinderError(t *testing.T) {
+	var acc testutil.Accumulator
+
+	p := Procstat{
+		createPIDFinder: func() (PIDFinder, error) {
+			return nil, fmt.Errorf("createPIDFinder error")
+		},
+		createProcess: newTestProc,
+	}
+	require.Error(t, p.Gather(&acc))
+}
+
+func TestGather_ProcessName(t *testing.T) {
+	var acc testutil.Accumulator
+
+	p := Procstat{
+		Exe:             exe,
+		ProcessName:     "custom_name",
+		createPIDFinder: pidFinder([]PID{pid}, nil),
+		createProcess:   newTestProc,
+	}
+	require.NoError(t, p.Gather(&acc))
+
+	assert.Equal(t, "custom_name", acc.TagValue("procstat", "process_name"))
+}
+
+func TestGather_NoProcessNameUsesReal(t *testing.T) {
+	var acc testutil.Accumulator
+	pid := PID(os.Getpid())
+
+	p := Procstat{
+		Exe:             exe,
+		createPIDFinder: pidFinder([]PID{pid}, nil),
+		createProcess:   newTestProc,
+	}
+	require.NoError(t, p.Gather(&acc))
+
+	assert.True(t, acc.HasTag("procstat", "process_name"))
+}
+
+func TestGather_NoPidTag(t *testing.T) {
+	var acc testutil.Accumulator
+
+	p := Procstat{
+		Exe:             exe,
+		createPIDFinder: pidFinder([]PID{pid}, nil),
+		createProcess:   newTestProc,
+	}
+	require.NoError(t, p.Gather(&acc))
+	assert.True(t, acc.HasInt32Field("procstat", "pid"))
+	assert.False(t, acc.HasTag("procstat", "pid"))
+}
+
+func TestGather_PidTag(t *testing.T) {
+	var acc testutil.Accumulator
+
+	p := Procstat{
+		Exe:             exe,
+		PidTag:          true,
+		createPIDFinder: pidFinder([]PID{pid}, nil),
+		createProcess:   newTestProc,
+	}
+	require.NoError(t, p.Gather(&acc))
+	assert.Equal(t, "42", acc.TagValue("procstat", "pid"))
+	assert.False(t, acc.HasInt32Field("procstat", "pid"))
+}
+
+func TestGather_Prefix(t *testing.T) {
+	var acc testutil.Accumulator
+
+	p := Procstat{
+		Exe:             exe,
+		Prefix:          "custom_prefix",
+		createPIDFinder: pidFinder([]PID{pid}, nil),
+		createProcess:   newTestProc,
+	}
+	require.NoError(t, p.Gather(&acc))
+	assert.True(t, acc.HasInt32Field("procstat", "custom_prefix_num_fds"))
+}
+
+func TestGather_Exe(t *testing.T) {
+	var acc testutil.Accumulator
+
+	p := Procstat{
+		Exe:             exe,
+		createPIDFinder: pidFinder([]PID{pid}, nil),
+		createProcess:   newTestProc,
+	}
+	require.NoError(t, p.Gather(&acc))
+
+	assert.Equal(t, exe, acc.TagValue("procstat", "exe"))
+}
+
+func TestGather_User(t *testing.T) {
+	var acc testutil.Accumulator
+	user := "ada"
+
+	p := Procstat{
+		User:            user,
+		createPIDFinder: pidFinder([]PID{pid}, nil),
+		createProcess:   newTestProc,
+	}
+	require.NoError(t, p.Gather(&acc))
+
+	assert.Equal(t, user, acc.TagValue("procstat", "user"))
+}
+
+func TestGather_Pattern(t *testing.T) {
+	var acc testutil.Accumulator
+	pattern := "foo"
+
+	p := Procstat{
+		Pattern:         pattern,
+		createPIDFinder: pidFinder([]PID{pid}, nil),
+		createProcess:   newTestProc,
+	}
+	require.NoError(t, p.Gather(&acc))
+
+	assert.Equal(t, pattern, acc.TagValue("procstat", "pattern"))
+}
+
+func TestGather_MissingPidMethod(t *testing.T) {
+	var acc testutil.Accumulator
+
+	p := Procstat{
+		createPIDFinder: pidFinder([]PID{pid}, nil),
+		createProcess:   newTestProc,
+	}
+	require.Error(t, p.Gather(&acc))
+}
+
+func TestGather_PidFile(t *testing.T) {
+	var acc testutil.Accumulator
+	pidfile := "/path/to/pidfile"
+
+	p := Procstat{
+		PidFile:         pidfile,
+		createPIDFinder: pidFinder([]PID{pid}, nil),
+		createProcess:   newTestProc,
+	}
+	require.NoError(t, p.Gather(&acc))
+
+	assert.Equal(t, pidfile, acc.TagValue("procstat", "pidfile"))
+}
+
+func TestGather_PercentFirstPass(t *testing.T) {
+	var acc testutil.Accumulator
+	pid := PID(os.Getpid())
+
+	p := Procstat{
+		Pattern:         "foo",
+		PidTag:          true,
+		createPIDFinder: pidFinder([]PID{pid}, nil),
+		createProcess:   NewProc,
+	}
+	require.NoError(t, p.Gather(&acc))
+
+	assert.True(t, acc.HasFloatField("procstat", "cpu_time_user"))
+	assert.False(t, acc.HasFloatField("procstat", "cpu_usage"))
+}
+
+func TestGather_PercentSecondPass(t *testing.T) {
+	var acc testutil.Accumulator
+	pid := PID(os.Getpid())
+
+	p := Procstat{
+		Pattern:         "foo",
+		PidTag:          true,
+		createPIDFinder: pidFinder([]PID{pid}, nil),
+		createProcess:   NewProc,
+	}
+	require.NoError(t, p.Gather(&acc))
+	require.NoError(t, p.Gather(&acc))
+
+	assert.True(t, acc.HasFloatField("procstat", "cpu_time_user"))
+	assert.True(t, acc.HasFloatField("procstat", "cpu_usage"))
 }
diff --git a/plugins/inputs/procstat/spec_processor.go b/plugins/inputs/procstat/spec_processor.go
deleted file mode 100644
index 3b56fbc3e..000000000
--- a/plugins/inputs/procstat/spec_processor.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package procstat
-
-import (
-	"time"
-
-	"github.com/shirou/gopsutil/process"
-
-	"github.com/influxdata/telegraf"
-)
-
-type SpecProcessor struct {
-	Prefix string
-	pid    int32
-	tags   map[string]string
-	fields map[string]interface{}
-	acc    telegraf.Accumulator
-	proc   *process.Process
-}
-
-func NewSpecProcessor(
-	processName string,
-	prefix string,
-	pid int32,
-	acc telegraf.Accumulator,
-	p *process.Process,
-	tags map[string]string,
-) *SpecProcessor {
-	if processName != "" {
-		tags["process_name"] = processName
-	} else {
-		name, err := p.Name()
-		if err == nil {
-			tags["process_name"] = name
-		}
-	}
-	return &SpecProcessor{
-		Prefix: prefix,
-		pid:    pid,
-		tags:   tags,
-		fields: make(map[string]interface{}),
-		acc:    acc,
-		proc:   p,
-	}
-}
-
-func (p *SpecProcessor) pushMetrics() {
-	var prefix string
-	if p.Prefix != "" {
-		prefix = p.Prefix + "_"
-	}
-	fields := map[string]interface{}{}
-
-	//If pid is not present as a tag, include it as a field.
-	if _, pidInTags := p.tags["pid"]; !pidInTags {
-		fields["pid"] = p.pid
-	}
-
-	numThreads, err := p.proc.NumThreads()
-	if err == nil {
-		fields[prefix+"num_threads"] = numThreads
-	}
-
-	fds, err := p.proc.NumFDs()
-	if err == nil {
-		fields[prefix+"num_fds"] = fds
-	}
-
-	ctx, err := p.proc.NumCtxSwitches()
-	if err == nil {
-		fields[prefix+"voluntary_context_switches"] = ctx.Voluntary
-		fields[prefix+"involuntary_context_switches"] = ctx.Involuntary
-	}
-
-	io, err := p.proc.IOCounters()
-	if err == nil {
-		fields[prefix+"read_count"] = io.ReadCount
-		fields[prefix+"write_count"] = io.WriteCount
-		fields[prefix+"read_bytes"] = io.ReadBytes
-		fields[prefix+"write_bytes"] = io.WriteBytes
-	}
-
-	cpu_time, err := p.proc.Times()
-	if err == nil {
-		fields[prefix+"cpu_time_user"] = cpu_time.User
-		fields[prefix+"cpu_time_system"] = cpu_time.System
-		fields[prefix+"cpu_time_idle"] = cpu_time.Idle
-		fields[prefix+"cpu_time_nice"] = cpu_time.Nice
-		fields[prefix+"cpu_time_iowait"] = cpu_time.Iowait
-		fields[prefix+"cpu_time_irq"] = cpu_time.Irq
-		fields[prefix+"cpu_time_soft_irq"] = cpu_time.Softirq
-		fields[prefix+"cpu_time_steal"] = cpu_time.Steal
-		fields[prefix+"cpu_time_stolen"] = cpu_time.Stolen
-		fields[prefix+"cpu_time_guest"] = cpu_time.Guest
-		fields[prefix+"cpu_time_guest_nice"] = cpu_time.GuestNice
-	}
-
-	cpu_perc, err := p.proc.Percent(time.Duration(0))
-	if err == nil && cpu_perc != 0 {
-		fields[prefix+"cpu_usage"] = cpu_perc
-	}
-
-	mem, err := p.proc.MemoryInfo()
-	if err == nil {
-		fields[prefix+"memory_rss"] = mem.RSS
-		fields[prefix+"memory_vms"] = mem.VMS
-		fields[prefix+"memory_swap"] = mem.Swap
-	}
-
-	p.acc.AddFields("procstat", fields, p.tags)
-}
diff --git a/testutil/accumulator.go b/testutil/accumulator.go
index 25e60920b..63dfddd7a 100644
--- a/testutil/accumulator.go
+++ b/testutil/accumulator.go
@@ -161,6 +161,29 @@ func (a *Accumulator) Get(measurement string) (*Metric, bool) {
 	return nil, false
 }
 
+func (a *Accumulator) HasTag(measurement string, key string) bool {
+	for _, p := range a.Metrics {
+		if p.Measurement == measurement {
+			_, ok := p.Tags[key]
+			return ok
+		}
+	}
+	return false
+}
+
+func (a *Accumulator) TagValue(measurement string, key string) string {
+	for _, p := range a.Metrics {
+		if p.Measurement == measurement {
+			v, ok := p.Tags[key]
+			if !ok {
+				return ""
+			}
+			return v
+		}
+	}
+	return ""
+}
+
 // NFields returns the total number of fields in the accumulator, across all
 // measurements
 func (a *Accumulator) NFields() int {

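The new `HasTag` and `TagValue` helpers inspect the first metric of a matching measurement. A small self-contained sketch of their use; the measurement name and values are assumptions:

```go
package testutil_test

import (
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/require"
)

func TestTagHelpers(t *testing.T) {
	var acc testutil.Accumulator
	acc.AddFields("procstat",
		map[string]interface{}{"pid": int32(42)},
		map[string]string{"pid": "42"})

	require.True(t, acc.HasTag("procstat", "pid"))
	require.Equal(t, "42", acc.TagValue("procstat", "pid"))
	require.False(t, acc.HasTag("procstat", "host"))
}
```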
From bb28fb256b4595676714f22b2742738f96a184e5 Mon Sep 17 00:00:00 2001
From: Leandro Piccilli 
Date: Tue, 21 Mar 2017 01:47:57 +0100
Subject: [PATCH 0158/1302] Add Elasticsearch 5.x output (#2332)

---
 Godeps                                        |   1 +
 Makefile                                      |   6 +-
 README.md                                     |   1 +
 plugins/outputs/all/all.go                    |   1 +
 plugins/outputs/elasticsearch/README.md       | 218 +++++++++++++
 .../outputs/elasticsearch/elasticsearch.go    | 308 ++++++++++++++++++
 .../elasticsearch/elasticsearch_test.go       | 126 +++++++
 7 files changed, 659 insertions(+), 2 deletions(-)
 create mode 100644 plugins/outputs/elasticsearch/README.md
 create mode 100644 plugins/outputs/elasticsearch/elasticsearch.go
 create mode 100644 plugins/outputs/elasticsearch/elasticsearch_test.go

diff --git a/Godeps b/Godeps
index 2d0419ef6..6cbe9efa7 100644
--- a/Godeps
+++ b/Godeps
@@ -59,4 +59,5 @@ golang.org/x/text 506f9d5c962f284575e88337e7d9296d27e729d3
 gopkg.in/dancannon/gorethink.v1 edc7a6a68e2d8015f5ffe1b2560eed989f8a45be
 gopkg.in/fatih/pool.v2 6e328e67893eb46323ad06f0e92cb9536babbabc
 gopkg.in/mgo.v2 3f83fa5005286a7fe593b055f0d7771a7dce4655
+gopkg.in/olivere/elastic.v5 ee3ebceab960cf68ab9a89ee6d78c031ef5b4a4e
 gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6
diff --git a/Makefile b/Makefile
index 79276f887..d2bad656d 100644
--- a/Makefile
+++ b/Makefile
@@ -51,6 +51,7 @@ docker-run:
 		-e ADVERTISED_PORT=9092 \
 		-p "2181:2181" -p "9092:9092" \
 		-d spotify/kafka
+	docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
 	docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
 	docker run --name memcached -p "11211:11211" -d memcached
 	docker run --name postgres -p "5432:5432" -d postgres
@@ -69,6 +70,7 @@ docker-run-circle:
 		-e ADVERTISED_PORT=9092 \
 		-p "2181:2181" -p "9092:9092" \
 		-d spotify/kafka
+	docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
 	docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
 	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
 	docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann
@@ -76,8 +78,8 @@ docker-run-circle:
 
 # Kill all docker containers, ignore errors
 docker-kill:
-	-docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats
-	-docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats
+	-docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats elasticsearch
+	-docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats elasticsearch
 
 # Run full unit tests using docker containers (includes setup and teardown)
 test: vet docker-kill docker-run
diff --git a/README.md b/README.md
index 915c7b761..906862714 100644
--- a/README.md
+++ b/README.md
@@ -211,6 +211,7 @@ Telegraf can also collect metrics via the following service plugins:
 * [aws cloudwatch](./plugins/outputs/cloudwatch)
 * [datadog](./plugins/outputs/datadog)
 * [discard](./plugins/outputs/discard)
+* [elasticsearch](./plugins/outputs/elasticsearch)
 * [file](./plugins/outputs/file)
 * [graphite](./plugins/outputs/graphite)
 * [graylog](./plugins/outputs/graylog)
diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go
index eec2b95e3..089a56909 100644
--- a/plugins/outputs/all/all.go
+++ b/plugins/outputs/all/all.go
@@ -6,6 +6,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/outputs/cloudwatch"
 	_ "github.com/influxdata/telegraf/plugins/outputs/datadog"
 	_ "github.com/influxdata/telegraf/plugins/outputs/discard"
+	_ "github.com/influxdata/telegraf/plugins/outputs/elasticsearch"
 	_ "github.com/influxdata/telegraf/plugins/outputs/file"
 	_ "github.com/influxdata/telegraf/plugins/outputs/graphite"
 	_ "github.com/influxdata/telegraf/plugins/outputs/graylog"
diff --git a/plugins/outputs/elasticsearch/README.md b/plugins/outputs/elasticsearch/README.md
new file mode 100644
index 000000000..620d5a82c
--- /dev/null
+++ b/plugins/outputs/elasticsearch/README.md
@@ -0,0 +1,218 @@
+## Elasticsearch Output Plugin for Telegraf
+
+This plugin writes to [Elasticsearch](https://www.elastic.co) via HTTP using the [Elastic](http://olivere.github.io/elastic/) client library.
+
+It currently supports only the Elasticsearch 5.x series.
+
+## Elasticsearch indexes and templates
+
+### Indexes per time-frame
+
+This plugin can manage indexes per time-frame, as is commonly done with Elasticsearch in other tools.
+
+The timestamp of each collected metric is used to decide the destination index.
+
+For more information about this usage on Elasticsearch, check https://www.elastic.co/guide/en/elasticsearch/guide/master/time-based.html#index-per-timeframe
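+
+For example, with `index_name = "telegraf-%Y.%m.%d"` a metric stamped 2017-03-21T02:00:00Z is written to the index `telegraf-2017.03.21`; the specifiers are expanded from the metric's UTC timestamp.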
+
+### Template management
+
+Index templates are used in Elasticsearch to define settings and mappings for the indexes and how the fields should be analyzed.
+For more information on how this works, see https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html
+
+This plugin can create a working template for use with telegraf metrics. It uses the Elasticsearch dynamic templates feature to set proper types for the tags and metrics fields.
+If the specified template already exists, it will not be overwritten unless you configure this plugin to do so, so you can customize the template after its creation if necessary.
+
+Example of an index template created by telegraf:
+
+```json
+{
+  "order": 0,
+  "template": "telegraf-*",
+  "settings": {
+    "index": {
+      "mapping": {
+        "total_fields": {
+          "limit": "5000"
+        }
+      },
+      "refresh_interval": "10s"
+    }
+  },
+  "mappings": {
+    "_default_": {
+      "dynamic_templates": [
+        {
+          "tags": {
+            "path_match": "tag.*",
+            "mapping": {
+              "ignore_above": 512,
+              "type": "keyword"
+            },
+            "match_mapping_type": "string"
+          }
+        },
+        {
+          "metrics_long": {
+            "mapping": {
+              "index": false,
+              "type": "float"
+            },
+            "match_mapping_type": "long"
+          }
+        },
+        {
+          "metrics_double": {
+            "mapping": {
+              "index": false,
+              "type": "float"
+            },
+            "match_mapping_type": "double"
+          }
+        },
+        {
+          "text_fields": {
+            "mapping": {
+              "norms": false
+            },
+            "match": "*"
+          }
+        }
+      ],
+      "_all": {
+        "enabled": false
+      },
+      "properties": {
+        "@timestamp": {
+          "type": "date"
+        },
+        "measurement_name": {
+          "type": "keyword"
+        }
+      }
+    }
+  },
+  "aliases": {}
+}
+```
+
+### Example events:
+
+This plugin will format the events in the following way:
+
+```json
+{
+  "@timestamp": "2017-01-01T00:00:00+00:00",
+  "measurement_name": "cpu",
+  "cpu": {
+    "usage_guest": 0,
+    "usage_guest_nice": 0,
+    "usage_idle": 71.85413456197966,
+    "usage_iowait": 0.256805341656516,
+    "usage_irq": 0,
+    "usage_nice": 0,
+    "usage_softirq": 0.2054442732579466,
+    "usage_steal": 0,
+    "usage_system": 15.04879301548127,
+    "usage_user": 12.634822807288275
+  },
+  "tag": {
+    "cpu": "cpu-total",
+    "host": "elastichost",
+    "dc": "datacenter1"
+  }
+}
+```
+
+```json
+{
+  "@timestamp": "2017-01-01T00:00:00+00:00",
+  "measurement_name": "system",
+  "system": {
+    "load1": 0.78,
+    "load15": 0.8,
+    "load5": 0.8,
+    "n_cpus": 2,
+    "n_users": 2
+  },
+  "tag": {
+    "host": "elastichost",
+    "dc": "datacenter1"
+  }
+}
+```
+
+### Configuration:
+
+```toml
+# Configuration for Elasticsearch to send metrics to.
+[[outputs.elasticsearch]]
+  ## The full HTTP endpoint URL for your Elasticsearch instance
+  ## Multiple urls can be specified as part of the same cluster;
+  ## only ONE of the urls will be written to in each interval.
+  urls = [ "http://node1.es.example.com:9200" ] # required.
+  ## Elasticsearch client timeout, defaults to "5s" if not set.
+  timeout = "5s"
+  ## Set to true to ask Elasticsearch for a list of all cluster nodes,
+  ## making it unnecessary to list every node in the urls config option
+  enable_sniffer = false
+  ## Set the interval to check if the Elasticsearch nodes are available
+  ## Setting to "0s" will disable the health check (not recommended in production)
+  health_check_interval = "10s"
+  ## HTTP basic authentication details (e.g. when using Shield)
+  # username = "telegraf"
+  # password = "mypassword"
+
+  ## Index Config
+  ## The target index for metrics (Elasticsearch will create it if it does not exist).
+  ## You can use the date specifiers below to create indexes per time frame.
+  ## The metric timestamp will be used to decide the destination index name
+  # %Y - year (2016)
+  # %y - last two digits of year (00..99)
+  # %m - month (01..12)
+  # %d - day of month (e.g., 01)
+  # %H - hour (00..23)
+  index_name = "telegraf-%Y.%m.%d" # required.
+
+  ## Template Config
+  ## Set to true if you want telegraf to manage its index template.
+  ## If enabled it will create a recommended index template for telegraf indexes
+  manage_template = true
+  ## The template name used for telegraf indexes
+  template_name = "telegraf"
+  ## Set to true if you want telegraf to overwrite an existing template
+  overwrite_template = false
+```
+
+### Required parameters:
+
+* `urls`: A list containing the full HTTP URL of one or more nodes from your Elasticsearch instance.
+* `index_name`: The target index for metrics. You can use the date specifiers below to create indexes per time frame.
+
+```
+%Y - year (2017)
+%y - last two digits of year (00..99)
+%m - month (01..12)
+%d - day of month (e.g., 01)
+%H - hour (00..23)
+```
+
+### Optional parameters:
+
+* `timeout`: Elasticsearch client timeout, defaults to "5s" if not set.
+* `enable_sniffer`: Set to true to ask Elasticsearch for a list of all cluster nodes, making it unnecessary to list every node in the urls config option.
+* `health_check_interval`: Set the interval at which to check if the Elasticsearch nodes are available. Setting to "0s" will disable the health check (not recommended in production).
+* `username`: The username for HTTP basic authentication (e.g. when using Shield).
+* `password`: The password for HTTP basic authentication (e.g. when using Shield).
+* `manage_template`: Set to true if you want telegraf to manage its index template. If enabled it will create a recommended index template for telegraf indexes.
+* `template_name`: The template name used for telegraf indexes.
+* `overwrite_template`: Set to true if you want telegraf to overwrite an existing template.
+
+## Known issues
+
+Integer values larger than 2^63 and smaller than 1e21 (and their negative counterparts in the same window) are encoded by the golang JSON encoder in decimal format, which is not fully supported by Elasticsearch dynamic field mapping. Metrics containing such values are dropped when a field mapping has not yet been created on the telegraf index. In that case you will see an exception on the Elasticsearch side like this:
+
+```
+{"error":{"root_cause":[{"type":"mapper_parsing_exception","reason":"failed to parse"}],"type":"mapper_parsing_exception","reason":"failed to parse","caused_by":{"type":"illegal_state_exception","reason":"No matching token for number_type [BIG_INTEGER]"}},"status":400}
+```
+
+The correct field mapping will be created on the telegraf index as soon as a supported JSON value is received by Elasticsearch, and subsequent insertions will work because the field mapping will already exist. 
+
+This issue is caused by the way Elasticsearch tries to detect integer fields, and by how golang encodes numbers in JSON. There is no clear workaround for this at the moment.
\ No newline at end of file
diff --git a/plugins/outputs/elasticsearch/elasticsearch.go b/plugins/outputs/elasticsearch/elasticsearch.go
new file mode 100644
index 000000000..dbd359b90
--- /dev/null
+++ b/plugins/outputs/elasticsearch/elasticsearch.go
@@ -0,0 +1,308 @@
+package elasticsearch
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/plugins/outputs"
+	"gopkg.in/olivere/elastic.v5"
+)
+
+type Elasticsearch struct {
+	URLs                []string `toml:"urls"`
+	IndexName           string
+	Username            string
+	Password            string
+	EnableSniffer       bool
+	Timeout             internal.Duration
+	HealthCheckInterval internal.Duration
+	ManageTemplate      bool
+	TemplateName        string
+	OverwriteTemplate   bool
+	Client              *elastic.Client
+}
+
+var sampleConfig = `
+  ## The full HTTP endpoint URL for your Elasticsearch instance
+  ## Multiple urls can be specified as part of the same cluster;
+  ## only ONE of the urls will be written to in each interval.
+  urls = [ "http://node1.es.example.com:9200" ] # required.
+  ## Elasticsearch client timeout, defaults to "5s" if not set. 
+  timeout = "5s"
+  ## Set to true to ask Elasticsearch for a list of all cluster nodes,
+  ## making it unnecessary to list every node in the urls config option.
+  enable_sniffer = false
+  ## Set the interval to check if the Elasticsearch nodes are available
+  ## Setting to "0s" will disable the health check (not recommended in production)
+  health_check_interval = "10s"
+  ## HTTP basic authentication details (e.g. when using Shield)
+  # username = "telegraf"
+  # password = "mypassword"
+
+  ## Index Config
+  ## The target index for metrics (Elasticsearch will create it if it does not exist).
+  ## You can use the date specifiers below to create indexes per time frame.
+  ## The metric timestamp will be used to decide the destination index name
+  # %Y - year (2016)
+  # %y - last two digits of year (00..99)
+  # %m - month (01..12)
+  # %d - day of month (e.g., 01)
+  # %H - hour (00..23)
+  index_name = "telegraf-%Y.%m.%d" # required.
+
+  ## Template Config
+  ## Set to true if you want telegraf to manage its index template.
+  ## If enabled it will create a recommended index template for telegraf indexes
+  manage_template = true
+  ## The template name used for telegraf indexes
+  template_name = "telegraf"
+  ## Set to true if you want telegraf to overwrite an existing template
+  overwrite_template = false
+`
+
+func (a *Elasticsearch) Connect() error {
+	if a.URLs == nil || a.IndexName == "" {
+		return fmt.Errorf("Elasticsearch urls or index_name is not defined")
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), a.Timeout.Duration)
+	defer cancel()
+
+	var clientOptions []elastic.ClientOptionFunc
+
+	clientOptions = append(clientOptions,
+		elastic.SetSniff(a.EnableSniffer),
+		elastic.SetURL(a.URLs...),
+		elastic.SetHealthcheckInterval(a.HealthCheckInterval.Duration),
+	)
+
+	if a.Username != "" && a.Password != "" {
+		clientOptions = append(clientOptions,
+			elastic.SetBasicAuth(a.Username, a.Password),
+		)
+	}
+
+	if a.HealthCheckInterval.Duration == 0 {
+		clientOptions = append(clientOptions,
+			elastic.SetHealthcheck(false),
+		)
+		log.Printf("D! Elasticsearch output: disabling health check")
+	}
+
+	client, err := elastic.NewClient(clientOptions...)
+
+	if err != nil {
+		return err
+	}
+
+	// check for ES version on first node
+	esVersion, err := client.ElasticsearchVersion(a.URLs[0])
+
+	if err != nil {
+		return fmt.Errorf("Elasticsearch version check failed: %s", err)
+	}
+
+	// quit if ES version is not supported
+	i, err := strconv.Atoi(strings.Split(esVersion, ".")[0])
+	if err != nil || i < 5 {
+		return fmt.Errorf("Elasticsearch version not supported: %s", esVersion)
+	}
+
+	log.Println("I! Elasticsearch version: " + esVersion)
+
+	a.Client = client
+
+	if a.ManageTemplate {
+		err := a.manageTemplate(ctx)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *Elasticsearch) Write(metrics []telegraf.Metric) error {
+	if len(metrics) == 0 {
+		return nil
+	}
+
+	bulkRequest := a.Client.Bulk()
+
+	for _, metric := range metrics {
+		var name = metric.Name()
+
+		// index name has to be re-evaluated each time for telegraf
+		// to send the metric to the correct time-based index
+		indexName := a.GetIndexName(a.IndexName, metric.Time())
+
+		m := make(map[string]interface{})
+
+		m["@timestamp"] = metric.Time()
+		m["measurement_name"] = name
+		m["tag"] = metric.Tags()
+		m[name] = metric.Fields()
+
+		bulkRequest.Add(elastic.NewBulkIndexRequest().
+			Index(indexName).
+			Type("metrics").
+			Doc(m))
+
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), a.Timeout.Duration)
+	defer cancel()
+
+	res, err := bulkRequest.Do(ctx)
+
+	if err != nil {
+		return fmt.Errorf("Error sending bulk request to Elasticsearch: %s", err)
+	}
+
+	if res.Errors {
+		for id, err := range res.Failed() {
+			log.Printf("E! Elasticsearch indexing failure, id: %d, error: %s, caused by: %s, %s", id, err.Error.Reason, err.Error.CausedBy["reason"], err.Error.CausedBy["type"])
+		}
+		return fmt.Errorf("W! Elasticsearch failed to index %d metrics", len(res.Failed()))
+	}
+
+	return nil
+
+}
+
+func (a *Elasticsearch) manageTemplate(ctx context.Context) error {
+	if a.TemplateName == "" {
+		return fmt.Errorf("Elasticsearch template_name configuration not defined")
+	}
+
+	templateExists, errExists := a.Client.IndexTemplateExists(a.TemplateName).Do(ctx)
+
+	if errExists != nil {
+		return fmt.Errorf("Elasticsearch template check failed, template name: %s, error: %s", a.TemplateName, errExists)
+	}
+
+	templatePattern := a.IndexName + "*"
+
+	if strings.Contains(a.IndexName, "%") {
+		templatePattern = a.IndexName[0:strings.Index(a.IndexName, "%")] + "*"
+	}
+
+	if (a.OverwriteTemplate) || (!templateExists) {
+		// Create or update the template
+		tmpl := fmt.Sprintf(`
+			{
+				"template":"%s",
+				"settings": {
+					"index": {
+						"refresh_interval": "10s",
+						"mapping.total_fields.limit": 5000
+					}
+				},
+				"mappings" : {
+					"_default_" : {
+						"_all": { "enabled": false	  },
+						"properties" : {
+							"@timestamp" : { "type" : "date" },
+							"measurement_name" : { "type" : "keyword" }
+						},
+						"dynamic_templates": [
+							{
+								"tags": {
+									"match_mapping_type": "string",
+									"path_match": "tag.*",
+									"mapping": {
+										"ignore_above": 512,
+										"type": "keyword"
+									}
+								}
+							},
+							{
+								"metrics_long": {
+									"match_mapping_type": "long",
+									"mapping": {
+										"type": "float",
+										"index": false
+									}
+								}
+							},
+							{
+								"metrics_double": {
+									"match_mapping_type": "double",
+									"mapping": {
+										"type": "float",
+										"index": false
+									}
+								}
+							},
+							{
+								"text_fields": {
+									"match": "*",
+									"mapping": {
+										"norms": false
+									}
+								}
+							}
+						]
+					}
+				}
+			}`, templatePattern)
+		_, errCreateTemplate := a.Client.IndexPutTemplate(a.TemplateName).BodyString(tmpl).Do(ctx)
+
+		if errCreateTemplate != nil {
+			return fmt.Errorf("Elasticsearch failed to create index template %s : %s", a.TemplateName, errCreateTemplate)
+		}
+
+		log.Printf("D! Elasticsearch template %s created or updated\n", a.TemplateName)
+
+	} else {
+
+		log.Println("D! Found existing Elasticsearch template. Skipping template management")
+
+	}
+	return nil
+}
+
+func (a *Elasticsearch) GetIndexName(indexName string, eventTime time.Time) string {
+	if strings.Contains(indexName, "%") {
+		var dateReplacer = strings.NewReplacer(
+			"%Y", eventTime.UTC().Format("2006"),
+			"%y", eventTime.UTC().Format("06"),
+			"%m", eventTime.UTC().Format("01"),
+			"%d", eventTime.UTC().Format("02"),
+			"%H", eventTime.UTC().Format("15"),
+		)
+
+		indexName = dateReplacer.Replace(indexName)
+	}
+
+	return indexName
+
+}
+
+func (a *Elasticsearch) SampleConfig() string {
+	return sampleConfig
+}
+
+func (a *Elasticsearch) Description() string {
+	return "Configuration for Elasticsearch to send metrics to."
+}
+
+func (a *Elasticsearch) Close() error {
+	a.Client = nil
+	return nil
+}
+
+func init() {
+	outputs.Add("elasticsearch", func() telegraf.Output {
+		return &Elasticsearch{
+			Timeout:             internal.Duration{Duration: time.Second * 5},
+			HealthCheckInterval: internal.Duration{Duration: time.Second * 10},
+		}
+	})
+}
diff --git a/plugins/outputs/elasticsearch/elasticsearch_test.go b/plugins/outputs/elasticsearch/elasticsearch_test.go
new file mode 100644
index 000000000..9163a2bbe
--- /dev/null
+++ b/plugins/outputs/elasticsearch/elasticsearch_test.go
@@ -0,0 +1,126 @@
+package elasticsearch
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/require"
+)
+
+func TestConnectAndWrite(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping integration test in short mode")
+	}
+
+	urls := []string{"http://" + testutil.GetLocalHost() + ":9200"}
+
+	e := &Elasticsearch{
+		URLs:                urls,
+		IndexName:           "test-%Y.%m.%d",
+		Timeout:             internal.Duration{Duration: time.Second * 5},
+		ManageTemplate:      true,
+		TemplateName:        "telegraf",
+		OverwriteTemplate:   false,
+		HealthCheckInterval: internal.Duration{Duration: time.Second * 10},
+	}
+
+	// Verify that we can connect to Elasticsearch
+	err := e.Connect()
+	require.NoError(t, err)
+
+	// Verify that we can successfully write data to Elasticsearch
+	err = e.Write(testutil.MockMetrics())
+	require.NoError(t, err)
+
+}
+
+func TestTemplateManagementEmptyTemplate(t *testing.T) {
+	urls := []string{"http://" + testutil.GetLocalHost() + ":9200"}
+
+	ctx := context.Background()
+
+	e := &Elasticsearch{
+		URLs:              urls,
+		IndexName:         "test-%Y.%m.%d",
+		Timeout:           internal.Duration{Duration: time.Second * 5},
+		ManageTemplate:    true,
+		TemplateName:      "",
+		OverwriteTemplate: true,
+	}
+
+	err := e.manageTemplate(ctx)
+	require.Error(t, err)
+
+}
+
+func TestTemplateManagement(t *testing.T) {
+	urls := []string{"http://" + testutil.GetLocalHost() + ":9200"}
+
+	e := &Elasticsearch{
+		URLs:              urls,
+		IndexName:         "test-%Y.%m.%d",
+		Timeout:           internal.Duration{Duration: time.Second * 5},
+		ManageTemplate:    true,
+		TemplateName:      "telegraf",
+		OverwriteTemplate: true,
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), e.Timeout.Duration)
+	defer cancel()
+
+	err := e.Connect()
+	require.NoError(t, err)
+
+	err = e.manageTemplate(ctx)
+	require.NoError(t, err)
+}
+
+func TestGetIndexName(t *testing.T) {
+	e := &Elasticsearch{}
+
+	var tests = []struct {
+		EventTime time.Time
+		IndexName string
+		Expected  string
+	}{
+		{
+			time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC),
+			"indexname",
+			"indexname",
+		},
+		{
+			time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC),
+			"indexname-%Y",
+			"indexname-2014",
+		},
+		{
+			time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC),
+			"indexname-%Y-%m",
+			"indexname-2014-12",
+		},
+		{
+			time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC),
+			"indexname-%Y-%m-%d",
+			"indexname-2014-12-01",
+		},
+		{
+			time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC),
+			"indexname-%Y-%m-%d-%H",
+			"indexname-2014-12-01-23",
+		},
+		{
+			time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC),
+			"indexname-%y-%m",
+			"indexname-14-12",
+		},
+	}
+	for _, test := range tests {
+		indexName := e.GetIndexName(test.IndexName, test.EventTime)
+		if indexName != test.Expected {
+			t.Errorf("Expected indexname %s, got %s\n", test.Expected, indexName)
+		}
+	}
+}

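`GetIndexName` expands the date specifiers using the metric's UTC timestamp. A short usage sketch, importing the plugin package added above:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/plugins/outputs/elasticsearch"
)

func main() {
	e := &elasticsearch.Elasticsearch{}
	ts := time.Date(2017, 3, 21, 2, 0, 0, 0, time.UTC)
	// %Y, %m and %d are replaced; literal text is kept as-is.
	fmt.Println(e.GetIndexName("telegraf-%Y.%m.%d", ts)) // telegraf-2017.03.21
}
```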
From 5c33c760c7acd2bae24894250606a87fc78c8425 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Tue, 21 Mar 2017 10:59:41 -0700
Subject: [PATCH 0159/1302] Fix procstat initialization

---
 plugins/inputs/procstat/procstat.go | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go
index 46b88fbcf..20c5af9d2 100644
--- a/plugins/inputs/procstat/procstat.go
+++ b/plugins/inputs/procstat/procstat.go
@@ -235,6 +235,9 @@ func (p *Procstat) findPids() ([]PID, map[string]string, error) {
 
 func init() {
 	inputs.Add("procstat", func() telegraf.Input {
-		return &Procstat{}
+		return &Procstat{
+			createPIDFinder: defaultPIDFinder,
+			createProcess:   defaultProcess,
+		}
 	})
 }

From 70a0a848821b9e602f8cb8a3e3dce7e5db8a1f31 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Tue, 21 Mar 2017 11:40:51 -0700
Subject: [PATCH 0160/1302] Really fix procstat initialization

---
 plugins/inputs/procstat/procstat.go | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go
index 20c5af9d2..d689ecf3e 100644
--- a/plugins/inputs/procstat/procstat.go
+++ b/plugins/inputs/procstat/procstat.go
@@ -62,6 +62,13 @@ func (_ *Procstat) Description() string {
 }
 
 func (p *Procstat) Gather(acc telegraf.Accumulator) error {
+	if p.createPIDFinder == nil {
+		p.createPIDFinder = defaultPIDFinder
+	}
+	if p.createProcess == nil {
+		p.createProcess = defaultProcess
+	}
+
 	procs, err := p.updateProcesses(p.procs)
 	if err != nil {
 		return fmt.Errorf(
@@ -235,9 +242,6 @@ func (p *Procstat) findPids() ([]PID, map[string]string, error) {
 
 func init() {
 	inputs.Add("procstat", func() telegraf.Input {
-		return &Procstat{
-			createPIDFinder: defaultPIDFinder,
-			createProcess:   defaultProcess,
-		}
+		return &Procstat{}
 	})
 }

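This second fix assigns the defaults lazily inside `Gather`, so the zero-value struct works no matter how it was constructed, presumably including paths that bypass the factory registered in `init()` (such as tests building the struct directly). The pattern in isolation, as a sketch:

```go
package procstat

// lazyPlugin is a sketch of the lazy-default pattern this patch applies;
// it is not part of the plugin itself.
type lazyPlugin struct {
	createPIDFinder func() (PIDFinder, error)
}

func (p *lazyPlugin) gather() error {
	// Fall back to the default finder on first use.
	if p.createPIDFinder == nil {
		p.createPIDFinder = NewPgrep
	}
	_, err := p.createPIDFinder()
	return err
}
```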
From 616b66f5cb12ae9b95cd6e101e709a4681a56bb7 Mon Sep 17 00:00:00 2001
From: Oskar 
Date: Wed, 22 Mar 2017 20:04:58 +0100
Subject: [PATCH 0161/1302] Multi instances in win_perf_counters (#2352)

---
 CHANGELOG.md                                          | 2 ++
 plugins/inputs/win_perf_counters/win_perf_counters.go | 5 +++++
 2 files changed, 7 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5ef7f580b..fd1ec5136 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -82,6 +82,8 @@ be deprecated eventually.
 - [#2520](https://github.com/influxdata/telegraf/pull/2520): Update default value for Cloudwatch rate limit
 - [#2513](https://github.com/influxdata/telegraf/issues/2513): create /etc/telegraf/telegraf.d directory in tarball.
 - [#2541](https://github.com/influxdata/telegraf/issues/2541): Return error on unsupported serializer data format.
+- [#1827](https://github.com/influxdata/telegraf/issues/1827): Fix Windows Performance Counters multi instance identifier
+
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go
index da59c3040..5365dc68b 100644
--- a/plugins/inputs/win_perf_counters/win_perf_counters.go
+++ b/plugins/inputs/win_perf_counters/win_perf_counters.go
@@ -265,6 +265,11 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error {
 					} else if metric.instance == s {
 						// Catch if we set it to total or some form of it
 						add = true
+					} else if strings.Contains(metric.instance, "#") && strings.HasPrefix(metric.instance, s) {
+						// If you are using a multiple instance identifier such as "w3wp#1"
+						// phd.dll returns only the first 2 characters of the identifier.
+						add = true
+						s = metric.instance
 					} else if metric.instance == "------" {
 						add = true
 					}

From 1402c158b74789132af2b885315d619648003f83 Mon Sep 17 00:00:00 2001
From: Patrick Hemmer 
Date: Fri, 24 Mar 2017 15:03:36 -0400
Subject: [PATCH 0162/1302] remove sleep from tests (#2555)

---
 plugins/inputs/cloudwatch/cloudwatch_test.go  |  5 +-
 .../http_listener/http_listener_test.go       | 47 +++----------
 .../http_response/http_response_test.go       |  2 +-
 .../inputs/kafka_consumer/kafka_consumer.go   |  9 +--
 .../kafka_consumer/kafka_consumer_test.go     | 11 ++-
 plugins/inputs/logparser/logparser_test.go    | 10 ++-
 plugins/inputs/mongodb/mongodb_server_test.go |  4 +-
 plugins/inputs/mqtt_consumer/mqtt_consumer.go | 10 +--
 .../mqtt_consumer/mqtt_consumer_test.go       | 15 ++--
 plugins/inputs/nats_consumer/nats_consumer.go |  8 +--
 .../nats_consumer/nats_consumer_test.go       | 20 +++---
 .../socket_listener/socket_listener_test.go   | 49 +++++--------
 plugins/inputs/tail/tail.go                   | 23 +++---
 plugins/inputs/tail/tail_test.go              | 19 +++--
 .../inputs/tcp_listener/tcp_listener_test.go  | 63 +++++++++--------
 plugins/inputs/udp_listener/udp_listener.go   | 25 ++++---
 .../inputs/udp_listener/udp_listener_test.go  | 57 ++++++++-------
 plugins/outputs/graphite/graphite_test.go     | 48 ++++++-------
 plugins/outputs/influxdb/client/udp_test.go   |  1 -
 .../outputs/instrumental/instrumental_test.go | 70 +++++++++----------
 testutil/accumulator.go                       | 26 +++++--
 21 files changed, 252 insertions(+), 270 deletions(-)

diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go
index f2d58a00c..3aaab7d45 100644
--- a/plugins/inputs/cloudwatch/cloudwatch_test.go
+++ b/plugins/inputs/cloudwatch/cloudwatch_test.go
@@ -207,14 +207,13 @@ func TestGenerateStatisticsInputParams(t *testing.T) {
 }
 
 func TestMetricsCacheTimeout(t *testing.T) {
-	ttl, _ := time.ParseDuration("5ms")
 	cache := &MetricCache{
 		Metrics: []*cloudwatch.Metric{},
 		Fetched: time.Now(),
-		TTL:     ttl,
+		TTL:     time.Minute,
 	}
 
 	assert.True(t, cache.IsValid())
-	time.Sleep(ttl)
+	cache.Fetched = time.Now().Add(-time.Minute)
 	assert.False(t, cache.IsValid())
 }
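The listener tests that follow replace their sleeps with `Accumulator.Wait`, added to testutil in this patch, which blocks until the accumulator holds at least n metrics. A sketch of the pattern; the goroutine stands in for a listener delivering a metric concurrently:

```go
package example

import (
	"testing"

	"github.com/influxdata/telegraf/testutil"
)

func TestWaitPattern(t *testing.T) {
	var acc testutil.Accumulator

	// Stand-in for e.g. an HTTP listener handling a POST in the background.
	go acc.AddFields("cpu_load_short",
		map[string]interface{}{"value": float64(12)},
		map[string]string{"host": "server01"})

	// Blocks until at least one metric is accumulated; no guessed sleep.
	acc.Wait(1)
}
```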
diff --git a/plugins/inputs/http_listener/http_listener_test.go b/plugins/inputs/http_listener/http_listener_test.go
index b5f858fde..7e6fbc8ab 100644
--- a/plugins/inputs/http_listener/http_listener_test.go
+++ b/plugins/inputs/http_listener/http_listener_test.go
@@ -6,7 +6,6 @@ import (
 	"net/http"
 	"sync"
 	"testing"
-	"time"
 
 	"github.com/influxdata/telegraf/testutil"
 
@@ -43,14 +42,12 @@ func TestWriteHTTP(t *testing.T) {
 	require.NoError(t, listener.Start(acc))
 	defer listener.Stop()
 
-	time.Sleep(time.Millisecond * 25)
-
 	// post single message to listener
 	resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(testMsg)))
 	require.NoError(t, err)
 	require.EqualValues(t, 204, resp.StatusCode)
 
-	time.Sleep(time.Millisecond * 15)
+	acc.Wait(1)
 	acc.AssertContainsTaggedFields(t, "cpu_load_short",
 		map[string]interface{}{"value": float64(12)},
 		map[string]string{"host": "server01"},
@@ -61,7 +58,7 @@ func TestWriteHTTP(t *testing.T) {
 	require.NoError(t, err)
 	require.EqualValues(t, 204, resp.StatusCode)
 
-	time.Sleep(time.Millisecond * 15)
+	acc.Wait(2)
 	hostTags := []string{"server02", "server03",
 		"server04", "server05", "server06"}
 	for _, hostTag := range hostTags {
@@ -76,7 +73,7 @@ func TestWriteHTTP(t *testing.T) {
 	require.NoError(t, err)
 	require.EqualValues(t, 400, resp.StatusCode)
 
-	time.Sleep(time.Millisecond * 15)
+	acc.Wait(3)
 	acc.AssertContainsTaggedFields(t, "cpu_load_short",
 		map[string]interface{}{"value": float64(12)},
 		map[string]string{"host": "server01"},
@@ -91,14 +88,12 @@ func TestWriteHTTPNoNewline(t *testing.T) {
 	require.NoError(t, listener.Start(acc))
 	defer listener.Stop()
 
-	time.Sleep(time.Millisecond * 25)
-
 	// post single message to listener
 	resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(testMsgNoNewline)))
 	require.NoError(t, err)
 	require.EqualValues(t, 204, resp.StatusCode)
 
-	time.Sleep(time.Millisecond * 15)
+	acc.Wait(1)
 	acc.AssertContainsTaggedFields(t, "cpu_load_short",
 		map[string]interface{}{"value": float64(12)},
 		map[string]string{"host": "server01"},
@@ -115,8 +110,6 @@ func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) {
 	require.NoError(t, listener.Start(acc))
 	defer listener.Stop()
 
-	time.Sleep(time.Millisecond * 25)
-
 	// Post a gigantic metric to the listener and verify that it writes OK this time:
 	resp, err := http.Post("http://localhost:8296/write?db=mydb", "", bytes.NewBuffer([]byte(hugeMetric)))
 	require.NoError(t, err)
@@ -133,8 +126,6 @@ func TestWriteHTTPVerySmallMaxBody(t *testing.T) {
 	require.NoError(t, listener.Start(acc))
 	defer listener.Stop()
 
-	time.Sleep(time.Millisecond * 25)
-
 	resp, err := http.Post("http://localhost:8297/write", "", bytes.NewBuffer([]byte(hugeMetric)))
 	require.NoError(t, err)
 	require.EqualValues(t, 413, resp.StatusCode)
@@ -150,15 +141,13 @@ func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) {
 	require.NoError(t, listener.Start(acc))
 	defer listener.Stop()
 
-	time.Sleep(time.Millisecond * 25)
-
 	resp, err := http.Post("http://localhost:8298/write", "", bytes.NewBuffer([]byte(testMsgs)))
 	require.NoError(t, err)
 	require.EqualValues(t, 204, resp.StatusCode)
 
-	time.Sleep(time.Millisecond * 15)
 	hostTags := []string{"server02", "server03",
 		"server04", "server05", "server06"}
+	acc.Wait(len(hostTags))
 	for _, hostTag := range hostTags {
 		acc.AssertContainsTaggedFields(t, "cpu_load_short",
 			map[string]interface{}{"value": float64(12)},
@@ -177,15 +166,13 @@ func TestWriteHTTPLargeLinesSkipped(t *testing.T) {
 	require.NoError(t, listener.Start(acc))
 	defer listener.Stop()
 
-	time.Sleep(time.Millisecond * 25)
-
 	resp, err := http.Post("http://localhost:8300/write", "", bytes.NewBuffer([]byte(hugeMetric+testMsgs)))
 	require.NoError(t, err)
 	require.EqualValues(t, 400, resp.StatusCode)
 
-	time.Sleep(time.Millisecond * 15)
 	hostTags := []string{"server02", "server03",
 		"server04", "server05", "server06"}
+	acc.Wait(len(hostTags))
 	for _, hostTag := range hostTags {
 		acc.AssertContainsTaggedFields(t, "cpu_load_short",
 			map[string]interface{}{"value": float64(12)},
@@ -204,8 +191,6 @@ func TestWriteHTTPGzippedData(t *testing.T) {
 	require.NoError(t, listener.Start(acc))
 	defer listener.Stop()
 
-	time.Sleep(time.Millisecond * 25)
-
 	data, err := ioutil.ReadFile("./testdata/testmsgs.gz")
 	require.NoError(t, err)
 
@@ -218,9 +203,9 @@ func TestWriteHTTPGzippedData(t *testing.T) {
 	require.NoError(t, err)
 	require.EqualValues(t, 204, resp.StatusCode)
 
-	time.Sleep(time.Millisecond * 50)
 	hostTags := []string{"server02", "server03",
 		"server04", "server05", "server06"}
+	acc.Wait(len(hostTags))
 	for _, hostTag := range hostTags {
 		acc.AssertContainsTaggedFields(t, "cpu_load_short",
 			map[string]interface{}{"value": float64(12)},
@@ -237,8 +222,6 @@ func TestWriteHTTPHighTraffic(t *testing.T) {
 	require.NoError(t, listener.Start(acc))
 	defer listener.Stop()
 
-	time.Sleep(time.Millisecond * 25)
-
 	// post many messages to listener
 	var wg sync.WaitGroup
 	for i := 0; i < 10; i++ {
@@ -254,9 +237,9 @@ func TestWriteHTTPHighTraffic(t *testing.T) {
 	}
 
 	wg.Wait()
-	time.Sleep(time.Millisecond * 250)
 	listener.Gather(acc)
 
+	acc.Wait(25000)
 	require.Equal(t, int64(25000), int64(acc.NMetrics()))
 }
 
@@ -267,8 +250,6 @@ func TestReceive404ForInvalidEndpoint(t *testing.T) {
 	require.NoError(t, listener.Start(acc))
 	defer listener.Stop()
 
-	time.Sleep(time.Millisecond * 25)
-
 	// post single message to listener
 	resp, err := http.Post("http://localhost:8186/foobar", "", bytes.NewBuffer([]byte(testMsg)))
 	require.NoError(t, err)
@@ -276,16 +257,12 @@ func TestReceive404ForInvalidEndpoint(t *testing.T) {
 }
 
 func TestWriteHTTPInvalid(t *testing.T) {
-	time.Sleep(time.Millisecond * 250)
-
 	listener := newTestHTTPListener()
 
 	acc := &testutil.Accumulator{}
 	require.NoError(t, listener.Start(acc))
 	defer listener.Stop()
 
-	time.Sleep(time.Millisecond * 25)
-
 	// post single message to listener
 	resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(badMsg)))
 	require.NoError(t, err)
@@ -293,16 +270,12 @@ func TestWriteHTTPInvalid(t *testing.T) {
 }
 
 func TestWriteHTTPEmpty(t *testing.T) {
-	time.Sleep(time.Millisecond * 250)
-
 	listener := newTestHTTPListener()
 
 	acc := &testutil.Accumulator{}
 	require.NoError(t, listener.Start(acc))
 	defer listener.Stop()
 
-	time.Sleep(time.Millisecond * 25)
-
 	// post single message to listener
 	resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(emptyMsg)))
 	require.NoError(t, err)
@@ -310,16 +283,12 @@ func TestWriteHTTPEmpty(t *testing.T) {
 }
 
 func TestQueryAndPingHTTP(t *testing.T) {
-	time.Sleep(time.Millisecond * 250)
-
 	listener := newTestHTTPListener()
 
 	acc := &testutil.Accumulator{}
 	require.NoError(t, listener.Start(acc))
 	defer listener.Stop()
 
-	time.Sleep(time.Millisecond * 25)
-
 	// post query to listener
 	resp, err := http.Post("http://localhost:8186/query?db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22", "", nil)
 	require.NoError(t, err)
diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go
index 236e5d88b..b65b1f954 100644
--- a/plugins/inputs/http_response/http_response_test.go
+++ b/plugins/inputs/http_response/http_response_test.go
@@ -329,7 +329,7 @@ func TestTimeout(t *testing.T) {
 		Address:         ts.URL + "/twosecondnap",
 		Body:            "{ 'test': 'data'}",
 		Method:          "GET",
-		ResponseTimeout: internal.Duration{Duration: time.Second * 1},
+		ResponseTimeout: internal.Duration{Duration: time.Millisecond},
 		Headers: map[string]string{
 			"Content-Type": "application/json",
 		},
diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go
index f4176edd3..6f1f4020b 100644
--- a/plugins/inputs/kafka_consumer/kafka_consumer.go
+++ b/plugins/inputs/kafka_consumer/kafka_consumer.go
@@ -1,6 +1,7 @@
 package kafka_consumer
 
 import (
+	"fmt"
 	"log"
 	"strings"
 	"sync"
@@ -129,13 +130,13 @@ func (k *Kafka) receiver() {
 			return
 		case err := <-k.errs:
 			if err != nil {
-				log.Printf("E! Kafka Consumer Error: %s\n", err)
+				k.acc.AddError(fmt.Errorf("Kafka Consumer Error: %s\n", err))
 			}
 		case msg := <-k.in:
 			metrics, err := k.parser.Parse(msg.Value)
 			if err != nil {
-				log.Printf("E! Kafka Message Parse Error\nmessage: %s\nerror: %s",
-					string(msg.Value), err.Error())
+				k.acc.AddError(fmt.Errorf("E! Kafka Message Parse Error\nmessage: %s\nerror: %s",
+					string(msg.Value), err.Error()))
 			}
 
 			for _, metric := range metrics {
@@ -158,7 +159,7 @@ func (k *Kafka) Stop() {
 	defer k.Unlock()
 	close(k.done)
 	if err := k.Consumer.Close(); err != nil {
-		log.Printf("E! Error closing kafka consumer: %s\n", err.Error())
+		k.acc.AddError(fmt.Errorf("E! Error closing kafka consumer: %s\n", err.Error()))
 	}
 }
 
diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go
index c4936974f..e1c24adbe 100644
--- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go
+++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go
@@ -2,7 +2,6 @@ package kafka_consumer
 
 import (
 	"testing"
-	"time"
 
 	"github.com/influxdata/telegraf/plugins/parsers"
 	"github.com/influxdata/telegraf/testutil"
@@ -43,7 +42,7 @@ func TestRunParser(t *testing.T) {
 	k.parser, _ = parsers.NewInfluxParser()
 	go k.receiver()
 	in <- saramaMsg(testMsg)
-	time.Sleep(time.Millisecond * 5)
+	acc.Wait(1)
 
 	assert.Equal(t, acc.NFields(), 1)
 }
@@ -58,7 +57,7 @@ func TestRunParserInvalidMsg(t *testing.T) {
 	k.parser, _ = parsers.NewInfluxParser()
 	go k.receiver()
 	in <- saramaMsg(invalidMsg)
-	time.Sleep(time.Millisecond * 5)
+	acc.WaitError(1)
 
 	assert.Equal(t, acc.NFields(), 0)
 }
@@ -73,7 +72,7 @@ func TestRunParserAndGather(t *testing.T) {
 	k.parser, _ = parsers.NewInfluxParser()
 	go k.receiver()
 	in <- saramaMsg(testMsg)
-	time.Sleep(time.Millisecond * 5)
+	acc.Wait(1)
 
 	k.Gather(&acc)
 
@@ -92,7 +91,7 @@ func TestRunParserAndGatherGraphite(t *testing.T) {
 	k.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil)
 	go k.receiver()
 	in <- saramaMsg(testMsgGraphite)
-	time.Sleep(time.Millisecond * 5)
+	acc.Wait(1)
 
 	k.Gather(&acc)
 
@@ -111,7 +110,7 @@ func TestRunParserAndGatherJSON(t *testing.T) {
 	k.parser, _ = parsers.NewJSONParser("kafka_json_test", []string{}, nil)
 	go k.receiver()
 	in <- saramaMsg(testMsgJSON)
-	time.Sleep(time.Millisecond * 5)
+	acc.Wait(1)
 
 	k.Gather(&acc)
 
diff --git a/plugins/inputs/logparser/logparser_test.go b/plugins/inputs/logparser/logparser_test.go
index 059bfd266..db9795f28 100644
--- a/plugins/inputs/logparser/logparser_test.go
+++ b/plugins/inputs/logparser/logparser_test.go
@@ -6,7 +6,6 @@ import (
 	"runtime"
 	"strings"
 	"testing"
-	"time"
 
 	"github.com/influxdata/telegraf/testutil"
 
@@ -41,7 +40,6 @@ func TestGrokParseLogFilesNonExistPattern(t *testing.T) {
 	acc := testutil.Accumulator{}
 	assert.Error(t, logparser.Start(&acc))
 
-	time.Sleep(time.Millisecond * 500)
 	logparser.Stop()
 }
 
@@ -61,7 +59,8 @@ func TestGrokParseLogFiles(t *testing.T) {
 	acc := testutil.Accumulator{}
 	assert.NoError(t, logparser.Start(&acc))
 
-	time.Sleep(time.Millisecond * 500)
+	acc.Wait(2)
+
 	logparser.Stop()
 
 	acc.AssertContainsTaggedFields(t, "logparser_grok",
@@ -102,14 +101,13 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) {
 	acc := testutil.Accumulator{}
 	assert.NoError(t, logparser.Start(&acc))
 
-	time.Sleep(time.Millisecond * 500)
 	assert.Equal(t, acc.NFields(), 0)
 
 	os.Symlink(
 		thisdir+"grok/testdata/test_a.log",
 		emptydir+"/test_a.log")
 	assert.NoError(t, logparser.Gather(&acc))
-	time.Sleep(time.Millisecond * 500)
+	acc.Wait(1)
 
 	logparser.Stop()
 
@@ -143,7 +141,7 @@ func TestGrokParseLogFilesOneBad(t *testing.T) {
 	acc.SetDebug(true)
 	assert.NoError(t, logparser.Start(&acc))
 
-	time.Sleep(time.Millisecond * 500)
+	acc.Wait(1)
 	logparser.Stop()
 
 	acc.AssertContainsTaggedFields(t, "logparser_grok",
diff --git a/plugins/inputs/mongodb/mongodb_server_test.go b/plugins/inputs/mongodb/mongodb_server_test.go
index 7ad0f38a2..e9d1bae9e 100644
--- a/plugins/inputs/mongodb/mongodb_server_test.go
+++ b/plugins/inputs/mongodb/mongodb_server_test.go
@@ -4,7 +4,6 @@ package mongodb
 
 import (
 	"testing"
-	"time"
 
 	"github.com/influxdata/telegraf/testutil"
 	"github.com/stretchr/testify/assert"
@@ -32,12 +31,11 @@ func TestAddDefaultStats(t *testing.T) {
 	err := server.gatherData(&acc, false)
 	require.NoError(t, err)
 
-	time.Sleep(time.Duration(1) * time.Second)
 	// need to call this twice so it can perform the diff
 	err = server.gatherData(&acc, false)
 	require.NoError(t, err)
 
 	for key, _ := range DefaultStats {
-		assert.True(t, acc.HasIntValue(key))
+		assert.True(t, acc.HasIntField("mongodb", key))
 	}
 }
diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go
index cfade2944..3ea0480b8 100644
--- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go
+++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go
@@ -142,8 +142,8 @@ func (m *MQTTConsumer) onConnect(c mqtt.Client) {
 		subscribeToken := c.SubscribeMultiple(topics, m.recvMessage)
 		subscribeToken.Wait()
 		if subscribeToken.Error() != nil {
-			log.Printf("E! MQTT Subscribe Error\ntopics: %s\nerror: %s",
-				strings.Join(m.Topics[:], ","), subscribeToken.Error())
+			m.acc.AddError(fmt.Errorf("E! MQTT Subscribe Error\ntopics: %s\nerror: %s",
+				strings.Join(m.Topics[:], ","), subscribeToken.Error()))
 		}
 		m.started = true
 	}
@@ -151,7 +151,7 @@ func (m *MQTTConsumer) onConnect(c mqtt.Client) {
 }
 
 func (m *MQTTConsumer) onConnectionLost(c mqtt.Client, err error) {
-	log.Printf("E! MQTT Connection lost\nerror: %s\nMQTT Client will try to reconnect", err.Error())
+	m.acc.AddError(fmt.Errorf("E! MQTT Connection lost\nerror: %s\nMQTT Client will try to reconnect", err.Error()))
 	return
 }
 
@@ -166,8 +166,8 @@ func (m *MQTTConsumer) receiver() {
 			topic := msg.Topic()
 			metrics, err := m.parser.Parse(msg.Payload())
 			if err != nil {
-				log.Printf("E! MQTT Parse Error\nmessage: %s\nerror: %s",
-					string(msg.Payload()), err.Error())
+				m.acc.AddError(fmt.Errorf("E! MQTT Parse Error\nmessage: %s\nerror: %s",
+					string(msg.Payload()), err.Error()))
 			}
 
 			for _, metric := range metrics {
diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go
index 2f5276191..027e4818b 100644
--- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go
+++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go
@@ -2,7 +2,6 @@ package mqtt_consumer
 
 import (
 	"testing"
-	"time"
 
 	"github.com/influxdata/telegraf/plugins/parsers"
 	"github.com/influxdata/telegraf/testutil"
@@ -86,7 +85,7 @@ func TestRunParser(t *testing.T) {
 	n.parser, _ = parsers.NewInfluxParser()
 	go n.receiver()
 	in <- mqttMsg(testMsgNeg)
-	time.Sleep(time.Millisecond * 250)
+	acc.Wait(1)
 
 	if a := acc.NFields(); a != 1 {
 		t.Errorf("got %v, expected %v", a, 1)
@@ -102,7 +101,7 @@ func TestRunParserNegativeNumber(t *testing.T) {
 	n.parser, _ = parsers.NewInfluxParser()
 	go n.receiver()
 	in <- mqttMsg(testMsg)
-	time.Sleep(time.Millisecond * 25)
+	acc.Wait(1)
 
 	if a := acc.NFields(); a != 1 {
 		t.Errorf("got %v, expected %v", a, 1)
@@ -119,11 +118,12 @@ func TestRunParserInvalidMsg(t *testing.T) {
 	n.parser, _ = parsers.NewInfluxParser()
 	go n.receiver()
 	in <- mqttMsg(invalidMsg)
-	time.Sleep(time.Millisecond * 25)
+	acc.WaitError(1)
 
 	if a := acc.NFields(); a != 0 {
 		t.Errorf("got %v, expected %v", a, 0)
 	}
+	assert.Contains(t, acc.Errors[0].Error(), "MQTT Parse Error")
 }
 
 // Test that the parser parses line format messages into metrics
@@ -136,7 +136,7 @@ func TestRunParserAndGather(t *testing.T) {
 	n.parser, _ = parsers.NewInfluxParser()
 	go n.receiver()
 	in <- mqttMsg(testMsg)
-	time.Sleep(time.Millisecond * 25)
+	acc.Wait(1)
 
 	n.Gather(&acc)
 
@@ -154,9 +154,9 @@ func TestRunParserAndGatherGraphite(t *testing.T) {
 	n.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil)
 	go n.receiver()
 	in <- mqttMsg(testMsgGraphite)
-	time.Sleep(time.Millisecond * 25)
 
 	n.Gather(&acc)
+	acc.Wait(1)
 
 	acc.AssertContainsFields(t, "cpu_load_short_graphite",
 		map[string]interface{}{"value": float64(23422)})
@@ -172,10 +172,11 @@ func TestRunParserAndGatherJSON(t *testing.T) {
 	n.parser, _ = parsers.NewJSONParser("nats_json_test", []string{}, nil)
 	go n.receiver()
 	in <- mqttMsg(testMsgJSON)
-	time.Sleep(time.Millisecond * 25)
 
 	n.Gather(&acc)
 
+	acc.Wait(1)
+
 	acc.AssertContainsFields(t, "nats_json_test",
 		map[string]interface{}{
 			"a":   float64(5),
diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go
index cbb85e016..7c9f53941 100644
--- a/plugins/inputs/nats_consumer/nats_consumer.go
+++ b/plugins/inputs/nats_consumer/nats_consumer.go
@@ -162,11 +162,11 @@ func (n *natsConsumer) receiver() {
 		case <-n.done:
 			return
 		case err := <-n.errs:
-			log.Printf("E! error reading from %s\n", err.Error())
+			n.acc.AddError(fmt.Errorf("E! error reading from %s\n", err.Error()))
 		case msg := <-n.in:
 			metrics, err := n.parser.Parse(msg.Data)
 			if err != nil {
-				log.Printf("E! subject: %s, error: %s", msg.Subject, err.Error())
+				n.acc.AddError(fmt.Errorf("E! subject: %s, error: %s", msg.Subject, err.Error()))
 			}
 
 			for _, metric := range metrics {
@@ -179,8 +179,8 @@ func (n *natsConsumer) receiver() {
 func (n *natsConsumer) clean() {
 	for _, sub := range n.Subs {
 		if err := sub.Unsubscribe(); err != nil {
-			log.Printf("E! Error unsubscribing from subject %s in queue %s: %s\n",
-				sub.Subject, sub.Queue, err.Error())
+			n.acc.AddError(fmt.Errorf("E! Error unsubscribing from subject %s in queue %s: %s\n",
+				sub.Subject, sub.Queue, err.Error()))
 		}
 	}
 
diff --git a/plugins/inputs/nats_consumer/nats_consumer_test.go b/plugins/inputs/nats_consumer/nats_consumer_test.go
index 2f4d14d73..30ba0d2af 100644
--- a/plugins/inputs/nats_consumer/nats_consumer_test.go
+++ b/plugins/inputs/nats_consumer/nats_consumer_test.go
@@ -2,11 +2,11 @@ package natsconsumer
 
 import (
 	"testing"
-	"time"
 
 	"github.com/influxdata/telegraf/plugins/parsers"
 	"github.com/influxdata/telegraf/testutil"
 	"github.com/nats-io/nats"
+	"github.com/stretchr/testify/assert"
 )
 
 const (
@@ -42,11 +42,8 @@ func TestRunParser(t *testing.T) {
 	n.wg.Add(1)
 	go n.receiver()
 	in <- natsMsg(testMsg)
-	time.Sleep(time.Millisecond * 25)
 
-	if acc.NFields() != 1 {
-		t.Errorf("got %v, expected %v", acc.NFields(), 1)
-	}
+	acc.Wait(1)
 }
 
 // Test that the parser ignores invalid messages
@@ -60,11 +57,10 @@ func TestRunParserInvalidMsg(t *testing.T) {
 	n.wg.Add(1)
 	go n.receiver()
 	in <- natsMsg(invalidMsg)
-	time.Sleep(time.Millisecond * 25)
 
-	if acc.NFields() != 0 {
-		t.Errorf("got %v, expected %v", acc.NFields(), 0)
-	}
+	acc.WaitError(1)
+	assert.Contains(t, acc.Errors[0].Error(), "E! subject: telegraf, error:  metric parsing error")
+	assert.EqualValues(t, 0, acc.NMetrics())
 }
 
 // Test that the parser parses line format messages into metrics
@@ -78,10 +74,10 @@ func TestRunParserAndGather(t *testing.T) {
 	n.wg.Add(1)
 	go n.receiver()
 	in <- natsMsg(testMsg)
-	time.Sleep(time.Millisecond * 25)
 
 	n.Gather(&acc)
 
+	acc.Wait(1)
 	acc.AssertContainsFields(t, "cpu_load_short",
 		map[string]interface{}{"value": float64(23422)})
 }
@@ -97,10 +93,10 @@ func TestRunParserAndGatherGraphite(t *testing.T) {
 	n.wg.Add(1)
 	go n.receiver()
 	in <- natsMsg(testMsgGraphite)
-	time.Sleep(time.Millisecond * 25)
 
 	n.Gather(&acc)
 
+	acc.Wait(1)
 	acc.AssertContainsFields(t, "cpu_load_short_graphite",
 		map[string]interface{}{"value": float64(23422)})
 }
@@ -116,10 +112,10 @@ func TestRunParserAndGatherJSON(t *testing.T) {
 	n.wg.Add(1)
 	go n.receiver()
 	in <- natsMsg(testMsgJSON)
-	time.Sleep(time.Millisecond * 25)
 
 	n.Gather(&acc)
 
+	acc.Wait(1)
 	acc.AssertContainsFields(t, "nats_json_test",
 		map[string]interface{}{
 			"a":   float64(5),
diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go
index 6764b6d2d..9fa472809 100644
--- a/plugins/inputs/socket_listener/socket_listener_test.go
+++ b/plugins/inputs/socket_listener/socket_listener_test.go
@@ -81,42 +81,25 @@ func testSocketListener(t *testing.T, sl *SocketListener, client net.Conn) {
 
 	acc := sl.Accumulator.(*testutil.Accumulator)
 
+	acc.Wait(3)
 	acc.Lock()
-	if len(acc.Metrics) < 1 {
-		acc.Wait()
-	}
-	require.True(t, len(acc.Metrics) >= 1)
-	m := acc.Metrics[0]
+	m1 := acc.Metrics[0]
+	m2 := acc.Metrics[1]
+	m3 := acc.Metrics[2]
 	acc.Unlock()
 
-	assert.Equal(t, "test", m.Measurement)
-	assert.Equal(t, map[string]string{"foo": "bar"}, m.Tags)
-	assert.Equal(t, map[string]interface{}{"v": int64(1)}, m.Fields)
-	assert.True(t, time.Unix(0, 123456789).Equal(m.Time))
+	assert.Equal(t, "test", m1.Measurement)
+	assert.Equal(t, map[string]string{"foo": "bar"}, m1.Tags)
+	assert.Equal(t, map[string]interface{}{"v": int64(1)}, m1.Fields)
+	assert.True(t, time.Unix(0, 123456789).Equal(m1.Time))
 
-	acc.Lock()
-	if len(acc.Metrics) < 2 {
-		acc.Wait()
-	}
-	require.True(t, len(acc.Metrics) >= 2)
-	m = acc.Metrics[1]
-	acc.Unlock()
+	assert.Equal(t, "test", m2.Measurement)
+	assert.Equal(t, map[string]string{"foo": "baz"}, m2.Tags)
+	assert.Equal(t, map[string]interface{}{"v": int64(2)}, m2.Fields)
+	assert.True(t, time.Unix(0, 123456790).Equal(m2.Time))
 
-	assert.Equal(t, "test", m.Measurement)
-	assert.Equal(t, map[string]string{"foo": "baz"}, m.Tags)
-	assert.Equal(t, map[string]interface{}{"v": int64(2)}, m.Fields)
-	assert.True(t, time.Unix(0, 123456790).Equal(m.Time))
-
-	acc.Lock()
-	if len(acc.Metrics) < 3 {
-		acc.Wait()
-	}
-	require.True(t, len(acc.Metrics) >= 3)
-	m = acc.Metrics[2]
-	acc.Unlock()
-
-	assert.Equal(t, "test", m.Measurement)
-	assert.Equal(t, map[string]string{"foo": "zab"}, m.Tags)
-	assert.Equal(t, map[string]interface{}{"v": int64(3)}, m.Fields)
-	assert.True(t, time.Unix(0, 123456791).Equal(m.Time))
+	assert.Equal(t, "test", m3.Measurement)
+	assert.Equal(t, map[string]string{"foo": "zab"}, m3.Tags)
+	assert.Equal(t, map[string]interface{}{"v": int64(3)}, m3.Fields)
+	assert.True(t, time.Unix(0, 123456791).Equal(m3.Time))
 }
diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go
index 508c1e320..0c19f9116 100644
--- a/plugins/inputs/tail/tail.go
+++ b/plugins/inputs/tail/tail.go
@@ -2,7 +2,6 @@ package tail
 
 import (
 	"fmt"
-	"log"
 	"sync"
 
 	"github.com/hpcloud/tail"
@@ -86,7 +85,7 @@ func (t *Tail) Start(acc telegraf.Accumulator) error {
 	for _, filepath := range t.Files {
 		g, err := globpath.Compile(filepath)
 		if err != nil {
-			log.Printf("E! Error Glob %s failed to compile, %s", filepath, err)
+			t.acc.AddError(fmt.Errorf("E! Error Glob %s failed to compile, %s", filepath, err))
 		}
 		for file, _ := range g.Match() {
 			tailer, err := tail.TailFile(file,
@@ -124,21 +123,21 @@ func (t *Tail) receiver(tailer *tail.Tail) {
 	var line *tail.Line
 	for line = range tailer.Lines {
 		if line.Err != nil {
-			log.Printf("E! Error tailing file %s, Error: %s\n",
-				tailer.Filename, err)
+			t.acc.AddError(fmt.Errorf("E! Error tailing file %s, Error: %s\n",
+				tailer.Filename, err))
 			continue
 		}
 		m, err = t.parser.ParseLine(line.Text)
 		if err == nil {
 			t.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
 		} else {
-			log.Printf("E! Malformed log line in %s: [%s], Error: %s\n",
-				tailer.Filename, line.Text, err)
+			t.acc.AddError(fmt.Errorf("E! Malformed log line in %s: [%s], Error: %s\n",
+				tailer.Filename, line.Text, err))
 		}
 	}
 	if err := tailer.Err(); err != nil {
-		log.Printf("E! Error tailing file %s, Error: %s\n",
-			tailer.Filename, err)
+		t.acc.AddError(fmt.Errorf("E! Error tailing file %s, Error: %s\n",
+			tailer.Filename, err))
 	}
 }
 
@@ -146,12 +145,12 @@ func (t *Tail) Stop() {
 	t.Lock()
 	defer t.Unlock()
 
-	for _, t := range t.tailers {
-		err := t.Stop()
+	for _, tailer := range t.tailers {
+		err := tailer.Stop()
 		if err != nil {
-			log.Printf("E! Error stopping tail on file %s\n", t.Filename)
+			t.acc.AddError(fmt.Errorf("E! Error stopping tail on file %s\n", tailer.Filename))
 		}
-		t.Cleanup()
+		tailer.Cleanup()
 	}
 	t.wg.Wait()
 }
diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go
index 31ecfbf30..b927d160c 100644
--- a/plugins/inputs/tail/tail_test.go
+++ b/plugins/inputs/tail/tail_test.go
@@ -3,6 +3,7 @@ package tail
 import (
 	"io/ioutil"
 	"os"
+	"runtime"
 	"testing"
 	"time"
 
@@ -30,11 +31,9 @@ func TestTailFromBeginning(t *testing.T) {
 
 	acc := testutil.Accumulator{}
 	require.NoError(t, tt.Start(&acc))
-	time.Sleep(time.Millisecond * 100)
 	require.NoError(t, tt.Gather(&acc))
-	// arbitrary sleep to wait for message to show up
-	time.Sleep(time.Millisecond * 150)
 
+	acc.Wait(1)
 	acc.AssertContainsTaggedFields(t, "cpu",
 		map[string]interface{}{
 			"usage_idle": float64(100),
@@ -60,13 +59,19 @@ func TestTailFromEnd(t *testing.T) {
 
 	acc := testutil.Accumulator{}
 	require.NoError(t, tt.Start(&acc))
-	time.Sleep(time.Millisecond * 100)
+	time.Sleep(time.Millisecond * 200) //TODO remove once https://github.com/hpcloud/tail/pull/114 is merged & added to Godeps
+	for _, tailer := range tt.tailers {
+		for n, err := tailer.Tell(); err == nil && n == 0; n, err = tailer.Tell() {
+			// wait for tailer to jump to end
+			runtime.Gosched()
+		}
+	}
 
 	_, err = tmpfile.WriteString("cpu,othertag=foo usage_idle=100\n")
 	require.NoError(t, err)
 	require.NoError(t, tt.Gather(&acc))
-	time.Sleep(time.Millisecond * 50)
 
+	acc.Wait(1)
 	acc.AssertContainsTaggedFields(t, "cpu",
 		map[string]interface{}{
 			"usage_idle": float64(100),
@@ -96,7 +101,7 @@ func TestTailBadLine(t *testing.T) {
 	_, err = tmpfile.WriteString("cpu mytag= foo usage_idle= 100\n")
 	require.NoError(t, err)
 	require.NoError(t, tt.Gather(&acc))
-	time.Sleep(time.Millisecond * 50)
 
-	assert.Len(t, acc.Metrics, 0)
+	acc.WaitError(1)
+	assert.Contains(t, acc.Errors[0].Error(), "E! Malformed log line")
 }
diff --git a/plugins/inputs/tcp_listener/tcp_listener_test.go b/plugins/inputs/tcp_listener/tcp_listener_test.go
index f7e5784d3..27ced791c 100644
--- a/plugins/inputs/tcp_listener/tcp_listener_test.go
+++ b/plugins/inputs/tcp_listener/tcp_listener_test.go
@@ -1,10 +1,15 @@
 package tcp_listener
 
 import (
+	"bufio"
+	"bytes"
 	"fmt"
+	"io"
+	"log"
 	"net"
+	"os"
+	"strings"
 	"testing"
-	"time"
 
 	"github.com/influxdata/telegraf/plugins/parsers"
 	"github.com/influxdata/telegraf/testutil"
@@ -54,7 +59,6 @@ func BenchmarkTCP(b *testing.B) {
 			panic(err)
 		}
 
-		time.Sleep(time.Millisecond * 25)
 		conn, err := net.Dial("tcp", "127.0.0.1:8198")
 		if err != nil {
 			panic(err)
@@ -62,8 +66,10 @@ func BenchmarkTCP(b *testing.B) {
 		for i := 0; i < 100000; i++ {
 			fmt.Fprintf(conn, testMsg)
 		}
-		// wait for 100,000 metrics to get added to accumulator
-		time.Sleep(time.Millisecond)
+		conn.(*net.TCPConn).CloseWrite()
+		// wait for all 100,000 metrics to be processed
+		buf := []byte{0}
+		conn.Read(buf) // will EOF when completed
 		listener.Stop()
 	}
 }
@@ -81,16 +87,18 @@ func TestHighTrafficTCP(t *testing.T) {
 	err := listener.Start(acc)
 	require.NoError(t, err)
 
-	time.Sleep(time.Millisecond * 25)
 	conn, err := net.Dial("tcp", "127.0.0.1:8199")
 	require.NoError(t, err)
 	for i := 0; i < 100000; i++ {
 		fmt.Fprintf(conn, testMsg)
 	}
-	time.Sleep(time.Millisecond)
+	conn.(*net.TCPConn).CloseWrite()
+	buf := []byte{0}
+	_, err = conn.Read(buf)
+	assert.Equal(t, err, io.EOF)
 	listener.Stop()
 
-	assert.Equal(t, 100000, len(acc.Metrics))
+	assert.Equal(t, 100000, int(acc.NMetrics()))
 }
 
 func TestConnectTCP(t *testing.T) {
@@ -105,13 +113,12 @@ func TestConnectTCP(t *testing.T) {
 	require.NoError(t, listener.Start(acc))
 	defer listener.Stop()
 
-	time.Sleep(time.Millisecond * 25)
 	conn, err := net.Dial("tcp", "127.0.0.1:8194")
 	require.NoError(t, err)
 
 	// send single message to socket
 	fmt.Fprintf(conn, testMsg)
-	time.Sleep(time.Millisecond * 15)
+	acc.Wait(1)
 	acc.AssertContainsTaggedFields(t, "cpu_load_short",
 		map[string]interface{}{"value": float64(12)},
 		map[string]string{"host": "server01"},
@@ -119,7 +126,7 @@ func TestConnectTCP(t *testing.T) {
 
 	// send multiple messages to socket
 	fmt.Fprintf(conn, testMsgs)
-	time.Sleep(time.Millisecond * 15)
+	acc.Wait(6)
 	hostTags := []string{"server02", "server03",
 		"server04", "server05", "server06"}
 	for _, hostTag := range hostTags {
@@ -143,7 +150,6 @@ func TestConcurrentConns(t *testing.T) {
 	require.NoError(t, listener.Start(acc))
 	defer listener.Stop()
 
-	time.Sleep(time.Millisecond * 25)
 	_, err := net.Dial("tcp", "127.0.0.1:8195")
 	assert.NoError(t, err)
 	_, err = net.Dial("tcp", "127.0.0.1:8195")
@@ -162,10 +168,8 @@ func TestConcurrentConns(t *testing.T) {
 			" the Telegraf tcp listener configuration.\n",
 		string(buf[:n]))
 
-	_, err = conn.Write([]byte(testMsg))
-	assert.NoError(t, err)
-	time.Sleep(time.Millisecond * 10)
-	assert.Zero(t, acc.NFields())
+	_, err = conn.Read(buf)
+	assert.Equal(t, io.EOF, err)
 }
 
 // Test that MaxTCPConections is respected when max==1
@@ -181,7 +185,6 @@ func TestConcurrentConns1(t *testing.T) {
 	require.NoError(t, listener.Start(acc))
 	defer listener.Stop()
 
-	time.Sleep(time.Millisecond * 25)
 	_, err := net.Dial("tcp", "127.0.0.1:8196")
 	assert.NoError(t, err)
 
@@ -198,10 +201,8 @@ func TestConcurrentConns1(t *testing.T) {
 			" the Telegraf tcp listener configuration.\n",
 		string(buf[:n]))
 
-	_, err = conn.Write([]byte(testMsg))
-	assert.NoError(t, err)
-	time.Sleep(time.Millisecond * 10)
-	assert.Zero(t, acc.NFields())
+	_, err = conn.Read(buf)
+	assert.Equal(t, io.EOF, err)
 }
 
 // Test that MaxTCPConections is respected
@@ -216,7 +217,6 @@ func TestCloseConcurrentConns(t *testing.T) {
 	acc := &testutil.Accumulator{}
 	require.NoError(t, listener.Start(acc))
 
-	time.Sleep(time.Millisecond * 25)
 	_, err := net.Dial("tcp", "127.0.0.1:8195")
 	assert.NoError(t, err)
 	_, err = net.Dial("tcp", "127.0.0.1:8195")
@@ -238,13 +238,9 @@ func TestRunParser(t *testing.T) {
 	go listener.tcpParser()
 
 	in <- testmsg
-	time.Sleep(time.Millisecond * 25)
 	listener.Gather(&acc)
 
-	if a := acc.NFields(); a != 1 {
-		t.Errorf("got %v, expected %v", a, 1)
-	}
-
+	acc.Wait(1)
 	acc.AssertContainsTaggedFields(t, "cpu_load_short",
 		map[string]interface{}{"value": float64(12)},
 		map[string]string{"host": "server01"},
@@ -263,11 +259,16 @@ func TestRunParserInvalidMsg(t *testing.T) {
 	listener.wg.Add(1)
 	go listener.tcpParser()
 
+	buf := bytes.NewBuffer(nil)
+	log.SetOutput(buf)
+	defer log.SetOutput(os.Stderr)
 	in <- testmsg
-	time.Sleep(time.Millisecond * 25)
 
-	if a := acc.NFields(); a != 0 {
-		t.Errorf("got %v, expected %v", a, 0)
+	scnr := bufio.NewScanner(buf)
+	for scnr.Scan() {
+		if strings.Contains(scnr.Text(), fmt.Sprintf(malformedwarn, 1)) {
+			break
+		}
 	}
 }
 
@@ -284,9 +285,9 @@ func TestRunParserGraphiteMsg(t *testing.T) {
 	go listener.tcpParser()
 
 	in <- testmsg
-	time.Sleep(time.Millisecond * 25)
 	listener.Gather(&acc)
 
+	acc.Wait(1)
 	acc.AssertContainsFields(t, "cpu_load_graphite",
 		map[string]interface{}{"value": float64(12)})
 }
@@ -304,9 +305,9 @@ func TestRunParserJSONMsg(t *testing.T) {
 	go listener.tcpParser()
 
 	in <- testmsg
-	time.Sleep(time.Millisecond * 25)
 	listener.Gather(&acc)
 
+	acc.Wait(1)
 	acc.AssertContainsFields(t, "udp_json_test",
 		map[string]interface{}{
 			"a":   float64(5),
diff --git a/plugins/inputs/udp_listener/udp_listener.go b/plugins/inputs/udp_listener/udp_listener.go
index 53c6a72f5..d0a728b3c 100644
--- a/plugins/inputs/udp_listener/udp_listener.go
+++ b/plugins/inputs/udp_listener/udp_listener.go
@@ -1,6 +1,7 @@
 package udp_listener
 
 import (
+	"fmt"
 	"log"
 	"net"
 	"sync"
@@ -107,8 +108,9 @@ func (u *UdpListener) Start(acc telegraf.Accumulator) error {
 	u.in = make(chan []byte, u.AllowedPendingMessages)
 	u.done = make(chan struct{})
 
-	u.wg.Add(2)
-	go u.udpListen()
+	u.udpListen()
+
+	u.wg.Add(1)
 	go u.udpParser()
 
 	log.Printf("I! Started UDP listener service on %s (ReadBuffer: %d)\n", u.ServiceAddress, u.UDPBufferSize)
@@ -126,32 +128,37 @@ func (u *UdpListener) Stop() {
 }
 
 func (u *UdpListener) udpListen() error {
-	defer u.wg.Done()
 	var err error
 
 	address, _ := net.ResolveUDPAddr("udp", u.ServiceAddress)
 	u.listener, err = net.ListenUDP("udp", address)
 
 	if err != nil {
-		log.Fatalf("E! Error: ListenUDP - %s", err)
+		return fmt.Errorf("E! Error: ListenUDP - %s", err)
 	}
 
 	log.Println("I! UDP server listening on: ", u.listener.LocalAddr().String())
 
-	buf := make([]byte, UDP_MAX_PACKET_SIZE)
-
 	if u.UDPBufferSize > 0 {
 		err = u.listener.SetReadBuffer(u.UDPBufferSize) // if we want to move away from OS default
 		if err != nil {
-			log.Printf("E! Failed to set UDP read buffer to %d: %s", u.UDPBufferSize, err)
-			return err
+			return fmt.Errorf("E! Failed to set UDP read buffer to %d: %s", u.UDPBufferSize, err)
 		}
 	}
 
+	u.wg.Add(1)
+	go u.udpListenLoop()
+	return nil
+}
+
+func (u *UdpListener) udpListenLoop() {
+	defer u.wg.Done()
+
+	buf := make([]byte, UDP_MAX_PACKET_SIZE)
 	for {
 		select {
 		case <-u.done:
-			return nil
+			return
 		default:
 			u.listener.SetReadDeadline(time.Now().Add(time.Second))
 
diff --git a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go
index eefdd593e..4d78a1a42 100644
--- a/plugins/inputs/udp_listener/udp_listener_test.go
+++ b/plugins/inputs/udp_listener/udp_listener_test.go
@@ -1,12 +1,16 @@
 package udp_listener
 
 import (
+	"bufio"
+	"bytes"
 	"fmt"
 	"io/ioutil"
 	"log"
 	"net"
+	"os"
+	"runtime"
+	"strings"
 	"testing"
-	"time"
 
 	"github.com/influxdata/telegraf/plugins/parsers"
 	"github.com/influxdata/telegraf/testutil"
@@ -50,22 +54,27 @@ func TestHighTrafficUDP(t *testing.T) {
 	err := listener.Start(acc)
 	require.NoError(t, err)
 
-	time.Sleep(time.Millisecond * 25)
 	conn, err := net.Dial("udp", "127.0.0.1:8126")
 	require.NoError(t, err)
+	mlen := int64(len(testMsgs))
+	var sent int64
 	for i := 0; i < 20000; i++ {
-		// arbitrary, just to give the OS buffer some slack handling the
-		// packet storm.
-		time.Sleep(time.Microsecond)
-		fmt.Fprintf(conn, testMsgs)
+		for sent > listener.BytesRecv.Get()+32000 {
+			// more than 32kb sitting in OS buffer, let it drain
+			runtime.Gosched()
+		}
+		conn.Write([]byte(testMsgs))
+		sent += mlen
+	}
+	for sent > listener.BytesRecv.Get() {
+		runtime.Gosched()
+	}
+	for len(listener.in) > 0 {
+		runtime.Gosched()
 	}
-	time.Sleep(time.Millisecond)
 	listener.Stop()
 
-	// this is not an exact science, since UDP packets can easily get lost or
-	// dropped, but assume that the OS will be able to
-	// handle at least 90% of the sent UDP packets.
-	assert.InDelta(t, 100000, len(acc.Metrics), 10000)
+	assert.Equal(t, uint64(100000), acc.NMetrics())
 }
 
 func TestConnectUDP(t *testing.T) {
@@ -79,13 +88,12 @@ func TestConnectUDP(t *testing.T) {
 	require.NoError(t, listener.Start(acc))
 	defer listener.Stop()
 
-	time.Sleep(time.Millisecond * 25)
 	conn, err := net.Dial("udp", "127.0.0.1:8127")
 	require.NoError(t, err)
 
 	// send single message to socket
 	fmt.Fprintf(conn, testMsg)
-	time.Sleep(time.Millisecond * 15)
+	acc.Wait(1)
 	acc.AssertContainsTaggedFields(t, "cpu_load_short",
 		map[string]interface{}{"value": float64(12)},
 		map[string]string{"host": "server01"},
@@ -93,7 +101,7 @@ func TestConnectUDP(t *testing.T) {
 
 	// send multiple messages to socket
 	fmt.Fprintf(conn, testMsgs)
-	time.Sleep(time.Millisecond * 15)
+	acc.Wait(6)
 	hostTags := []string{"server02", "server03",
 		"server04", "server05", "server06"}
 	for _, hostTag := range hostTags {
@@ -118,13 +126,9 @@ func TestRunParser(t *testing.T) {
 	go listener.udpParser()
 
 	in <- testmsg
-	time.Sleep(time.Millisecond * 25)
 	listener.Gather(&acc)
 
-	if a := acc.NFields(); a != 1 {
-		t.Errorf("got %v, expected %v", a, 1)
-	}
-
+	acc.Wait(1)
 	acc.AssertContainsTaggedFields(t, "cpu_load_short",
 		map[string]interface{}{"value": float64(12)},
 		map[string]string{"host": "server01"},
@@ -144,11 +148,16 @@ func TestRunParserInvalidMsg(t *testing.T) {
 	listener.wg.Add(1)
 	go listener.udpParser()
 
+	buf := bytes.NewBuffer(nil)
+	log.SetOutput(buf)
+	defer log.SetOutput(os.Stderr)
 	in <- testmsg
-	time.Sleep(time.Millisecond * 25)
 
-	if a := acc.NFields(); a != 0 {
-		t.Errorf("got %v, expected %v", a, 0)
+	scnr := bufio.NewScanner(buf)
+	for scnr.Scan() {
+		if strings.Contains(scnr.Text(), fmt.Sprintf(malformedwarn, 1)) {
+			break
+		}
 	}
 }
 
@@ -166,9 +175,9 @@ func TestRunParserGraphiteMsg(t *testing.T) {
 	go listener.udpParser()
 
 	in <- testmsg
-	time.Sleep(time.Millisecond * 25)
 	listener.Gather(&acc)
 
+	acc.Wait(1)
 	acc.AssertContainsFields(t, "cpu_load_graphite",
 		map[string]interface{}{"value": float64(12)})
 }
@@ -187,9 +196,9 @@ func TestRunParserJSONMsg(t *testing.T) {
 	go listener.udpParser()
 
 	in <- testmsg
-	time.Sleep(time.Millisecond * 25)
 	listener.Gather(&acc)
 
+	acc.Wait(1)
 	acc.AssertContainsFields(t, "udp_json_test",
 		map[string]interface{}{
 			"a":   float64(5),
diff --git a/plugins/outputs/graphite/graphite_test.go b/plugins/outputs/graphite/graphite_test.go
index 4f1f2fef6..3984728af 100644
--- a/plugins/outputs/graphite/graphite_test.go
+++ b/plugins/outputs/graphite/graphite_test.go
@@ -44,9 +44,7 @@ func TestGraphiteOK(t *testing.T) {
 	// Start TCP server
 	wg.Add(1)
 	t.Log("Starting server")
-	go TCPServer1(t, &wg)
-	// Give the fake graphite TCP server some time to start:
-	time.Sleep(time.Millisecond * 100)
+	TCPServer1(t, &wg)
 
 	// Init plugin
 	g := Graphite{
@@ -88,10 +86,8 @@ func TestGraphiteOK(t *testing.T) {
 	t.Log("Finished Waiting for first data")
 	var wg2 sync.WaitGroup
 	// Start TCP server
-	time.Sleep(time.Millisecond * 100)
 	wg2.Add(1)
-	go TCPServer2(t, &wg2)
-	time.Sleep(time.Millisecond * 100)
+	TCPServer2(t, &wg2)
 	//Write but expect an error, but reconnect
 	g.Write(metrics2)
 	err3 := g.Write(metrics2)
@@ -105,27 +101,31 @@ func TestGraphiteOK(t *testing.T) {
 }
 
 func TCPServer1(t *testing.T, wg *sync.WaitGroup) {
-	defer wg.Done()
 	tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003")
-	conn, _ := (tcpServer).Accept()
-	reader := bufio.NewReader(conn)
-	tp := textproto.NewReader(reader)
-	data1, _ := tp.ReadLine()
-	assert.Equal(t, "my.prefix.192_168_0_1.mymeasurement.myfield 3.14 1289430000", data1)
-	conn.Close()
-	tcpServer.Close()
+	go func() {
+		defer wg.Done()
+		conn, _ := (tcpServer).Accept()
+		reader := bufio.NewReader(conn)
+		tp := textproto.NewReader(reader)
+		data1, _ := tp.ReadLine()
+		assert.Equal(t, "my.prefix.192_168_0_1.mymeasurement.myfield 3.14 1289430000", data1)
+		conn.Close()
+		tcpServer.Close()
+	}()
 }
 
 func TCPServer2(t *testing.T, wg *sync.WaitGroup) {
-	defer wg.Done()
 	tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003")
-	conn2, _ := (tcpServer).Accept()
-	reader := bufio.NewReader(conn2)
-	tp := textproto.NewReader(reader)
-	data2, _ := tp.ReadLine()
-	assert.Equal(t, "my.prefix.192_168_0_1.mymeasurement 3.14 1289430000", data2)
-	data3, _ := tp.ReadLine()
-	assert.Equal(t, "my.prefix.192_168_0_1.my_measurement 3.14 1289430000", data3)
-	conn2.Close()
-	tcpServer.Close()
+	go func() {
+		defer wg.Done()
+		conn2, _ := (tcpServer).Accept()
+		reader := bufio.NewReader(conn2)
+		tp := textproto.NewReader(reader)
+		data2, _ := tp.ReadLine()
+		assert.Equal(t, "my.prefix.192_168_0_1.mymeasurement 3.14 1289430000", data2)
+		data3, _ := tp.ReadLine()
+		assert.Equal(t, "my.prefix.192_168_0_1.my_measurement 3.14 1289430000", data3)
+		conn2.Close()
+		tcpServer.Close()
+	}()
 }
diff --git a/plugins/outputs/influxdb/client/udp_test.go b/plugins/outputs/influxdb/client/udp_test.go
index 31196ddca..84efe0b22 100644
--- a/plugins/outputs/influxdb/client/udp_test.go
+++ b/plugins/outputs/influxdb/client/udp_test.go
@@ -66,7 +66,6 @@ func TestUDPClient_Write(t *testing.T) {
 	}()
 
 	// test sending simple metric
-	time.Sleep(time.Second)
 	n, err := client.Write([]byte("cpu value=99\n"))
 	assert.Equal(t, n, 13)
 	assert.NoError(t, err)
diff --git a/plugins/outputs/instrumental/instrumental_test.go b/plugins/outputs/instrumental/instrumental_test.go
index d77d8eb05..0d3ce9040 100644
--- a/plugins/outputs/instrumental/instrumental_test.go
+++ b/plugins/outputs/instrumental/instrumental_test.go
@@ -16,9 +16,7 @@ import (
 func TestWrite(t *testing.T) {
 	var wg sync.WaitGroup
 	wg.Add(1)
-	go TCPServer(t, &wg)
-	// Give the fake TCP server some time to start:
-	time.Sleep(time.Millisecond * 100)
+	TCPServer(t, &wg)
 
 	i := Instrumental{
 		Host:     "127.0.0.1",
@@ -79,45 +77,47 @@ func TestWrite(t *testing.T) {
 
 func TCPServer(t *testing.T, wg *sync.WaitGroup) {
 	tcpServer, _ := net.Listen("tcp", "127.0.0.1:8000")
-	defer wg.Done()
-	conn, _ := tcpServer.Accept()
-	conn.SetDeadline(time.Now().Add(1 * time.Second))
-	reader := bufio.NewReader(conn)
-	tp := textproto.NewReader(reader)
+	go func() {
+		defer wg.Done()
+		conn, _ := tcpServer.Accept()
+		conn.SetDeadline(time.Now().Add(1 * time.Second))
+		reader := bufio.NewReader(conn)
+		tp := textproto.NewReader(reader)
 
-	hello, _ := tp.ReadLine()
-	assert.Equal(t, "hello version go/telegraf/1.1", hello)
-	auth, _ := tp.ReadLine()
-	assert.Equal(t, "authenticate abc123token", auth)
-	conn.Write([]byte("ok\nok\n"))
+		hello, _ := tp.ReadLine()
+		assert.Equal(t, "hello version go/telegraf/1.1", hello)
+		auth, _ := tp.ReadLine()
+		assert.Equal(t, "authenticate abc123token", auth)
+		conn.Write([]byte("ok\nok\n"))
 
-	data1, _ := tp.ReadLine()
-	assert.Equal(t, "gauge my.prefix.192_168_0_1.mymeasurement.myfield 3.14 1289430000", data1)
-	data2, _ := tp.ReadLine()
-	assert.Equal(t, "gauge my.prefix.192_168_0_1.mymeasurement 3.14 1289430000", data2)
+		data1, _ := tp.ReadLine()
+		assert.Equal(t, "gauge my.prefix.192_168_0_1.mymeasurement.myfield 3.14 1289430000", data1)
+		data2, _ := tp.ReadLine()
+		assert.Equal(t, "gauge my.prefix.192_168_0_1.mymeasurement 3.14 1289430000", data2)
 
-	conn, _ = tcpServer.Accept()
-	conn.SetDeadline(time.Now().Add(1 * time.Second))
-	reader = bufio.NewReader(conn)
-	tp = textproto.NewReader(reader)
+		conn, _ = tcpServer.Accept()
+		conn.SetDeadline(time.Now().Add(1 * time.Second))
+		reader = bufio.NewReader(conn)
+		tp = textproto.NewReader(reader)
 
-	hello, _ = tp.ReadLine()
-	assert.Equal(t, "hello version go/telegraf/1.1", hello)
-	auth, _ = tp.ReadLine()
-	assert.Equal(t, "authenticate abc123token", auth)
-	conn.Write([]byte("ok\nok\n"))
+		hello, _ = tp.ReadLine()
+		assert.Equal(t, "hello version go/telegraf/1.1", hello)
+		auth, _ = tp.ReadLine()
+		assert.Equal(t, "authenticate abc123token", auth)
+		conn.Write([]byte("ok\nok\n"))
 
-	data3, _ := tp.ReadLine()
-	assert.Equal(t, "increment my.prefix.192_168_0_1.my_histogram 3.14 1289430000", data3)
+		data3, _ := tp.ReadLine()
+		assert.Equal(t, "increment my.prefix.192_168_0_1.my_histogram 3.14 1289430000", data3)
 
-	data4, _ := tp.ReadLine()
-	assert.Equal(t, "increment my.prefix.192_168_0_1_8888_123.bad_metric_name 1 1289430000", data4)
+		data4, _ := tp.ReadLine()
+		assert.Equal(t, "increment my.prefix.192_168_0_1_8888_123.bad_metric_name 1 1289430000", data4)
 
-	data5, _ := tp.ReadLine()
-	assert.Equal(t, "increment my.prefix.192_168_0_1.my_counter 3.14 1289430000", data5)
+		data5, _ := tp.ReadLine()
+		assert.Equal(t, "increment my.prefix.192_168_0_1.my_counter 3.14 1289430000", data5)
 
-	data6, _ := tp.ReadLine()
-	assert.Equal(t, "", data6)
+		data6, _ := tp.ReadLine()
+		assert.Equal(t, "", data6)
 
-	conn.Close()
+		conn.Close()
+	}()
 }
diff --git a/testutil/accumulator.go b/testutil/accumulator.go
index 63dfddd7a..02bebf9c8 100644
--- a/testutil/accumulator.go
+++ b/testutil/accumulator.go
@@ -129,6 +129,9 @@ func (a *Accumulator) AddError(err error) {
 	}
 	a.Lock()
 	a.Errors = append(a.Errors, err)
+	if a.Cond != nil {
+		a.Cond.Broadcast()
+	}
 	a.Unlock()
 }
 
@@ -198,13 +201,28 @@ func (a *Accumulator) NFields() int {
 	return counter
 }
 
-// Wait waits for a metric to be added to the accumulator.
-// Accumulator must already be locked.
-func (a *Accumulator) Wait() {
+// Wait waits for the given number of metrics to be added to the accumulator.
+func (a *Accumulator) Wait(n int) {
+	a.Lock()
 	if a.Cond == nil {
 		a.Cond = sync.NewCond(&a.Mutex)
 	}
-	a.Cond.Wait()
+	for int(a.NMetrics()) < n {
+		a.Cond.Wait()
+	}
+	a.Unlock()
+}
+
+// WaitError waits for the given number of errors to be added to the accumulator.
+func (a *Accumulator) WaitError(n int) {
+	a.Lock()
+	if a.Cond == nil {
+		a.Cond = sync.NewCond(&a.Mutex)
+	}
+	for len(a.Errors) < n {
+		a.Cond.Wait()
+	}
+	a.Unlock()
 }
 
 func (a *Accumulator) AssertContainsTaggedFields(

From 995546e7c685136924c9d30ba5b49b30d6e2ddb1 Mon Sep 17 00:00:00 2001
From: Patrick Hemmer 
Date: Fri, 24 Mar 2017 15:06:52 -0400
Subject: [PATCH 0163/1302] snmp: support table indexes as tags (#2366)

---
 CHANGELOG.md                     |  1 +
 plugins/inputs/snmp/README.md    |  3 +++
 plugins/inputs/snmp/snmp.go      | 15 ++++++++++++---
 plugins/inputs/snmp/snmp_test.go | 21 ++++++++++++++++-----
 4 files changed, 32 insertions(+), 8 deletions(-)
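
For illustration, a minimal table configuration using the new option; the agent address and OID here are hypothetical:

```toml
[[inputs.snmp]]
  agents = [ "127.0.0.1:161" ]

  [[inputs.snmp.table]]
    name = "iftable"
    oid = "IF-MIB::ifTable"
    ## new in this patch: tag each row with its table index
    ## (a leading "." on the index is stripped, per the code below)
    index_as_tag = true
```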

diff --git a/CHANGELOG.md b/CHANGELOG.md
index fd1ec5136..b1655f77d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -58,6 +58,7 @@ be deprecated eventually.
 - [#1678](https://github.com/influxdata/telegraf/pull/1678): Add AMQP consumer input plugin
 - [#2501](https://github.com/influxdata/telegraf/pull/2501): Support DEAD(X) state in system input plugin.
 - [#2522](https://github.com/influxdata/telegraf/pull/2522): Add support for mongodb client certificates.
+- [#1948](https://github.com/influxdata/telegraf/pull/1948): Support adding SNMP table indexes as tags.
 
 ### Bugfixes
 
diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md
index 473f2a52b..27b1f7571 100644
--- a/plugins/inputs/snmp/README.md
+++ b/plugins/inputs/snmp/README.md
@@ -168,6 +168,9 @@ If not specified, it defaults to the value of `oid`.  If `oid` is numeric, an at
 * `inherit_tags`:
 Which tags to inherit from the top-level config and to use in the output of this table's measurement.
 
+* `index_as_tag`:
+Adds each row's index within the table as a tag.  
+
 ### MIB lookups
 If the plugin is configured such that it needs to perform lookups from the MIB, it will use the net-snmp utilities `snmptranslate` and `snmptable`.
 
diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go
index 9296bc043..5394e57db 100644
--- a/plugins/inputs/snmp/snmp.go
+++ b/plugins/inputs/snmp/snmp.go
@@ -168,6 +168,9 @@ type Table struct {
 	// Which tags to inherit from the top-level config.
 	InheritTags []string
 
+	// Adds each row's table index as a tag.
+	IndexAsTag bool
+
 	// Fields is the tags and values to look up.
 	Fields []Field `toml:"field"`
 
@@ -464,13 +467,19 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) {
 			}
 		}
 
-		for i, v := range ifv {
-			rtr, ok := rows[i]
+		for idx, v := range ifv {
+			rtr, ok := rows[idx]
 			if !ok {
 				rtr = RTableRow{}
 				rtr.Tags = map[string]string{}
 				rtr.Fields = map[string]interface{}{}
-				rows[i] = rtr
+				rows[idx] = rtr
+			}
+			if t.IndexAsTag && idx != "" {
+				if idx[0] == '.' {
+					idx = idx[1:]
+				}
+				rtr.Tags["index"] = idx
 			}
 			// don't add an empty string
 			if vs, ok := v.(string); !ok || vs != "" {
diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go
index 62b19fcea..07fdeddc1 100644
--- a/plugins/inputs/snmp/snmp_test.go
+++ b/plugins/inputs/snmp/snmp_test.go
@@ -413,7 +413,8 @@ func TestGosnmpWrapper_get_retry(t *testing.T) {
 
 func TestTableBuild_walk(t *testing.T) {
 	tbl := Table{
-		Name: "mytable",
+		Name:       "mytable",
+		IndexAsTag: true,
 		Fields: []Field{
 			{
 				Name:  "myfield1",
@@ -442,7 +443,10 @@ func TestTableBuild_walk(t *testing.T) {
 
 	assert.Equal(t, tb.Name, "mytable")
 	rtr1 := RTableRow{
-		Tags: map[string]string{"myfield1": "foo"},
+		Tags: map[string]string{
+			"myfield1": "foo",
+			"index":    "0",
+		},
 		Fields: map[string]interface{}{
 			"myfield2": 1,
 			"myfield3": float64(0.123),
@@ -450,7 +454,10 @@ func TestTableBuild_walk(t *testing.T) {
 		},
 	}
 	rtr2 := RTableRow{
-		Tags: map[string]string{"myfield1": "bar"},
+		Tags: map[string]string{
+			"myfield1": "bar",
+			"index":    "1",
+		},
 		Fields: map[string]interface{}{
 			"myfield2": 2,
 			"myfield3": float64(0.456),
@@ -458,14 +465,18 @@ func TestTableBuild_walk(t *testing.T) {
 		},
 	}
 	rtr3 := RTableRow{
-		Tags: map[string]string{},
+		Tags: map[string]string{
+			"index": "2",
+		},
 		Fields: map[string]interface{}{
 			"myfield2": 0,
 			"myfield3": float64(0.0),
 		},
 	}
 	rtr4 := RTableRow{
-		Tags: map[string]string{},
+		Tags: map[string]string{
+			"index": "3",
+		},
 		Fields: map[string]interface{}{
 			"myfield3": float64(9.999),
 		},

From 0fa90014532e57139bf2987b0b908889da712a63 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Fri, 24 Mar 2017 16:01:35 -0700
Subject: [PATCH 0164/1302] Clarify influxdb output url format

closes #2568
---
 plugins/outputs/influxdb/README.md   | 5 ++++-
 plugins/outputs/influxdb/influxdb.go | 5 ++++-
 2 files changed, 8 insertions(+), 2 deletions(-)
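
For example, under the clarified format each entry names only a scheme, host, and optional port, with no path or query component:

```toml
[[outputs.influxdb]]
  ## each item: scheme "://" host [ ":" port ]
  urls = ["http://127.0.0.1:8086"]   # HTTP endpoint
  # urls = ["udp://localhost:8089"]  # UDP endpoint
```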

diff --git a/plugins/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md
index 864177a36..5acac6cca 100644
--- a/plugins/outputs/influxdb/README.md
+++ b/plugins/outputs/influxdb/README.md
@@ -7,7 +7,10 @@ This plugin writes to [InfluxDB](https://www.influxdb.com) via HTTP or UDP.
 ```toml
 # Configuration for influxdb server to send metrics to
 [[outputs.influxdb]]
-  ## The full HTTP or UDP endpoint URL for your InfluxDB instance.
+  ## The HTTP or UDP URL for your InfluxDB instance.  Each item should be
+  ## of the form:
+  ##   scheme "://" host [ ":" port]
+  ##
   ## Multiple urls can be specified as part of the same cluster,
   ## this means that only ONE of the urls will be written to each interval.
   # urls = ["udp://localhost:8089"] # UDP endpoint example
diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go
index 6c19a35fc..6419d43ea 100644
--- a/plugins/outputs/influxdb/influxdb.go
+++ b/plugins/outputs/influxdb/influxdb.go
@@ -44,7 +44,10 @@ type InfluxDB struct {
 }
 
 var sampleConfig = `
-  ## The full HTTP or UDP endpoint URL for your InfluxDB instance.
+  ## The HTTP or UDP URL for your InfluxDB instance.  Each item should be
+  ## of the form:
+  ##   scheme "://" host [ ":" port]
+  ##
   ## Multiple urls can be specified as part of the same cluster,
   ## this means that only ONE of the urls will be written to each interval.
   # urls = ["udp://localhost:8089"] # UDP endpoint example

From 5612df48f961face923bfd3bba9bf4c174a9da97 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Mon, 27 Mar 2017 14:49:04 -0700
Subject: [PATCH 0165/1302] Update telegraf.conf

---
 etc/telegraf.conf | 378 ++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 334 insertions(+), 44 deletions(-)
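
The sample config is generated from each plugin's SampleConfig string rather than edited by hand; assuming the usual 1.x workflow, it is regenerated with something like:

```
telegraf -sample-config > etc/telegraf.conf
```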

diff --git a/etc/telegraf.conf b/etc/telegraf.conf
index aabdf180e..63e41d7bb 100644
--- a/etc/telegraf.conf
+++ b/etc/telegraf.conf
@@ -81,7 +81,10 @@
 
 # Configuration for influxdb server to send metrics to
 [[outputs.influxdb]]
-  ## The full HTTP or UDP endpoint URL for your InfluxDB instance.
+  ## The HTTP or UDP URL for your InfluxDB instance.  Each item should be
+  ## of the form:
+  ##   scheme "://" host [ ":" port]
+  ##
   ## Multiple urls can be specified as part of the same cluster,
   ## this means that only ONE of the urls will be written to each interval.
   # urls = ["udp://localhost:8089"] # UDP endpoint example
@@ -131,6 +134,8 @@
 #   ## AMQP exchange
 #   exchange = "telegraf"
 #   ## Auth method. PLAIN and EXTERNAL are supported
+#   ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
+#   ## described here: https://www.rabbitmq.com/plugins.html
 #   # auth_method = "PLAIN"
 #   ## Telegraf tag to use as a routing key
 #   ##  ie, if this tag exists, it's value will be used as the routing key
@@ -193,6 +198,45 @@
 #   # no configuration
 
 
+# # Configuration for Elasticsearch to send metrics to.
+# [[outputs.elasticsearch]]
+#   ## The full HTTP endpoint URL for your Elasticsearch instance
+#   ## Multiple urls can be specified as part of the same cluster,
+#   ## this means that only ONE of the urls will be written to each interval.
+#   urls = [ "http://node1.es.example.com:9200" ] # required.
+#   ## Elasticsearch client timeout, defaults to "5s" if not set.
+#   timeout = "5s"
+#   ## Set to true to ask Elasticsearch for a list of all cluster nodes,
+#   ## thus it is not necessary to list all nodes in the urls config option.
+#   enable_sniffer = false
+#   ## Set the interval to check if the Elasticsearch nodes are available
+#   ## Setting to "0s" will disable the health check (not recommended in production)
+#   health_check_interval = "10s"
+#   ## HTTP basic authentication details (eg. when using Shield)
+#   # username = "telegraf"
+#   # password = "mypassword"
+#
+#   ## Index Config
+#   ## The target index for metrics (Elasticsearch will create it if it does not exist).
+#   ## You can use the date specifiers below to create indexes per time frame.
+#   ## The metric timestamp will be used to decide the destination index name
+#   # %Y - year (2016)
+#   # %y - last two digits of year (00..99)
+#   # %m - month (01..12)
+#   # %d - day of month (e.g., 01)
+#   # %H - hour (00..23)
+#   index_name = "telegraf-%Y.%m.%d" # required.
+#
+#   ## Template Config
+#   ## Set to true if you want telegraf to manage its index template.
+#   ## If enabled it will create a recommended index template for telegraf indexes
+#   manage_template = true
+#   ## The template name used for telegraf indexes
+#   template_name = "telegraf"
+#   ## Set to true if you want telegraf to overwrite an existing template
+#   overwrite_template = false
+
+
 # # Send telegraf metrics to file(s)
 # [[outputs.file]]
 #   ## Files to write to, "stdout" is a specially handled file.
@@ -443,7 +487,7 @@
 #   # expiration_interval = "60s"
 
 
-# # Configuration for Riemann server to send metrics to
+# # Configuration for the Riemann server to send metrics to
 # [[outputs.riemann]]
 #   ## The full TCP or UDP URL of the Riemann server
 #   url = "tcp://localhost:5555"
@@ -472,9 +516,12 @@
 #
 #   ## Description for Riemann event
 #   # description_text = "metrics collected from telegraf"
+#
+#   ## Riemann client write timeout, defaults to "5s" if not set.
+#   # timeout = "5s"
 
 
-# # Configuration for the legacy Riemann plugin
+# # Configuration for the Riemann server to send metrics to
 # [[outputs.riemann_legacy]]
 #   ## URL of server
 #   url = "localhost:5555"
@@ -484,6 +531,27 @@
 #   separator = " "
 
 
+# # Generic socket writer capable of handling multiple socket types.
+# [[outputs.socket_writer]]
+#   ## URL to connect to
+#   # address = "tcp://127.0.0.1:8094"
+#   # address = "tcp://example.com:http"
+#   # address = "tcp4://127.0.0.1:8094"
+#   # address = "tcp6://127.0.0.1:8094"
+#   # address = "tcp6://[2001:db8::1]:8094"
+#   # address = "udp://127.0.0.1:8094"
+#   # address = "udp4://127.0.0.1:8094"
+#   # address = "udp6://127.0.0.1:8094"
+#   # address = "unix:///tmp/telegraf.sock"
+#   # address = "unixgram:///tmp/telegraf.sock"
+#
+#   ## Data format to generate.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   # data_format = "influx"
+
+
 
 ###############################################################################
 #                            PROCESSOR PLUGINS                                #
@@ -531,7 +599,7 @@
 
   ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
   ## present on /run, /var/run, /dev/shm or /dev).
-  ignore_fs = ["tmpfs", "devtmpfs"]
+  ignore_fs = ["tmpfs", "devtmpfs", "devfs"]
 
 
 # Read metrics about disk IO by device
@@ -542,6 +610,23 @@
   # devices = ["sda", "sdb"]
   ## Uncomment the following line if you need disk serial numbers.
   # skip_serial_number = false
+  #
+  ## On systems which support it, device metadata can be added in the form of
+  ## tags.
+  ## Currently only Linux is supported via udev properties. You can view
+  ## available properties for a device by running:
+  ## 'udevadm info -q property -n /dev/sda'
+  # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
+  #
+  ## Using the same metadata source as device_tags, you can also customize the
+  ## name of the device via templates.
+  ## The 'name_templates' parameter is a list of templates to try and apply to
+  ## the device. The template may contain variables in the form of '$PROPERTY' or
+  ## '${PROPERTY}'. The first template which does not contain any variables not
+  ## present for the device is used as the device name tag.
+  ## The typical use case is for LVM volumes, to get the VG/LV name instead of
+  ## the near-meaningless DM-0 name.
+  # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
 
 
 # Get kernel statistics from /proc/stat
@@ -658,7 +743,7 @@
 #   gather_admin_socket_stats = true
 #
 #   ## Whether to gather statistics via ceph commands
-#   gather_cluster_stats = true
+#   gather_cluster_stats = false
 
 
 # # Read specific statistics per cgroup
@@ -677,6 +762,12 @@
 #   # files = ["memory.*usage*", "memory.limit_in_bytes"]
 
 
+# # Get standard chrony metrics, requires chronyc executable.
+# [[inputs.chrony]]
+#   ## If true, chronyc tries to perform a DNS lookup for the time server.
+#   # dns_lookup = false
+
+
 # # Pull Metric Statistics from Amazon CloudWatch
 # [[inputs.cloudwatch]]
 #   ## Amazon Region
@@ -722,9 +813,10 @@
 #   namespace = "AWS/ELB"
 #
 #   ## Maximum requests per second. Note that the global default AWS rate limit is
-#   ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
-#   ## maximum of 10. Optional - default value is 10.
-#   ratelimit = 10
+#   ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
+#   ## maximum of 400. Optional - default value is 200.
+#   ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
+#   ratelimit = 200
 #
 #   ## Metrics to Pull (optional)
 #   ## Defaults to all Metrics in Namespace if nothing is provided
@@ -738,6 +830,22 @@
 #   #    value = "p-example"
 
 
+# # Collects conntrack stats from the configured directories and files.
+# [[inputs.conntrack]]
+#    ## The following defaults would work with multiple versions of conntrack.
+#    ## Note the nf_ and ip_ filename prefixes are mutually exclusive across
+#    ## kernel versions, as are the directory locations.
+#
+#    ## Superset of filenames to look for within the conntrack dirs.
+#    ## Missing files will be ignored.
+#    files = ["ip_conntrack_count","ip_conntrack_max",
+#             "nf_conntrack_count","nf_conntrack_max"]
+#
+#    ## Directories to search within for the conntrack files above.
+#    ## Missing directories will be ignored.
+#    dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
+
+
 # # Gather health check statuses from services registered in Consul
 # [[inputs.consul]]
 #   ## Most of these values defaults to the one configured on a Consul's agent level.
@@ -957,6 +1065,24 @@
 #   ## Server address not starting with 'http' will be treated as a possible
 #   ## socket, so both examples below are valid.
 #   ## servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
+#   #
+#   ## By default, some of the fields are renamed from what haproxy calls them.
+#   ## Setting this option to true results in the plugin keeping the original
+#   ## field names.
+#   ## keep_field_names = true
+
+
+# # Monitor disks' temperatures using hddtemp
+# [[inputs.hddtemp]]
+#   ## By default, telegraf gathers temperature data from all disks detected
+#   ## by hddtemp.
+#   ##
+#   ## Only collect temps from the selected disks.
+#   ##
+#   ## A * as the device name will return the temperature values of all disks.
+#   ##
+#   # address = "127.0.0.1:7634"
+#   # devices = ["sda", "*"]
 
 
 # # HTTP/HTTPS request given an address a method and a timeout
@@ -977,6 +1103,11 @@
 #   # {'fake':'data'}
 #   # '''
 #
+#   ## Optional substring or regex match in body of the response
+#   ## response_string_match = "\"service_status\": \"up\""
+#   ## response_string_match = "ok"
+#   ## response_string_match = "\".*_status\".?:.?\"up\""
+#
 #   ## Optional SSL Config
 #   # ssl_ca = "/etc/telegraf/ca.pem"
 #   # ssl_cert = "/etc/telegraf/cert.pem"
@@ -1050,14 +1181,37 @@
 #   # collect_memstats = true
 
 
-# # Read metrics from one or many bare metal servers
+# # Read metrics from the bare metal servers via IPMI
 # [[inputs.ipmi_sensor]]
-#   ## specify servers via a url matching:
+#   ## optionally specify the path to the ipmitool executable
+#   # path = "/usr/bin/ipmitool"
+#   #
+#   ## optionally specify one or more servers via a url matching
 #   ##  [username[:password]@][protocol[(address)]]
 #   ##  e.g.
 #   ##    root:passwd@lan(127.0.0.1)
 #   ##
-#   servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
+#   ## if no servers are specified, local machine sensor stats will be queried
+#   ##
+#   # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
+
+
+# # Gather packets and bytes throughput from iptables
+# [[inputs.iptables]]
+#   ## iptables requires root access on most systems.
+#   ## Setting 'use_sudo' to true will make use of sudo to run iptables.
+#   ## Users must configure sudo to allow the telegraf user to run iptables with no password.
+#   ## The sudo rule can be restricted to the list command only: "iptables -nvL".
+#   use_sudo = false
+#   ## Setting 'use_lock' to true runs iptables with the "-w" option.
+#   ## Adjust your sudo settings appropriately if using this option ("iptables -wnvL").
+#   use_lock = false
+#   ## defines the table to monitor:
+#   table = "filter"
+#   ## defines the chains to monitor.
+#   ## NOTE: iptables rules without a comment will not be monitored.
+#   ## Read the plugin documentation for more information.
+#   chains = [ "INPUT" ]
 
 
 # # Read JMX metrics through Jolokia
@@ -1087,6 +1241,13 @@
 #   ## Includes connection time, any redirects, and reading the response body.
 #   # client_timeout = "4s"
 #
+#   ## Attribute delimiter
+#   ##
+#   ## When multiple attributes are returned for a single
+#   ## [inputs.jolokia.metrics], the field name is a concatenation of the metric
+#   ## name, and the attribute name, separated by the given delimiter.
+#   # delimiter = "_"
+#
 #   ## List of servers exposing jolokia read service
 #   [[inputs.jolokia.servers]]
 #     name = "as-server-01"
@@ -1117,6 +1278,11 @@
 #     attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
 
 
+# # Get kernel statistics from /proc/vmstat
+# [[inputs.kernel_vmstat]]
+#   # no configuration
+
+
 # # Read metrics from the kubernetes kubelet api
 # [[inputs.kubernetes]]
 #   ## URL for the kubelet
@@ -1216,6 +1382,13 @@
 #   ##   10.0.0.1:10000, etc.
 #   servers = ["127.0.0.1:27017"]
 #   gather_perdb_stats = false
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
 
 
 # # Read metrics from one or many mysql servers
@@ -1243,9 +1416,15 @@
 #   ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
 #   gather_process_list                       = true
 #   #
+#   ## gather thread state counts from INFORMATION_SCHEMA.USER_STATISTICS
+#   gather_user_statistics                    = true
+#   #
 #   ## gather auto_increment columns and max values from information schema
 #   gather_info_schema_auto_inc               = true
 #   #
+#   ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
+#   gather_innodb_metrics                     = true
+#   #
 #   ## gather metrics from SHOW SLAVE STATUS command output
 #   gather_slave_status                       = true
 #   #
@@ -1417,7 +1596,7 @@
 #   # ignored_databases = ["postgres", "template0", "template1"]
 #
 #   ## A list of databases to pull metrics about. If not specified, metrics for all
-#   ## databases are gathered.  Do NOT use with the 'ignore_databases' option.
+#   ## databases are gathered.  Do NOT use with the 'ignored_databases' option.
 #   # databases = ["app_production", "testing"]
 
 
@@ -1599,6 +1778,13 @@
 #   servers = ["http://localhost:8098"]
 
 
+# # Monitor sensors, requires lm-sensors package
+# [[inputs.sensors]]
+#   ## Remove numbers from field names.
+#   ## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
+#   # remove_numbers = true
+
+
 # # Retrieves SNMP values from remote agents
 # [[inputs.snmp]]
 #   agents = [ "127.0.0.1:161" ]
@@ -1775,6 +1961,68 @@
 #   # ]
 
 
+# # Sysstat metrics collector
+# [[inputs.sysstat]]
+#   ## Path to the sadc command.
+#   #
+#   ## Common Defaults:
+#   ##   Debian/Ubuntu: /usr/lib/sysstat/sadc
+#   ##   Arch:          /usr/lib/sa/sadc
+#   ##   RHEL/CentOS:   /usr/lib64/sa/sadc
+#   sadc_path = "/usr/lib/sa/sadc" # required
+#   #
+#   #
+#   ## Path to the sadf command, if it is not in PATH
+#   # sadf_path = "/usr/bin/sadf"
+#   #
+#   #
+#   ## Activities is a list of activities that are passed as arguments to the
+#   ## sadc collector utility (e.g. DISK, SNMP, etc.)
+#   ## The more activities that are added, the more data is collected.
+#   # activities = ["DISK"]
+#   #
+#   #
+#   ## Group metrics into measurements.
+#   ##
+#   ## If group is false, each metric is prefixed with a description and
+#   ## represents a measurement by itself.
+#   ##
+#   ## If group is true, corresponding metrics are grouped into a single measurement.
+#   # group = true
+#   #
+#   #
+#   ## Options for the sadf command. The values on the left represent the sadf
+#   ## options and the values on the right their description (which are used for
+#   ## grouping and prefixing metrics).
+#   ##
+#   ## Run 'sar -h' or 'man sar' to find out the supported options for your
+#   ## sysstat version.
+#   [inputs.sysstat.options]
+#     -C = "cpu"
+#     -B = "paging"
+#     -b = "io"
+#     -d = "disk"             # requires DISK activity
+#     "-n ALL" = "network"
+#     "-P ALL" = "per_cpu"
+#     -q = "queue"
+#     -R = "mem"
+#     -r = "mem_util"
+#     -S = "swap_util"
+#     -u = "cpu_util"
+#     -v = "inode"
+#     -W = "swap"
+#     -w = "task"
+#   #  -H = "hugepages"        # only available for newer linux distributions
+#   #  "-I ALL" = "interrupts" # requires INT activity
+#   #
+#   #
+#   ## Device tags can be used to add additional tags for devices.
+#   ## For example, the configuration below adds a tag vg with value rootvg for
+#   ## all metrics with sda devices.
+#   # [[inputs.sysstat.device_tags.sda]]
+#   #  vg = "rootvg"
+
+
 # # Inserts sine and cosine waves for demonstration purposes
 # [[inputs.trig]]
 #   ## Set the amplitude
@@ -1830,6 +2078,39 @@
 #                            SERVICE INPUT PLUGINS                            #
 ###############################################################################
 
+# # AMQP consumer plugin
+# [[inputs.amqp_consumer]]
+#   ## AMQP url
+#   url = "amqp://localhost:5672/influxdb"
+#   ## AMQP exchange
+#   exchange = "telegraf"
+#   ## AMQP queue name
+#   queue = "telegraf"
+#   ## Binding Key
+#   binding_key = "#"
+#
+#   ## Maximum number of messages the server should give to the worker.
+#   prefetch_count = 50
+#
+#   ## Auth method. PLAIN and EXTERNAL are supported
+#   ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
+#   ## described here: https://www.rabbitmq.com/plugins.html
+#   # auth_method = "PLAIN"
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   data_format = "influx"
+
+
 # # Influx HTTP write listener
 # [[inputs.http_listener]]
 #   ## Address and port to host HTTP listener on
@@ -1878,7 +2159,9 @@
 #   ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
 #   ##   /var/log/apache.log -> only tail the apache log file
 #   files = ["/var/log/apache/access.log"]
-#   ## Read file from beginning.
+#   ## Read files that currently exist from the beginning. Files that are created
+#   ## while telegraf is running (and that match the "files" globs) will always
+#   ## be read from the beginning.
 #   from_beginning = false
 #
 #   ## Parse logstash-style "grok" patterns:
@@ -1976,6 +2259,38 @@
 #   data_format = "influx"
 
 
+# # Generic socket listener capable of handling multiple socket types.
+# [[inputs.socket_listener]]
+#   ## URL to listen on
+#   # service_address = "tcp://:8094"
+#   # service_address = "tcp://127.0.0.1:http"
+#   # service_address = "tcp4://:8094"
+#   # service_address = "tcp6://:8094"
+#   # service_address = "tcp6://[2001:db8::1]:8094"
+#   # service_address = "udp://:8094"
+#   # service_address = "udp4://:8094"
+#   # service_address = "udp6://:8094"
+#   # service_address = "unix:///tmp/telegraf.sock"
+#   # service_address = "unixgram:///tmp/telegraf.sock"
+#
+#   ## Maximum number of concurrent connections.
+#   ## Only applies to stream sockets (e.g. TCP).
+#   ## 0 (default) is unlimited.
+#   # max_connections = 1024
+#
+#   ## Maximum socket buffer size in bytes.
+#   ## For stream sockets, once the buffer fills up, the sender will start backing up.
+#   ## For datagram sockets, once the buffer fills up, metrics will start dropping.
+#   ## Defaults to the OS default.
+#   # read_buffer_size = 65535
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   # data_format = "influx"
+
+
 # # Statsd Server
 # [[inputs.statsd]]
 #   ## Address and port to host UDP listener on
@@ -2045,41 +2360,16 @@
 
 # # Generic TCP listener
 # [[inputs.tcp_listener]]
-#   ## Address and port to host TCP listener on
-#   # service_address = ":8094"
-#
-#   ## Number of TCP messages allowed to queue up. Once filled, the
-#   ## TCP listener will start dropping packets.
-#   # allowed_pending_messages = 10000
-#
-#   ## Maximum number of concurrent TCP connections to allow
-#   # max_tcp_connections = 250
-#
-#   ## Data format to consume.
-#   ## Each data format has it's own unique set of configuration options, read
-#   ## more about them here:
-#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-#   data_format = "influx"
+#   # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
+#   # socket_listener plugin
+#   # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
 
 
 # # Generic UDP listener
 # [[inputs.udp_listener]]
-#   ## Address and port to host UDP listener on
-#   # service_address = ":8092"
-#
-#   ## Number of UDP messages allowed to queue up. Once filled, the
-#   ## UDP listener will start dropping packets.
-#   # allowed_pending_messages = 10000
-#
-#   ## Set the buffer size of the UDP connection outside of OS default (in bytes)
-#   ## If set to 0, take OS default
-#   udp_buffer_size = 16777216
-#
-#   ## Data format to consume.
-#   ## Each data format has it's own unique set of configuration options, read
-#   ## more about them here:
-#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-#   data_format = "influx"
+#   # DEPRECATED: the UDP listener plugin has been deprecated in favor of the
+#   # socket_listener plugin
+#   # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
 
 
 # # A Webhooks Event collector

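The `name_templates` resolution described in the disk IO section above is easy to misread, so here is a minimal sketch of the selection rule: try each template in order and use the first one whose `$PROPERTY` variables are all present in the device's udev properties. The function name and the property map are illustrative, not Telegraf's actual implementation.

```go
package main

import (
	"fmt"
	"os"
)

// resolveDeviceName returns the first template whose variables are all
// present in the device's udev properties, mirroring the behaviour
// described for 'name_templates' above.
func resolveDeviceName(templates []string, props map[string]string) (string, bool) {
	for _, tmpl := range templates {
		missing := false
		expanded := os.Expand(tmpl, func(key string) string {
			v, ok := props[key]
			if !ok {
				missing = true
			}
			return v
		})
		if !missing {
			return expanded, true
		}
	}
	return "", false
}

func main() {
	// An LVM volume typically lacks ID_FS_LABEL but has the DM_* properties,
	// so the second template wins and yields "rootvg/rootlv".
	props := map[string]string{"DM_VG_NAME": "rootvg", "DM_LV_NAME": "rootlv"}
	name, ok := resolveDeviceName([]string{"$ID_FS_LABEL", "$DM_VG_NAME/$DM_LV_NAME"}, props)
	fmt.Println(name, ok) // rootvg/rootlv true
}
```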
From 84a9f91f5c090561b93fb505a87970a9165a532e Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Mon, 27 Mar 2017 15:05:06 -0700
Subject: [PATCH 0166/1302] Skip elasticsearch output integration test in short
 mode

---
 plugins/outputs/elasticsearch/elasticsearch_test.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/plugins/outputs/elasticsearch/elasticsearch_test.go b/plugins/outputs/elasticsearch/elasticsearch_test.go
index 9163a2bbe..9000676d9 100644
--- a/plugins/outputs/elasticsearch/elasticsearch_test.go
+++ b/plugins/outputs/elasticsearch/elasticsearch_test.go
@@ -57,6 +57,10 @@ func TestTemplateManagementEmptyTemplate(t *testing.T) {
 }
 
 func TestTemplateManagement(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping integration test in short mode")
+	}
+
 	urls := []string{"http://" + testutil.GetLocalHost() + ":9200"}
 
 	e := &Elasticsearch{

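For context, the guard added above is Go's standard short-mode gate: `go test -short` makes `testing.Short()` return true, and tests that need a live service skip themselves. A self-contained sketch of the same pattern (the test body is hypothetical):

```go
package example

import "testing"

// TestIntegration talks to a real service, so it opts out of short mode,
// exactly like the elasticsearch test in the patch above.
func TestIntegration(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	// ... exercise the live service here ...
}
```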
From 78c7f4e4af827a96cacdea3593b85293e30ff745 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Mon, 27 Mar 2017 15:49:45 -0700
Subject: [PATCH 0167/1302] Add write timeout to Riemann output (#2576)

---
 CHANGELOG.md                       |  1 +
 plugins/outputs/riemann/README.md  |  3 +++
 plugins/outputs/riemann/riemann.go | 12 ++++++++++--
 3 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b1655f77d..5da830d32 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -84,6 +84,7 @@ be deprecated eventually.
 - [#2513](https://github.com/influxdata/telegraf/issues/2513): create /etc/telegraf/telegraf.d directory in tarball.
 - [#2541](https://github.com/influxdata/telegraf/issues/2541): Return error on unsupported serializer data format.
 - [#1827](https://github.com/influxdata/telegraf/issues/1827): Fix Windows Performance Counters multi instance identifier
+- [#2576](https://github.com/influxdata/telegraf/pull/2576): Add write timeout to Riemann output
 
 
 ## v1.2.1 [2017-02-01]
diff --git a/plugins/outputs/riemann/README.md b/plugins/outputs/riemann/README.md
index 2338a00dc..82615728c 100644
--- a/plugins/outputs/riemann/README.md
+++ b/plugins/outputs/riemann/README.md
@@ -34,6 +34,9 @@ This plugin writes to [Riemann](http://riemann.io/) via TCP or UDP.
 
   ## Description for Riemann event
   # description_text = "metrics collected from telegraf"
+
+  ## Riemann client write timeout, defaults to "5s" if not set.
+  # timeout = "5s"
 ```
 
 ### Required parameters:
diff --git a/plugins/outputs/riemann/riemann.go b/plugins/outputs/riemann/riemann.go
index 25cf3011a..1738ca537 100644
--- a/plugins/outputs/riemann/riemann.go
+++ b/plugins/outputs/riemann/riemann.go
@@ -7,9 +7,11 @@ import (
 	"os"
 	"sort"
 	"strings"
+	"time"
 
 	"github.com/amir/raidman"
 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/plugins/outputs"
 )
 
@@ -22,6 +24,7 @@ type Riemann struct {
 	TagKeys                []string
 	Tags                   []string
 	DescriptionText        string
+	Timeout                internal.Duration
 
 	client *raidman.Client
 }
@@ -54,6 +57,9 @@ var sampleConfig = `
 
   ## Description for Riemann event
   # description_text = "metrics collected from telegraf"
+
+  ## Riemann client write timeout, defaults to "5s" if not set.
+  # timeout = "5s"
 `
 
 func (r *Riemann) Connect() error {
@@ -62,7 +68,7 @@ func (r *Riemann) Connect() error {
 		return err
 	}
 
-	client, err := raidman.Dial(parsed_url.Scheme, parsed_url.Host)
+	client, err := raidman.DialWithTimeout(parsed_url.Scheme, parsed_url.Host, r.Timeout.Duration)
 	if err != nil {
 		r.client = nil
 		return err
@@ -212,6 +218,8 @@ func (r *Riemann) tags(tags map[string]string) []string {
 
 func init() {
 	outputs.Add("riemann", func() telegraf.Output {
-		return &Riemann{}
+		return &Riemann{
+			Timeout: internal.Duration{Duration: time.Second * 5},
+		}
 	})
 }

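Note how the default write timeout is supplied by the factory registered in `init`, not inside `Connect`, so a zero-value config still gets 5s. A minimal sketch of that pattern, with `Duration` standing in for telegraf's `internal.Duration` wrapper and `newRiemann` playing the role of the registered factory:

```go
package main

import (
	"fmt"
	"time"
)

// Duration mirrors telegraf's internal.Duration wrapper.
type Duration struct {
	Duration time.Duration
}

type Riemann struct {
	Timeout Duration
}

// newRiemann plays the role of the factory registered with outputs.Add:
// configs that never set a timeout still get a sane 5s default.
func newRiemann() *Riemann {
	return &Riemann{Timeout: Duration{Duration: 5 * time.Second}}
}

func main() {
	fmt.Println(newRiemann().Timeout.Duration) // 5s
}
```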
From 37689f4df60872948425b57baaada82e99872f6e Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Tue, 28 Mar 2017 10:22:28 -0700
Subject: [PATCH 0168/1302] Add elasticsearch output to changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5da830d32..2650e5716 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -59,6 +59,7 @@ be deprecated eventually.
 - [#2501](https://github.com/influxdata/telegraf/pull/2501): Support DEAD(X) state in system input plugin.
 - [#2522](https://github.com/influxdata/telegraf/pull/2522): Add support for mongodb client certificates.
 - [#1948](https://github.com/influxdata/telegraf/pull/1948): Support adding SNMP table indexes as tags.
+- [#2332](https://github.com/influxdata/telegraf/pull/2332): Add Elasticsearch 5.x output
 
 ### Bugfixes
 

From 1100a98f11b3a90ee6aacf7294331caa3d3d919c Mon Sep 17 00:00:00 2001
From: mgresser 
Date: Tue, 28 Mar 2017 13:47:00 -0400
Subject: [PATCH 0169/1302] Removed duplicate evictions metric (#2577)

---
 plugins/inputs/memcached/memcached.go      | 1 -
 plugins/inputs/memcached/memcached_test.go | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/plugins/inputs/memcached/memcached.go b/plugins/inputs/memcached/memcached.go
index 5ee538e93..d174abeda 100644
--- a/plugins/inputs/memcached/memcached.go
+++ b/plugins/inputs/memcached/memcached.go
@@ -51,7 +51,6 @@ var sendMetrics = []string{
 	"decr_misses",
 	"cas_hits",
 	"cas_misses",
-	"evictions",
 	"bytes_read",
 	"bytes_written",
 	"threads",
diff --git a/plugins/inputs/memcached/memcached_test.go b/plugins/inputs/memcached/memcached_test.go
index 210adffdb..436c978f7 100644
--- a/plugins/inputs/memcached/memcached_test.go
+++ b/plugins/inputs/memcached/memcached_test.go
@@ -28,7 +28,7 @@ func TestMemcachedGeneratesMetrics(t *testing.T) {
 		"limit_maxbytes", "bytes", "uptime", "curr_items", "total_items",
 		"curr_connections", "total_connections", "connection_structures", "cmd_get",
 		"cmd_set", "delete_hits", "delete_misses", "incr_hits", "incr_misses",
-		"decr_hits", "decr_misses", "cas_hits", "cas_misses", "evictions",
+		"decr_hits", "decr_misses", "cas_hits", "cas_misses",
 		"bytes_read", "bytes_written", "threads", "conn_yields"}
 
 	for _, metric := range intMetrics {

From 9e036b2d6553b47c8d556a29b96e3b3215f4a338 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Tue, 28 Mar 2017 12:31:36 -0700
Subject: [PATCH 0170/1302] Remove wait loop in riemann tests

This test case still has a race condition, but I believe it occurs when the
test does not complete quickly enough.
---
 plugins/outputs/riemann/riemann_test.go | 11 -----------
 1 file changed, 11 deletions(-)

diff --git a/plugins/outputs/riemann/riemann_test.go b/plugins/outputs/riemann/riemann_test.go
index 67a161be5..0b7c85403 100644
--- a/plugins/outputs/riemann/riemann_test.go
+++ b/plugins/outputs/riemann/riemann_test.go
@@ -193,17 +193,6 @@ func TestConnectAndWrite(t *testing.T) {
 	err = r.Write(metrics)
 	require.NoError(t, err)
 
-	start := time.Now()
-	for true {
-		events, _ := r.client.Query(`tagged "docker"`)
-		if len(events) > 0 {
-			break
-		}
-		if time.Since(start) > time.Second {
-			break
-		}
-	}
-
 	// are there any "docker" tagged events in Riemann?
 	events, err := r.client.Query(`tagged "docker"`)
 	require.NoError(t, err)

From 2d7f612bd70b8832e464dd908a7d976cb3a408c4 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Wed, 29 Mar 2017 14:25:33 -0700
Subject: [PATCH 0171/1302] Use fork of hpcloud/tail (#2595)

---
 Godeps                                | 2 +-
 plugins/inputs/logparser/logparser.go | 2 +-
 plugins/inputs/tail/tail.go           | 2 +-
 plugins/inputs/tail/tail_test.go      | 2 --
 4 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/Godeps b/Godeps
index 6cbe9efa7..9717cec2f 100644
--- a/Godeps
+++ b/Godeps
@@ -21,7 +21,7 @@ github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
 github.com/gorilla/mux 392c28fe23e1c45ddba891b0320b3b5df220beea
 github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
 github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
-github.com/hpcloud/tail 915e5feba042395f5fda4dbe9c0e99aeab3088b3
+github.com/influxdata/tail e9ef7e826dafcb3093b40b989fefa90eeb9a8ca1
 github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
 github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
 github.com/jackc/pgx c8080fc4a1bfa44bf90383ad0fdce2f68b7d313c
diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go
index a22832277..c5641ba28 100644
--- a/plugins/inputs/logparser/logparser.go
+++ b/plugins/inputs/logparser/logparser.go
@@ -6,7 +6,7 @@ import (
 	"reflect"
 	"sync"
 
-	"github.com/hpcloud/tail"
+	"github.com/influxdata/tail"
 
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/internal/errchan"
diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go
index 0c19f9116..f57d970cf 100644
--- a/plugins/inputs/tail/tail.go
+++ b/plugins/inputs/tail/tail.go
@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"sync"
 
-	"github.com/hpcloud/tail"
+	"github.com/influxdata/tail"
 
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/internal/globpath"
diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go
index b927d160c..7ddb502f9 100644
--- a/plugins/inputs/tail/tail_test.go
+++ b/plugins/inputs/tail/tail_test.go
@@ -5,7 +5,6 @@ import (
 	"os"
 	"runtime"
 	"testing"
-	"time"
 
 	"github.com/influxdata/telegraf/plugins/parsers"
 	"github.com/influxdata/telegraf/testutil"
@@ -59,7 +58,6 @@ func TestTailFromEnd(t *testing.T) {
 
 	acc := testutil.Accumulator{}
 	require.NoError(t, tt.Start(&acc))
-	time.Sleep(time.Millisecond * 200) //TODO remove once https://github.com/hpcloud/tail/pull/114 is merged & added to Godeps
 	for _, tailer := range tt.tailers {
 		for n, err := tailer.Tell(); err == nil && n == 0; n, err = tailer.Tell() {
 			// wait for tailer to jump to end

From cc5b2f68b69ec76886555cbae9c08b75155a6ffc Mon Sep 17 00:00:00 2001
From: djjorjinho 
Date: Wed, 29 Mar 2017 23:04:29 +0100
Subject: [PATCH 0172/1302] fix timestamp parsing on prometheus plugin (#2596)

---
 CHANGELOG.md                                 |  1 +
 plugins/inputs/prometheus/prometheus.go      | 10 +++++-----
 plugins/inputs/prometheus/prometheus_test.go |  7 +++++++
 testutil/accumulator.go                      | 13 +++++++++++++
 4 files changed, 26 insertions(+), 5 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2650e5716..f9a29d075 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -86,6 +86,7 @@ be deprecated eventually.
 - [#2541](https://github.com/influxdata/telegraf/issues/2541): Return error on unsupported serializer data format.
 - [#1827](https://github.com/influxdata/telegraf/issues/1827): Fix Windows Performance Counters multi instance identifier
 - [#2576](https://github.com/influxdata/telegraf/pull/2576): Add write timeout to Riemann output
+- [#2596](https://github.com/influxdata/telegraf/pull/2596): Fix timestamp parsing on prometheus plugin
 
 
 ## v1.2.1 [2017-02-01]
diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go
index 97da17f04..c12127965 100644
--- a/plugins/inputs/prometheus/prometheus.go
+++ b/plugins/inputs/prometheus/prometheus.go
@@ -3,14 +3,15 @@ package prometheus
 import (
 	"errors"
 	"fmt"
-	"github.com/influxdata/telegraf"
-	"github.com/influxdata/telegraf/internal"
-	"github.com/influxdata/telegraf/plugins/inputs"
 	"io/ioutil"
 	"net"
 	"net/http"
 	"sync"
 	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/plugins/inputs"
 )
 
 const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3`
@@ -91,7 +92,6 @@ var client = &http.Client{
 }
 
 func (p *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error {
-	collectDate := time.Now()
 	var req, err = http.NewRequest("GET", url, nil)
 	req.Header.Add("Accept", acceptHeader)
 	var token []byte
@@ -145,7 +145,7 @@ func (p *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error {
 	for _, metric := range metrics {
 		tags := metric.Tags()
 		tags["url"] = url
-		acc.AddFields(metric.Name(), metric.Fields(), tags, collectDate)
+		acc.AddFields(metric.Name(), metric.Fields(), tags, metric.Time())
 	}
 
 	return nil
diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go
index 8a8fea9e3..4b316a3b4 100644
--- a/plugins/inputs/prometheus/prometheus_test.go
+++ b/plugins/inputs/prometheus/prometheus_test.go
@@ -5,6 +5,7 @@ import (
 	"net/http"
 	"net/http/httptest"
 	"testing"
+	"time"
 
 	"github.com/influxdata/telegraf/testutil"
 	"github.com/stretchr/testify/assert"
@@ -23,6 +24,9 @@ go_gc_duration_seconds_count 7
 # HELP go_goroutines Number of goroutines that currently exist.
 # TYPE go_goroutines gauge
 go_goroutines 15
+# HELP test_metric An untyped metric with a timestamp
+# TYPE test_metric untyped
+test_metric{label="value"} 1.0 1490802350000
 `
 
 func TestPrometheusGeneratesMetrics(t *testing.T) {
@@ -42,4 +46,7 @@ func TestPrometheusGeneratesMetrics(t *testing.T) {
 
 	assert.True(t, acc.HasFloatField("go_gc_duration_seconds", "count"))
 	assert.True(t, acc.HasFloatField("go_goroutines", "gauge"))
+	assert.True(t, acc.HasFloatField("test_metric", "value"))
+	assert.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0)))
+
 }
diff --git a/testutil/accumulator.go b/testutil/accumulator.go
index 02bebf9c8..b958e8cc9 100644
--- a/testutil/accumulator.go
+++ b/testutil/accumulator.go
@@ -275,6 +275,19 @@ func (a *Accumulator) AssertDoesNotContainMeasurement(t *testing.T, measurement
 	}
 }
 
+// HasTimestamp returns true if the measurement has a matching Time value
+func (a *Accumulator) HasTimestamp(measurement string, timestamp time.Time) bool {
+	a.Lock()
+	defer a.Unlock()
+	for _, p := range a.Metrics {
+		if p.Measurement == measurement {
+			return timestamp.Equal(p.Time)
+		}
+	}
+
+	return false
+}
+
 // HasIntField returns true if the measurement has an Int value
 func (a *Accumulator) HasIntField(measurement string, field string) bool {
 	a.Lock()

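One detail worth spelling out: the trailing `1490802350000` in the exposition text above is a Unix timestamp in milliseconds, which is why the test asserts `time.Unix(1490802350, 0)`. A quick sketch of the conversion:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Prometheus text-format timestamps are milliseconds since the epoch.
	ms := int64(1490802350000)
	ts := time.Unix(ms/1000, (ms%1000)*int64(time.Millisecond))
	fmt.Println(ts.UTC()) // 2017-03-29 15:45:50 +0000 UTC
}
```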
From 03ee6022f305dd0d73bb03dbb2fd3d81c9909a1f Mon Sep 17 00:00:00 2001
From: Patrick Hemmer 
Date: Wed, 29 Mar 2017 20:03:06 -0400
Subject: [PATCH 0173/1302] fix race in testutil Accumulator.Wait() (#2598)

---
 testutil/accumulator.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/testutil/accumulator.go b/testutil/accumulator.go
index b958e8cc9..9ebf77cf7 100644
--- a/testutil/accumulator.go
+++ b/testutil/accumulator.go
@@ -43,9 +43,9 @@ func (a *Accumulator) NMetrics() uint64 {
 }
 
 func (a *Accumulator) ClearMetrics() {
-	atomic.StoreUint64(&a.nMetrics, 0)
 	a.Lock()
 	defer a.Unlock()
+	atomic.StoreUint64(&a.nMetrics, 0)
 	a.Metrics = make([]*Metric, 0)
 }
 
@@ -56,9 +56,9 @@ func (a *Accumulator) AddFields(
 	tags map[string]string,
 	timestamp ...time.Time,
 ) {
-	atomic.AddUint64(&a.nMetrics, 1)
 	a.Lock()
 	defer a.Unlock()
+	atomic.AddUint64(&a.nMetrics, 1)
 	if a.Cond != nil {
 		a.Cond.Broadcast()
 	}

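The race fixed above comes from ordering: the atomic counter was updated before the lock was taken, so a `Wait()`-style reader could observe the new count while `a.Metrics` was still stale. Moving the atomic operation inside the critical section makes both updates appear together to anyone holding the lock. A minimal sketch of the corrected ordering (types illustrative):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type Acc struct {
	sync.Mutex
	nMetrics uint64
	metrics  []string
}

// AddMetric updates the counter and the slice under the same lock, so a
// reader holding the lock never sees one without the other.
func (a *Acc) AddMetric(m string) {
	a.Lock()
	defer a.Unlock()
	atomic.AddUint64(&a.nMetrics, 1)
	a.metrics = append(a.metrics, m)
}

func main() {
	a := &Acc{}
	a.AddMetric("cpu")
	fmt.Println(atomic.LoadUint64(&a.nMetrics), len(a.metrics)) // 1 1
}
```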
From fb1c7d01547bcb82ffd5583b1f7c38565c487335 Mon Sep 17 00:00:00 2001
From: tjmcs 
Date: Wed, 29 Mar 2017 17:12:29 -0700
Subject: [PATCH 0174/1302] Adds a new json_timestamp_units configuration
 parameter (#2587)

---
 docs/DATA_FORMATS_OUTPUT.md      | 10 ++++++++++
 internal/config/config.go        | 20 +++++++++++++++++++-
 plugins/serializers/json/json.go | 10 +++++++++-
 plugins/serializers/registry.go  | 12 ++++++++----
 4 files changed, 46 insertions(+), 6 deletions(-)

diff --git a/docs/DATA_FORMATS_OUTPUT.md b/docs/DATA_FORMATS_OUTPUT.md
index 177734d16..633460846 100644
--- a/docs/DATA_FORMATS_OUTPUT.md
+++ b/docs/DATA_FORMATS_OUTPUT.md
@@ -147,4 +147,14 @@ The JSON data format serialized Telegraf metrics in json format. The format is:
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
   data_format = "json"
+  json_timestamp_units = "1ns"
 ```
+
+By default, the timestamp output with JSON-serialized Telegraf metrics is in
+seconds. The precision of this timestamp can be adjusted for any output by
+adding the optional `json_timestamp_units` parameter to the configuration for
+that output. This parameter can be used to set the timestamp units to nanoseconds (`ns`),
+microseconds (`us` or `µs`), milliseconds (`ms`), or seconds (`s`). Note that the
+value of this parameter is truncated to the nearest power of 10, so if
+`json_timestamp_units` is set to `15ms`, the timestamps for JSON-serialized
+Telegraf metrics will be output in hundredths of a second (`10ms`).
diff --git a/internal/config/config.go b/internal/config/config.go
index 651c4e9ef..013e81c12 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"io/ioutil"
 	"log"
+	"math"
 	"os"
 	"path/filepath"
 	"regexp"
@@ -1244,7 +1245,7 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
 // a serializers.Serializer object, and creates it, which can then be added onto
 // an Output object.
 func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error) {
-	c := &serializers.Config{}
+	c := &serializers.Config{TimestampUnits: time.Duration(1 * time.Second)}
 
 	if node, ok := tbl.Fields["data_format"]; ok {
 		if kv, ok := node.(*ast.KeyValue); ok {
@@ -1274,9 +1275,26 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
 		}
 	}
 
+	if node, ok := tbl.Fields["json_timestamp_units"]; ok {
+		if kv, ok := node.(*ast.KeyValue); ok {
+			if str, ok := kv.Value.(*ast.String); ok {
+				timestampVal, err := time.ParseDuration(str.Value)
+				if err != nil {
+					return nil, fmt.Errorf("Unable to parse json_timestamp_units as a duration, %s", err)
+				}
+				// now that we have a duration, truncate it to the nearest
+				// power of ten (just in case)
+				nearest_exponent := int64(math.Log10(float64(timestampVal.Nanoseconds())))
+				new_nanoseconds := int64(math.Pow(10.0, float64(nearest_exponent)))
+				c.TimestampUnits = time.Duration(new_nanoseconds)
+			}
+		}
+	}
+
 	delete(tbl.Fields, "data_format")
 	delete(tbl.Fields, "prefix")
 	delete(tbl.Fields, "template")
+	delete(tbl.Fields, "json_timestamp_units")
 	return serializers.NewSerializer(c)
 }
 
diff --git a/plugins/serializers/json/json.go b/plugins/serializers/json/json.go
index 3e259fafd..452364c95 100644
--- a/plugins/serializers/json/json.go
+++ b/plugins/serializers/json/json.go
@@ -2,19 +2,27 @@ package json
 
 import (
 	ejson "encoding/json"
+	"time"
 
 	"github.com/influxdata/telegraf"
 )
 
 type JsonSerializer struct {
+	TimestampUnits time.Duration
 }
 
 func (s *JsonSerializer) Serialize(metric telegraf.Metric) ([]byte, error) {
 	m := make(map[string]interface{})
+	units_nanoseconds := s.TimestampUnits.Nanoseconds()
+	// if the units passed in were less than or equal to zero,
+	// then serialize the timestamp in seconds (the default)
+	if units_nanoseconds <= 0 {
+		units_nanoseconds = 1000000000
+	}
 	m["tags"] = metric.Tags()
 	m["fields"] = metric.Fields()
 	m["name"] = metric.Name()
-	m["timestamp"] = metric.UnixNano() / 1000000000
+	m["timestamp"] = metric.UnixNano() / units_nanoseconds
 	serialized, err := ejson.Marshal(m)
 	if err != nil {
 		return []byte{}, err
diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go
index cb1e03b46..368f6f449 100644
--- a/plugins/serializers/registry.go
+++ b/plugins/serializers/registry.go
@@ -2,6 +2,7 @@ package serializers
 
 import (
 	"fmt"
+	"time"
 
 	"github.com/influxdata/telegraf"
 
@@ -29,7 +30,7 @@ type Serializer interface {
 // Config is a struct that covers the data types needed for all serializer types,
 // and can be used to instantiate _any_ of the serializers.
 type Config struct {
-	// Dataformat can be one of: influx, graphite
+	// Dataformat can be one of: influx, graphite, or json
 	DataFormat string
 
 	// Prefix to add to all measurements, only supports Graphite
@@ -38,6 +39,9 @@ type Config struct {
 	// Template for converting telegraf metrics into Graphite
 	// only supports Graphite
 	Template string
+
+	// Timestamp units to use for JSON formatted output
+	TimestampUnits time.Duration
 }
 
 // NewSerializer a Serializer interface based on the given config.
@@ -50,15 +54,15 @@ func NewSerializer(config *Config) (Serializer, error) {
 	case "graphite":
 		serializer, err = NewGraphiteSerializer(config.Prefix, config.Template)
 	case "json":
-		serializer, err = NewJsonSerializer()
+		serializer, err = NewJsonSerializer(config.TimestampUnits)
 	default:
 		err = fmt.Errorf("Invalid data format: %s", config.DataFormat)
 	}
 	return serializer, err
 }
 
-func NewJsonSerializer() (Serializer, error) {
-	return &json.JsonSerializer{}, nil
+func NewJsonSerializer(timestampUnits time.Duration) (Serializer, error) {
+	return &json.JsonSerializer{TimestampUnits: timestampUnits}, nil
 }
 
 func NewInfluxSerializer() (Serializer, error) {

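The truncation described in the docs above rounds the configured duration down to a power of ten nanoseconds via `log10`/`pow`, the same arithmetic `buildSerializer` performs. A standalone sketch, assuming a positive duration (the function name is illustrative):

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// truncateToPowerOfTen mirrors the json_timestamp_units behaviour described
// above: a duration such as 15ms is truncated to the nearest lower power of
// ten, here 10ms. Assumes d > 0.
func truncateToPowerOfTen(d time.Duration) time.Duration {
	exp := int64(math.Log10(float64(d.Nanoseconds())))
	return time.Duration(int64(math.Pow(10, float64(exp))))
}

func main() {
	fmt.Println(truncateToPowerOfTen(15 * time.Millisecond)) // 10ms
	fmt.Println(truncateToPowerOfTen(1 * time.Second))       // 1s
}
```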
From 9495b615f55a937e0f1a79670451227d3df5166d Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Wed, 29 Mar 2017 17:14:57 -0700
Subject: [PATCH 0175/1302] Update changelog for #2587

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f9a29d075..2ab699600 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -60,6 +60,7 @@ be deprecated eventually.
 - [#2522](https://github.com/influxdata/telegraf/pull/2522): Add support for mongodb client certificates.
 - [#1948](https://github.com/influxdata/telegraf/pull/1948): Support adding SNMP table indexes as tags.
 - [#2332](https://github.com/influxdata/telegraf/pull/2332): Add Elasticsearch 5.x output
+- [#2587](https://github.com/influxdata/telegraf/pull/2587): Add json timestamp units configurability
 
 ### Bugfixes
 

From c980c92cd5d7f5d4d4b3f3c81401c70a65eb5a0a Mon Sep 17 00:00:00 2001
From: Dmitry Ulyanov 
Date: Thu, 30 Mar 2017 04:28:43 +0300
Subject: [PATCH 0176/1302] Added pprof tool (#2512)

---
 CHANGELOG.md             |  1 +
 cmd/telegraf/telegraf.go | 25 +++++++++++++++++++++++++
 docs/PROFILING.md        | 24 ++++++++++++++++++++++++
 3 files changed, 50 insertions(+)
 create mode 100644 docs/PROFILING.md

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2ab699600..9f90157f7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -56,6 +56,7 @@ be deprecated eventually.
 - [#2339](https://github.com/influxdata/telegraf/pull/2339): Increment gather_errors for all errors emitted by inputs.
 - [#2071](https://github.com/influxdata/telegraf/issues/2071): Use official docker SDK.
 - [#1678](https://github.com/influxdata/telegraf/pull/1678): Add AMQP consumer input plugin
+- [#2512](https://github.com/influxdata/telegraf/pull/2512): Added pprof tool.
 - [#2501](https://github.com/influxdata/telegraf/pull/2501): Support DEAD(X) state in system input plugin.
 - [#2522](https://github.com/influxdata/telegraf/pull/2522): Add support for mongodb client certificates.
 - [#1948](https://github.com/influxdata/telegraf/pull/1948): Support adding SNMP table indexes as tags.
diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go
index 16f7845d0..40e90a1ec 100644
--- a/cmd/telegraf/telegraf.go
+++ b/cmd/telegraf/telegraf.go
@@ -4,6 +4,8 @@ import (
 	"flag"
 	"fmt"
 	"log"
+	"net/http"
+	_ "net/http/pprof" // Comment this line to disable pprof endpoint.
 	"os"
 	"os/signal"
 	"runtime"
@@ -24,6 +26,8 @@ import (
 
 var fDebug = flag.Bool("debug", false,
 	"turn on debug logging")
+var pprofAddr = flag.String("pprof-addr", "",
+	"pprof address to listen on, not activate pprof if empty")
 var fQuiet = flag.Bool("quiet", false,
 	"run in quiet mode")
 var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
@@ -87,6 +91,7 @@ The commands & flags are:
   --output-filter     filter the output plugins to enable, separator is :
   --usage             print usage for a plugin, ie, 'telegraf --usage mysql'
   --debug             print metrics as they're generated to stdout
+  --pprof-addr        pprof address to listen on, format: localhost:6060 or :6060
   --quiet             run in quiet mode
 
 Examples:
@@ -105,6 +110,9 @@ Examples:
 
   # run telegraf, enabling the cpu & memory input, and influxdb output plugins
   telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
+
+  # run telegraf with pprof
+  telegraf --config telegraf.conf --pprof-addr localhost:6060
 `
 
 var stop chan struct{}
@@ -267,6 +275,23 @@ func main() {
 		processorFilters = strings.Split(":"+strings.TrimSpace(*fProcessorFilters)+":", ":")
 	}
 
+	if *pprofAddr != "" {
+		go func() {
+			pprofHostPort := *pprofAddr
+			parts := strings.Split(pprofHostPort, ":")
+			if len(parts) == 2 && parts[0] == "" {
+				pprofHostPort = fmt.Sprintf("localhost:%s", parts[1])
+			}
+			pprofHostPort = "http://" + pprofHostPort + "/debug/pprof"
+
+			log.Printf("I! Starting pprof HTTP server at: %s", pprofHostPort)
+
+			if err := http.ListenAndServe(*pprofAddr, nil); err != nil {
+				log.Fatal("E! " + err.Error())
+			}
+		}()
+	}
+
 	if len(args) > 0 {
 		switch args[0] {
 		case "version":
diff --git a/docs/PROFILING.md b/docs/PROFILING.md
new file mode 100644
index 000000000..a0851c8f1
--- /dev/null
+++ b/docs/PROFILING.md
@@ -0,0 +1,24 @@
+# Telegraf profiling
+
+Telegraf uses the standard package `net/http/pprof`. This package serves runtime profiling data over HTTP in the format expected by the pprof visualization tool.
+
+By default, the profiling is turned off.
+
+To enable profiling, pass an address to the `pprof-addr` flag, for example:
+
+```
+telegraf --config telegraf.conf --pprof-addr localhost:6060
+```
+
+There are several paths to get different profiling information:
+
+To look at the heap profile:
+
+`go tool pprof http://localhost:6060/debug/pprof/heap`
+
+or to look at a 30-second CPU profile:
+
+`go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30`
+
+To view all available profiles, open `http://localhost:6060/debug/pprof/` in your browser.
+

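The blank import of `net/http/pprof` is all that wires the profiling handlers into `http.DefaultServeMux`; serving that mux exposes `/debug/pprof`. A minimal standalone sketch of the same pattern:

```go
package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof handlers on http.DefaultServeMux
)

func main() {
	// Serve the default mux, which now includes the pprof endpoints.
	log.Fatal(http.ListenAndServe("localhost:6060", nil))
}
```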
From 540f98e2280f5fa85b0cdbd33b16cb95691cf2a4 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Fri, 31 Mar 2017 12:45:28 -0700
Subject: [PATCH 0177/1302] Fix possible deadlock when output cannot write.
 (#2610)

---
 CHANGELOG.md              | 1 +
 internal/buffer/buffer.go | 2 ++
 2 files changed, 3 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9f90157f7..cf7c31c4b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -89,6 +89,7 @@ be deprecated eventually.
 - [#1827](https://github.com/influxdata/telegraf/issues/1827): Fix Windows Performance Counters multi instance identifier
 - [#2576](https://github.com/influxdata/telegraf/pull/2576): Add write timeout to Riemann output
 - [#2596](https://github.com/influxdata/telegraf/pull/2596): fix timestamp parsing on prometheus plugin
+- [#2610](https://github.com/influxdata/telegraf/pull/2610): Fix deadlock when output cannot write
 
 
 ## v1.2.1 [2017-02-01]
diff --git a/internal/buffer/buffer.go b/internal/buffer/buffer.go
index 5e7818ef1..cdc81fed3 100644
--- a/internal/buffer/buffer.go
+++ b/internal/buffer/buffer.go
@@ -45,9 +45,11 @@ func (b *Buffer) Add(metrics ...telegraf.Metric) {
 		select {
 		case b.buf <- metrics[i]:
 		default:
+			b.mu.Lock()
 			MetricsDropped.Incr(1)
 			<-b.buf
 			b.buf <- metrics[i]
+			b.mu.Unlock()
 		}
 	}
 }

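The fix above wraps the drop-oldest path in the buffer's mutex so the drain-and-refill pair cannot interleave with the buffer's other locked operations (in the full implementation the reader side takes the same lock). A minimal single-goroutine sketch of the pattern:

```go
package main

import (
	"fmt"
	"sync"
)

// Buffer is a bounded drop-oldest buffer, as in the patch above. When the
// channel is full, the writer takes the lock, discards the oldest element,
// and inserts the new one.
type Buffer struct {
	mu  sync.Mutex
	buf chan int
}

func (b *Buffer) Add(v int) {
	select {
	case b.buf <- v:
	default:
		b.mu.Lock()
		<-b.buf    // drop oldest
		b.buf <- v // insert newest
		b.mu.Unlock()
	}
}

func main() {
	b := &Buffer{buf: make(chan int, 2)}
	for i := 1; i <= 4; i++ {
		b.Add(i)
	}
	fmt.Println(<-b.buf, <-b.buf) // 3 4
}
```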
From 51c99d5b67f7a6d58d801e4dfcb041f3ade74fb6 Mon Sep 17 00:00:00 2001
From: Patrick Hemmer 
Date: Fri, 31 Mar 2017 17:01:02 -0400
Subject: [PATCH 0178/1302] add support for linux sysctl fs metrics (#2609)

---
 CHANGELOG.md                                  |  1 +
 README.md                                     |  1 +
 .../inputs/system/LINUX_SYSCTL_FS_README.md   |  9 ++
 plugins/inputs/system/linux_sysctl_fs.go      | 88 +++++++++++++++++++
 plugins/inputs/system/linux_sysctl_fs_test.go | 41 +++++++++
 5 files changed, 140 insertions(+)
 create mode 100644 plugins/inputs/system/LINUX_SYSCTL_FS_README.md
 create mode 100644 plugins/inputs/system/linux_sysctl_fs.go
 create mode 100644 plugins/inputs/system/linux_sysctl_fs_test.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index cf7c31c4b..fa4b820c7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -62,6 +62,7 @@ be deprecated eventually.
 - [#1948](https://github.com/influxdata/telegraf/pull/1948): Support adding SNMP table indexes as tags.
 - [#2332](https://github.com/influxdata/telegraf/pull/2332): Add Elasticsearch 5.x output
 - [#2587](https://github.com/influxdata/telegraf/pull/2587): Add json timestamp units configurability
+- [#2597](https://github.com/influxdata/telegraf/issues/2597): Add support for Linux sysctl-fs metrics.
 
 ### Bugfixes
 
diff --git a/README.md b/README.md
index 906862714..55154e36a 100644
--- a/README.md
+++ b/README.md
@@ -174,6 +174,7 @@ configuration options.
     * processes
     * kernel (/proc/stat)
     * kernel (/proc/vmstat)
+    * linux_sysctl_fs (/proc/sys/fs)
 
 Telegraf can also collect metrics via the following service plugins:
 
diff --git a/plugins/inputs/system/LINUX_SYSCTL_FS_README.md b/plugins/inputs/system/LINUX_SYSCTL_FS_README.md
new file mode 100644
index 000000000..e9341c322
--- /dev/null
+++ b/plugins/inputs/system/LINUX_SYSCTL_FS_README.md
@@ -0,0 +1,9 @@
+# Linux Sysctl FS Input
+
+The linux_sysctl_fs input provides Linux system-level file metrics. Documentation on these fields can be found at https://www.kernel.org/doc/Documentation/sysctl/fs.txt.
+
+Example output:
+
+```
+> linux_sysctl_fs,host=foo dentry-want-pages=0i,file-max=44222i,aio-max-nr=65536i,inode-preshrink-nr=0i,dentry-nr=64340i,dentry-unused-nr=55274i,file-nr=1568i,aio-nr=0i,inode-nr=35952i,inode-free-nr=12957i,dentry-age-limit=45i 1490982022000000000
+```
diff --git a/plugins/inputs/system/linux_sysctl_fs.go b/plugins/inputs/system/linux_sysctl_fs.go
new file mode 100644
index 000000000..93e426e75
--- /dev/null
+++ b/plugins/inputs/system/linux_sysctl_fs.go
@@ -0,0 +1,88 @@
+package system
+
+import (
+	"bytes"
+	"io/ioutil"
+	"strconv"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// https://www.kernel.org/doc/Documentation/sysctl/fs.txt
+type SysctlFS struct {
+	path string
+}
+
+var sysctlFSDescription = `Provides Linux sysctl fs metrics`
+var sysctlFSSampleConfig = ``
+
+func (_ SysctlFS) Description() string {
+	return sysctlFSDescription
+}
+func (_ SysctlFS) SampleConfig() string {
+	return sysctlFSSampleConfig
+}
+
+func (sfs *SysctlFS) gatherList(file string, fields map[string]interface{}, fieldNames ...string) error {
+	bs, err := ioutil.ReadFile(sfs.path + "/" + file)
+	if err != nil {
+		return err
+	}
+
+	bsplit := bytes.Split(bytes.TrimRight(bs, "\n"), []byte{'\t'})
+	for i, name := range fieldNames {
+		if i >= len(bsplit) {
+			break
+		}
+		if name == "" {
+			continue
+		}
+
+		v, err := strconv.ParseUint(string(bsplit[i]), 10, 64)
+		if err != nil {
+			return err
+		}
+		fields[name] = v
+	}
+
+	return nil
+}
+
+func (sfs *SysctlFS) gatherOne(name string, fields map[string]interface{}) error {
+	bs, err := ioutil.ReadFile(sfs.path + "/" + name)
+	if err != nil {
+		return err
+	}
+
+	v, err := strconv.ParseUint(string(bytes.TrimRight(bs, "\n")), 10, 64)
+	if err != nil {
+		return err
+	}
+
+	fields[name] = v
+	return nil
+}
+
+func (sfs *SysctlFS) Gather(acc telegraf.Accumulator) error {
+	fields := map[string]interface{}{}
+
+	for _, n := range []string{"aio-nr", "aio-max-nr", "dquot-nr", "dquot-max", "super-nr", "super-max"} {
+		sfs.gatherOne(n, fields)
+	}
+
+	sfs.gatherList("inode-state", fields, "inode-nr", "inode-free-nr", "inode-preshrink-nr")
+	sfs.gatherList("dentry-state", fields, "dentry-nr", "dentry-unused-nr", "dentry-age-limit", "dentry-want-pages")
+	sfs.gatherList("file-nr", fields, "file-nr", "", "file-max")
+
+	acc.AddFields("linux_sysctl_fs", fields, nil)
+	return nil
+}
+
+func init() {
+	inputs.Add("linux_sysctl_fs", func() telegraf.Input {
+		return &SysctlFS{
+			path: "/proc/sys/fs",
+		}
+	})
+}
diff --git a/plugins/inputs/system/linux_sysctl_fs_test.go b/plugins/inputs/system/linux_sysctl_fs_test.go
new file mode 100644
index 000000000..6561465cb
--- /dev/null
+++ b/plugins/inputs/system/linux_sysctl_fs_test.go
@@ -0,0 +1,41 @@
+package system
+
+import (
+	"io/ioutil"
+	"os"
+	"testing"
+
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/require"
+)
+
+func TestSysctlFSGather(t *testing.T) {
+	td, err := ioutil.TempDir("", "")
+	require.NoError(t, err)
+	defer os.RemoveAll(td)
+
+	require.NoError(t, ioutil.WriteFile(td+"/aio-nr", []byte("100\n"), 0644))
+	require.NoError(t, ioutil.WriteFile(td+"/aio-max-nr", []byte("101\n"), 0644))
+	require.NoError(t, ioutil.WriteFile(td+"/super-nr", []byte("102\n"), 0644))
+	require.NoError(t, ioutil.WriteFile(td+"/super-max", []byte("103\n"), 0644))
+	require.NoError(t, ioutil.WriteFile(td+"/file-nr", []byte("104\t0\t106\n"), 0644))
+	require.NoError(t, ioutil.WriteFile(td+"/inode-state", []byte("107\t108\t109\t0\t0\t0\t0\n"), 0644))
+
+	sfs := &SysctlFS{
+		path: td,
+	}
+	var acc testutil.Accumulator
+	require.NoError(t, sfs.Gather(&acc))
+
+	acc.AssertContainsFields(t, "linux_sysctl_fs", map[string]interface{}{
+		"aio-nr":             uint64(100),
+		"aio-max-nr":         uint64(101),
+		"super-nr":           uint64(102),
+		"super-max":          uint64(103),
+		"file-nr":            uint64(104),
+		"file-max":           uint64(106),
+		"inode-nr":           uint64(107),
+		"inode-free-nr":      uint64(108),
+		"inode-preshrink-nr": uint64(109),
+	})
+}

From 35e439016825b49d6b41223b7494f433c21eceb4 Mon Sep 17 00:00:00 2001
From: Shakeel Sorathia 
Date: Mon, 3 Apr 2017 13:43:15 -0700
Subject: [PATCH 0179/1302] Docker: optionally add labels as tags (#2425)

---
 CHANGELOG.md                         |  1 +
 plugins/inputs/docker/README.md      | 26 ++++++++----
 plugins/inputs/docker/docker.go      | 61 ++++++++++++++++++++++++----
 plugins/inputs/docker/docker_test.go | 55 +++++++++++++++++++++++++
 plugins/inputs/docker/fake_client.go |  8 ++++
 5 files changed, 135 insertions(+), 16 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index fa4b820c7..e7da095d4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -63,6 +63,7 @@ be deprecated eventually.
 - [#2332](https://github.com/influxdata/telegraf/pull/2332): Add Elasticsearch 5.x output
 - [#2587](https://github.com/influxdata/telegraf/pull/2587): Add json timestamp units configurability
 - [#2597](https://github.com/influxdata/telegraf/issues/2597): Add support for Linux sysctl-fs metrics.
+- [#2425](https://github.com/influxdata/telegraf/pull/2425): Support to include/exclude docker container labels as tags
 
 ### Bugfixes
 
diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md
index 94965213f..849450b33 100644
--- a/plugins/inputs/docker/README.md
+++ b/plugins/inputs/docker/README.md
@@ -30,6 +30,12 @@ for the stat structure can be found
   perdevice = true
   ## Whether to report for each container total blkio and network stats or not
   total = false
+  
+  ## docker labels to include and exclude as tags.  Globs accepted.
+  ## Note that an empty array for both will include all labels as tags
+  docker_label_include = []
+  docker_label_exclude = []
+  
 ```
 
 ### Measurements & Fields:
@@ -130,30 +136,32 @@ based on the availability of per-cpu stats on your system.
 
 
 ### Tags:
-
+#### Docker Engine tags
 - docker (memory_total)
     - unit=bytes
+    - engine_host
 - docker (pool_blocksize)
     - unit=bytes
+    - engine_host
 - docker_data
     - unit=bytes
+    - engine_host
 - docker_metadata
     - unit=bytes
+    - engine_host
 
+#### Docker Container tags
+- Tags on all containers:
+    - engine_host
+    - container_image
+    - container_name
+    - container_version
 - docker_container_mem specific:
-    - container_image
-    - container_name
 - docker_container_cpu specific:
-    - container_image
-    - container_name
     - cpu
 - docker_container_net specific:
-    - container_image
-    - container_name
     - network
 - docker_container_blkio specific:
-    - container_image
-    - container_name
     - device
 
 ### Example Output:
diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go
index ec192efd5..47d1db14b 100644
--- a/plugins/inputs/docker/docker.go
+++ b/plugins/inputs/docker/docker.go
@@ -14,24 +14,34 @@ import (
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/client"
-
 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/filter"
 	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
 
+type DockerLabelFilter struct {
+	labelInclude filter.Filter
+	labelExclude filter.Filter
+}
+
 // Docker object
 type Docker struct {
 	Endpoint       string
 	ContainerNames []string
 	Timeout        internal.Duration
-	PerDevice      bool `toml:"perdevice"`
-	Total          bool `toml:"total"`
+	PerDevice      bool     `toml:"perdevice"`
+	Total          bool     `toml:"total"`
+	LabelInclude   []string `toml:"docker_label_include"`
+	LabelExclude   []string `toml:"docker_label_exclude"`
+
+	LabelFilter DockerLabelFilter
 
 	client      *client.Client
 	engine_host string
 
-	testing bool
+	testing             bool
+	labelFiltersCreated bool
 }
 
 // infoWrapper wraps client.Client.List for testing.
@@ -99,6 +109,10 @@ var sampleConfig = `
   ## Whether to report for each container total blkio and network stats or not
   total = false
 
+  ## docker labels to include and exclude as tags.  Globs accepted.
+  ## Note that an empty array for both will include all labels as tags
+  docker_label_include = []
+  docker_label_exclude = []
 `
 
 // Description returns input description
@@ -133,6 +147,14 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
 		}
 		d.client = c
 	}
+	// Create label filters if not already created
+	if !d.labelFiltersCreated {
+		err := d.createLabelFilters()
+		if err != nil {
+			return err
+		}
+		d.labelFiltersCreated = true
+	}
 
 	// Get daemon info
 	err := d.gatherInfo(acc)
@@ -293,7 +315,11 @@ func (d *Docker) gatherContainer(
 
 	// Add labels to tags
 	for k, label := range container.Labels {
-		tags[k] = label
+		if len(d.LabelInclude) == 0 || d.LabelFilter.labelInclude.Match(k) {
+			if len(d.LabelExclude) == 0 || !d.LabelFilter.labelExclude.Match(k) {
+				tags[k] = label
+			}
+		}
 	}
 
 	gatherContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total)
@@ -599,11 +625,32 @@ func parseSize(sizeStr string) (int64, error) {
 	return int64(size), nil
 }
 
+func (d *Docker) createLabelFilters() error {
+	if len(d.LabelInclude) != 0 && d.LabelFilter.labelInclude == nil {
+		var err error
+		d.LabelFilter.labelInclude, err = filter.Compile(d.LabelInclude)
+		if err != nil {
+			return err
+		}
+	}
+
+	if len(d.LabelExclude) != 0 && d.LabelFilter.labelExclude == nil {
+		var err error
+		d.LabelFilter.labelExclude, err = filter.Compile(d.LabelExclude)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
 func init() {
 	inputs.Add("docker", func() telegraf.Input {
 		return &Docker{
-			PerDevice: true,
-			Timeout:   internal.Duration{Duration: time.Second * 5},
+			PerDevice:           true,
+			Timeout:             internal.Duration{Duration: time.Second * 5},
+			labelFiltersCreated: false,
 		}
 	})
 }
diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go
index f0add03ea..3e2e1607b 100644
--- a/plugins/inputs/docker/docker_test.go
+++ b/plugins/inputs/docker/docker_test.go
@@ -244,6 +244,57 @@ func testStats() *types.StatsJSON {
 	return stats
 }
 
+var gatherLabelsTests = []struct {
+	include     []string
+	exclude     []string
+	expected    []string
+	notexpected []string
+}{
+	{[]string{}, []string{}, []string{"label1", "label2"}, []string{}},
+	{[]string{"*"}, []string{}, []string{"label1", "label2"}, []string{}},
+	{[]string{"lab*"}, []string{}, []string{"label1", "label2"}, []string{}},
+	{[]string{"label1"}, []string{}, []string{"label1"}, []string{"label2"}},
+	{[]string{"label1*"}, []string{}, []string{"label1"}, []string{"label2"}},
+	{[]string{}, []string{"*"}, []string{}, []string{"label1", "label2"}},
+	{[]string{}, []string{"lab*"}, []string{}, []string{"label1", "label2"}},
+	{[]string{}, []string{"label1"}, []string{"label2"}, []string{"label1"}},
+	{[]string{"*"}, []string{"*"}, []string{}, []string{"label1", "label2"}},
+}
+
+func TestDockerGatherLabels(t *testing.T) {
+	for _, tt := range gatherLabelsTests {
+		var acc testutil.Accumulator
+		d := Docker{
+			client:  nil,
+			testing: true,
+		}
+
+		for _, label := range tt.include {
+			d.LabelInclude = append(d.LabelInclude, label)
+		}
+		for _, label := range tt.exclude {
+			d.LabelExclude = append(d.LabelExclude, label)
+		}
+
+		err := d.Gather(&acc)
+		require.NoError(t, err)
+
+		for _, label := range tt.expected {
+			if !acc.HasTag("docker_container_cpu", label) {
+				t.Errorf("Didn't get expected label of %s.  Test was:  Include: %s  Exclude %s",
+					label, tt.include, tt.exclude)
+			}
+		}
+
+		for _, label := range tt.notexpected {
+			if acc.HasTag("docker_container_cpu", label) {
+				t.Errorf("Got unexpected label of %s.  Test was:  Include: %s  Exclude %s",
+					label, tt.include, tt.exclude)
+			}
+		}
+	}
+}
+
 func TestDockerGatherInfo(t *testing.T) {
 	var acc testutil.Accumulator
 	d := Docker{
@@ -294,6 +345,8 @@ func TestDockerGatherInfo(t *testing.T) {
 			"cpu":               "cpu3",
 			"container_version": "v2.2.2",
 			"engine_host":       "absol",
+			"label1":            "test_value_1",
+			"label2":            "test_value_2",
 		},
 	)
 	acc.AssertContainsTaggedFields(t,
@@ -340,6 +393,8 @@ func TestDockerGatherInfo(t *testing.T) {
 			"container_name":    "etcd2",
 			"container_image":   "quay.io:4443/coreos/etcd",
 			"container_version": "v2.2.2",
+			"label1":            "test_value_1",
+			"label2":            "test_value_2",
 		},
 	)
 
diff --git a/plugins/inputs/docker/fake_client.go b/plugins/inputs/docker/fake_client.go
index 03da23198..dcca6f235 100644
--- a/plugins/inputs/docker/fake_client.go
+++ b/plugins/inputs/docker/fake_client.go
@@ -92,6 +92,10 @@ func (d FakeDockerClient) ContainerList(octx context.Context, options types.Cont
 				IP:          "0.0.0.0",
 			},
 		},
+		Labels: map[string]string{
+			"label1": "test_value_1",
+			"label2": "test_value_2",
+		},
 		SizeRw:     0,
 		SizeRootFs: 0,
 	}
@@ -125,6 +129,10 @@ func (d FakeDockerClient) ContainerList(octx context.Context, options types.Cont
 				IP:          "0.0.0.0",
 			},
 		},
+		Labels: map[string]string{
+			"label1": "test_value_1",
+			"label2": "test_value_2",
+		},
 		SizeRw:     0,
 		SizeRootFs: 0,
 	}

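The include/exclude logic added in `gatherContainer` reduces to a small predicate over compiled glob filters. A minimal sketch, assuming the glob semantics of `github.com/influxdata/telegraf/filter` used above (the standalone `shouldTag` helper is hypothetical, not part of the plugin):

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/filter"
)

// shouldTag reports whether a container label key should be copied into tags.
// A nil include filter means "include everything"; the exclude filter wins.
func shouldTag(key string, include, exclude filter.Filter) bool {
	if include != nil && !include.Match(key) {
		return false
	}
	if exclude != nil && exclude.Match(key) {
		return false
	}
	return true
}

func main() {
	include, _ := filter.Compile([]string{"label1*"})
	exclude, _ := filter.Compile([]string{"*2"})
	for _, key := range []string{"label1", "label12", "label2"} {
		fmt.Println(key, shouldTag(key, include, exclude)) // true, false, false
	}
}
```
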
From f2805fd4aa8eaba02127e68291427592bb3b0d68 Mon Sep 17 00:00:00 2001
From: Patrick Hemmer 
Date: Mon, 3 Apr 2017 21:06:51 -0400
Subject: [PATCH 0180/1302] socket_listener: clean up unix socket file on start
 & stop (#2618)

---
 .../inputs/socket_listener/socket_listener.go | 35 +++++++++++++++++--
 .../socket_listener/socket_listener_test.go   |  8 +++--
 .../socket_writer/socket_writer_test.go       |  2 ++
 3 files changed, 40 insertions(+), 5 deletions(-)

diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go
index 9d3a8e1fe..b5c0202cc 100644
--- a/plugins/inputs/socket_listener/socket_listener.go
+++ b/plugins/inputs/socket_listener/socket_listener.go
@@ -6,6 +6,7 @@ import (
 	"io"
 	"log"
 	"net"
+	"os"
 	"strings"
 	"sync"
 
@@ -32,7 +33,9 @@ func (ssl *streamSocketListener) listen() {
 	for {
 		c, err := ssl.Accept()
 		if err != nil {
-			ssl.AddError(err)
+			if !strings.HasSuffix(err.Error(), ": use of closed network connection") {
+				ssl.AddError(err)
+			}
 			break
 		}
 
@@ -78,7 +81,9 @@ func (ssl *streamSocketListener) read(c net.Conn) {
 	}
 
 	if err := scnr.Err(); err != nil {
-		ssl.AddError(err)
+		if !strings.HasSuffix(err.Error(), ": use of closed network connection") {
+			ssl.AddError(err)
+		}
 	}
 }
 
@@ -92,7 +97,9 @@ func (psl *packetSocketListener) listen() {
 	for {
 		n, _, err := psl.ReadFrom(buf)
 		if err != nil {
-			psl.AddError(err)
+			if !strings.HasSuffix(err.Error(), ": use of closed network connection") {
+				psl.AddError(err)
+			}
 			break
 		}
 
@@ -170,6 +177,13 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error {
 		return fmt.Errorf("invalid service address: %s", sl.ServiceAddress)
 	}
 
+	if spl[0] == "unix" || spl[0] == "unixpacket" || spl[0] == "unixgram" {
+		// There is no good way to test for "file does not exist"; ignore the
+		// error and let the subsequent Listen call fail, which will report
+		// "address already in use" if the file existed and could not be removed.
+		os.Remove(spl[1])
+	}
+
 	switch spl[0] {
 	case "tcp", "tcp4", "tcp6", "unix", "unixpacket":
 		l, err := net.Listen(spl[0], spl[1])
@@ -217,6 +231,10 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error {
 		return fmt.Errorf("unknown protocol '%s' in '%s'", spl[0], sl.ServiceAddress)
 	}
 
+	if spl[0] == "unix" || spl[0] == "unixpacket" || spl[0] == "unixgram" {
+		sl.Closer = unixCloser{path: spl[1], closer: sl.Closer}
+	}
+
 	return nil
 }
 
@@ -235,6 +253,17 @@ func newSocketListener() *SocketListener {
 	}
 }
 
+type unixCloser struct {
+	path   string
+	closer io.Closer
+}
+
+func (uc unixCloser) Close() error {
+	err := uc.closer.Close()
+	os.Remove(uc.path) // ignore error
+	return err
+}
+
 func init() {
 	inputs.Add("socket_listener", func() telegraf.Input { return newSocketListener() })
 }
diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go
index 9fa472809..b263e5082 100644
--- a/plugins/inputs/socket_listener/socket_listener_test.go
+++ b/plugins/inputs/socket_listener/socket_listener_test.go
@@ -18,6 +18,7 @@ func TestSocketListener_tcp(t *testing.T) {
 	acc := &testutil.Accumulator{}
 	err := sl.Start(acc)
 	require.NoError(t, err)
+	defer sl.Stop()
 
 	client, err := net.Dial("tcp", sl.Closer.(net.Listener).Addr().String())
 	require.NoError(t, err)
@@ -32,6 +33,7 @@ func TestSocketListener_udp(t *testing.T) {
 	acc := &testutil.Accumulator{}
 	err := sl.Start(acc)
 	require.NoError(t, err)
+	defer sl.Stop()
 
 	client, err := net.Dial("udp", sl.Closer.(net.PacketConn).LocalAddr().String())
 	require.NoError(t, err)
@@ -40,13 +42,14 @@ func TestSocketListener_udp(t *testing.T) {
 }
 
 func TestSocketListener_unix(t *testing.T) {
-	defer os.Remove("/tmp/telegraf_test.sock")
+	os.Create("/tmp/telegraf_test.sock")
 	sl := newSocketListener()
 	sl.ServiceAddress = "unix:///tmp/telegraf_test.sock"
 
 	acc := &testutil.Accumulator{}
 	err := sl.Start(acc)
 	require.NoError(t, err)
+	defer sl.Stop()
 
 	client, err := net.Dial("unix", "/tmp/telegraf_test.sock")
 	require.NoError(t, err)
@@ -55,13 +58,14 @@ func TestSocketListener_unix(t *testing.T) {
 }
 
 func TestSocketListener_unixgram(t *testing.T) {
-	defer os.Remove("/tmp/telegraf_test.sock")
+	os.Create("/tmp/telegraf_test.sock")
 	sl := newSocketListener()
 	sl.ServiceAddress = "unixgram:///tmp/telegraf_test.sock"
 
 	acc := &testutil.Accumulator{}
 	err := sl.Start(acc)
 	require.NoError(t, err)
+	defer sl.Stop()
 
 	client, err := net.Dial("unixgram", "/tmp/telegraf_test.sock")
 	require.NoError(t, err)
diff --git a/plugins/outputs/socket_writer/socket_writer_test.go b/plugins/outputs/socket_writer/socket_writer_test.go
index 3ab9d1e34..6be2b0905 100644
--- a/plugins/outputs/socket_writer/socket_writer_test.go
+++ b/plugins/outputs/socket_writer/socket_writer_test.go
@@ -44,6 +44,7 @@ func TestSocketWriter_udp(t *testing.T) {
 }
 
 func TestSocketWriter_unix(t *testing.T) {
+	os.Remove("/tmp/telegraf_test.sock")
 	defer os.Remove("/tmp/telegraf_test.sock")
 	listener, err := net.Listen("unix", "/tmp/telegraf_test.sock")
 	require.NoError(t, err)
@@ -61,6 +62,7 @@ func TestSocketWriter_unix(t *testing.T) {
 }
 
 func TestSocketWriter_unixgram(t *testing.T) {
+	os.Remove("/tmp/telegraf_test.sock")
 	defer os.Remove("/tmp/telegraf_test.sock")
 	listener, err := net.ListenPacket("unixgram", "/tmp/telegraf_test.sock")
 	require.NoError(t, err)

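The stale-socket handling in this patch follows a common Unix-socket pattern: best-effort removal of the file before binding, and removal again on shutdown. A minimal standalone sketch of that pattern (an illustration, not the plugin code itself; for `unix` stream listeners Go may also unlink the file on `Close`, so the final removal is a harmless no-op in that case):

```go
package main

import (
	"log"
	"net"
	"os"
)

func main() {
	const path = "/tmp/example.sock"

	// Best-effort removal of a stale socket left by a previous run. If the
	// file is genuinely in use, net.Listen fails with "address already in
	// use", which is exactly the error we want surfaced.
	os.Remove(path)

	l, err := net.Listen("unix", path)
	if err != nil {
		log.Fatal(err)
	}
	defer func() {
		l.Close()
		os.Remove(path) // remove the socket file on shutdown (no-op if already gone)
	}()

	// ... accept connections here ...
}
```
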
From 8bf193dc064f27f917d3aeb0549d792e41d8f013 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Mon, 3 Apr 2017 18:34:04 -0700
Subject: [PATCH 0181/1302] Update httpjson documentation (#2619)

closes  #2536
---
 plugins/inputs/EXAMPLE_README.md    |   2 +-
 plugins/inputs/httpjson/README.md   | 227 ++++++++++------------------
 plugins/inputs/httpjson/httpjson.go |  17 ++-
 3 files changed, 90 insertions(+), 156 deletions(-)

diff --git a/plugins/inputs/EXAMPLE_README.md b/plugins/inputs/EXAMPLE_README.md
index d6fcfdb91..a38064a7a 100644
--- a/plugins/inputs/EXAMPLE_README.md
+++ b/plugins/inputs/EXAMPLE_README.md
@@ -27,7 +27,7 @@ The example plugin gathers metrics about example things
     - tag2
 - measurement2 has the following tags:
     - tag3
-    
+
 ### Sample Queries:
 
 These are some useful queries (to generate dashboards or other) to run against data from this plugin:
diff --git a/plugins/inputs/httpjson/README.md b/plugins/inputs/httpjson/README.md
index c7c0e6797..1aa1ad1a4 100644
--- a/plugins/inputs/httpjson/README.md
+++ b/plugins/inputs/httpjson/README.md
@@ -1,128 +1,79 @@
-# HTTP JSON Plugin
+# HTTP JSON Input Plugin
 
-The httpjson plugin can collect data from remote URLs which respond with JSON. Then it flattens JSON and finds all numeric values, treating them as floats.
+The httpjson plugin collects data from HTTP URLs which respond with JSON.  It flattens the JSON and finds all numeric values, treating them as floats.
 
-For example, if you have a service called _mycollector_, which has HTTP endpoint for gathering stats at http://my.service.com/_stats, you would configure the HTTP JSON plugin like this:
+### Configuration:
 
-```
+```toml
 [[inputs.httpjson]]
-  name = "mycollector"
+  ## NOTE This plugin only reads numerical measurements; strings and booleans
+  ## will be ignored.
 
+  ## Name for the service being polled.  Will be appended to the name of the
+  ## measurement e.g. "httpjson_webserver_stats".
+  ##
+  ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
+  name = "webserver_stats"
+
+  ## URL of each server in the service's cluster
   servers = [
-    "http://my.service.com/_stats"
+    "http://localhost:9999/stats/",
+    "http://localhost:9998/stats/",
   ]
-
-  # HTTP method to use (case-sensitive)
-  method = "GET"
-
-  # Set response_timeout (default 5 seconds)
+  ## Set response_timeout (default 5 seconds)
   response_timeout = "5s"
-```
 
-`name` is used as a prefix for the measurements.
-
-`method` specifies HTTP method to use for requests.
-
-`response_timeout` specifies timeout to wait to get the response
-
-You can also specify which keys from server response should be considered tags:
-
-```
-[[inputs.httpjson]]
-  ...
-
-  tag_keys = [
-    "role",
-    "version"
-  ]
-```
-
-If the JSON response is an array of objects, then each object will be parsed with the same configuration.
-
-You can also specify additional request parameters for the service:
-
-```
-[[inputs.httpjson]]
-  ...
-
- [inputs.httpjson.parameters]
-    event_type = "cpu_spike"
-    threshold = "0.75"
-
-```
-
-You can also specify additional request header parameters for the service:
-
-```
-[[inputs.httpjson]]
-  ...
-
- [inputs.httpjson.headers]
-    X-Auth-Token = "my-xauth-token"
-    apiVersion = "v1"
-```
-
-# Example:
-
-Let's say that we have a service named "mycollector" configured like this:
-
-```
-[[inputs.httpjson]]
-  name = "mycollector"
-  servers = [
-    "http://my.service.com/_stats"
-  ]
-  # HTTP method to use (case-sensitive)
-  method = "GET"
-  tag_keys = ["service"]
-```
-
-which responds with the following JSON:
-
-```json
-{
-    "service": "service01",
-    "a": 0.5,
-    "b": {
-        "c": "some text",
-        "d": 0.1,
-        "e": 5
-    }
-}
-```
-
-The collected metrics will be:
-```
-httpjson_mycollector_a,service='service01',server='http://my.service.com/_stats' value=0.5
-httpjson_mycollector_b_d,service='service01',server='http://my.service.com/_stats' value=0.1
-httpjson_mycollector_b_e,service='service01',server='http://my.service.com/_stats' value=5
-```
-
-# Example 2, Multiple Services:
-
-There is also the option to collect JSON from multiple services, here is an example doing that.
-
-```
-[[inputs.httpjson]]
-  name = "mycollector1"
-  servers = [
-    "http://my.service1.com/_stats"
-  ]
-  # HTTP method to use (case-sensitive)
+  ## HTTP method to use: GET or POST (case-sensitive)
   method = "GET"
 
-[[inputs.httpjson]]
-  name = "mycollector2"
-  servers = [
-    "http://service.net/json/stats"
-  ]
-  # HTTP method to use (case-sensitive)
-  method = "POST"
+  ## Tags to extract from top-level of JSON server response.
+  # tag_keys = [
+  #   "my_tag_1",
+  #   "my_tag_2"
+  # ]
+
+  ## HTTP Request Parameters (all values must be strings).  For "GET" requests, data
+  ## will be included in the query.  For "POST" requests, data will be included
+  ## in the request body as "x-www-form-urlencoded".
+  # [inputs.httpjson.parameters]
+  #   event_type = "cpu_spike"
+  #   threshold = "0.75"
+
+  ## HTTP Request Headers (all values must be strings).
+  # [inputs.httpjson.headers]
+  #   X-Auth-Token = "my-xauth-token"
+  #   apiVersion = "v1"
+
+  ## Optional SSL Config
+  # ssl_ca = "/etc/telegraf/ca.pem"
+  # ssl_cert = "/etc/telegraf/cert.pem"
+  # ssl_key = "/etc/telegraf/key.pem"
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
 ```
 
-The services respond with the following JSON:
+### Measurements & Fields:
 
-mycollector1:
+- httpjson
+	- response_time (float): Response time in seconds
+
+Additional fields are dependent on the response of the remote service being polled.
+
+### Tags:
+
+- All measurements have the following tags:
+	- server: HTTP origin as defined under `servers` in the configuration.
+
+Any top level keys listed under `tag_keys` in the configuration are added as tags.  Top level keys are defined as keys in the root level of the object in a single object response, or in the root level of each object within an array of objects.
+
+
+### Example Output:
+
+This plugin understands responses containing a single JSON object or an array of objects.
+
+**Object Output:**
+
+Given the following response body:
 ```json
 {
     "a": 0.5,
@@ -130,45 +81,30 @@ mycollector1:
         "c": "some text",
         "d": 0.1,
         "e": 5
-    }
+    },
+    "service": "service01"
 }
 ```
+The following metric is produced:
 
-mycollector2:
-```json
-{
-    "load": 100,
-    "users": 1335
-}
-```
+`httpjson,server=http://localhost:9999/stats/ b_d=0.1,a=0.5,b_e=5,response_time=0.001`
 
-The collected metrics will be:
+Note that only numerical values are extracted and the type is float.
 
-```
-httpjson_mycollector1_a,server='http://my.service.com/_stats' value=0.5
-httpjson_mycollector1_b_d,server='http://my.service.com/_stats' value=0.1
-httpjson_mycollector1_b_e,server='http://my.service.com/_stats' value=5
+If `tag_keys` is included in the configuration:
 
-httpjson_mycollector2_load,server='http://service.net/json/stats' value=100
-httpjson_mycollector2_users,server='http://service.net/json/stats' value=1335
-```
-
-# Example 3, Multiple Metrics in Response:
-
-The response JSON can be treated as an array of data points that are all parsed with the same configuration.
-
-```
+```toml
 [[inputs.httpjson]]
-  name = "mycollector"
-  servers = [
-    "http://my.service.com/_stats"
-  ]
-  # HTTP method to use (case-sensitive)
-  method = "GET"
   tag_keys = ["service"]
 ```
 
-which responds with the following JSON:
+Then the `service` tag will also be added:
+
+`httpjson,server=http://localhost:9999/stats/,service=service01 b_d=0.1,a=0.5,b_e=5,response_time=0.001`
+
+**Array Output:**
+
+If the service returns an array of objects, one metric is created for each object:
 
 ```json
 [
@@ -193,12 +129,5 @@ which responds with the following JSON:
 ]
 ```
 
-The collected metrics will be:
-```
-httpjson_mycollector_a,service='service01',server='http://my.service.com/_stats' value=0.5
-httpjson_mycollector_b_d,service='service01',server='http://my.service.com/_stats' value=0.1
-httpjson_mycollector_b_e,service='service01',server='http://my.service.com/_stats' value=5
-httpjson_mycollector_a,service='service02',server='http://my.service.com/_stats' value=0.6
-httpjson_mycollector_b_d,service='service02',server='http://my.service.com/_stats' value=0.2
-httpjson_mycollector_b_e,service='service02',server='http://my.service.com/_stats' value=6
-```
+`httpjson,server=http://localhost:9999/stats/,service=service01 a=0.5,b_d=0.1,b_e=5,response_time=0.003`
+`httpjson,server=http://localhost:9999/stats/,service=service02 a=0.6,b_d=0.2,b_e=6,response_time=0.003`
diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go
index 89bfccf77..8bfe22bff 100644
--- a/plugins/inputs/httpjson/httpjson.go
+++ b/plugins/inputs/httpjson/httpjson.go
@@ -73,7 +73,10 @@ var sampleConfig = `
   ## NOTE This plugin only reads numerical measurements, strings and booleans
   ## will be ignored.
 
-  ## a name for the service being polled
+  ## Name for the service being polled.  Will be appended to the name of the
+  ## measurement e.g. httpjson_webserver_stats
+  ##
+  ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
   name = "webserver_stats"
 
   ## URL of each server in the service's cluster
@@ -93,12 +96,14 @@ var sampleConfig = `
   #   "my_tag_2"
   # ]
 
-  ## HTTP parameters (all values must be strings)
-  [inputs.httpjson.parameters]
-    event_type = "cpu_spike"
-    threshold = "0.75"
+  ## HTTP parameters (all values must be strings).  For "GET" requests, data
+  ## will be included in the query.  For "POST" requests, data will be included
+  ## in the request body as "x-www-form-urlencoded".
+  # [inputs.httpjson.parameters]
+  #   event_type = "cpu_spike"
+  #   threshold = "0.75"
 
-  ## HTTP Header parameters (all values must be strings)
+  ## HTTP Headers (all values must be strings)
   # [inputs.httpjson.headers]
   #   X-Auth-Token = "my-xauth-token"
   #   apiVersion = "v1"

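The flattening behavior the rewritten README describes (nested keys joined with `_`, non-numeric leaves dropped) can be illustrated in a few lines. This is a sketch of the documented behavior, not the plugin's actual parser:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// flatten walks a decoded JSON object and keeps only numeric leaves,
// joining nested keys with "_", e.g. {"b":{"d":0.1}} -> b_d=0.1.
func flatten(prefix string, v interface{}, out map[string]float64) {
	switch t := v.(type) {
	case map[string]interface{}:
		for k, child := range t {
			key := k
			if prefix != "" {
				key = prefix + "_" + k
			}
			flatten(key, child, out)
		}
	case float64: // encoding/json decodes every JSON number as float64
		out[prefix] = t
	default:
		// strings, booleans, and nulls are ignored, as documented
	}
}

func main() {
	raw := []byte(`{"service":"service01","a":0.5,"b":{"c":"some text","d":0.1,"e":5}}`)
	var obj map[string]interface{}
	if err := json.Unmarshal(raw, &obj); err != nil {
		panic(err)
	}
	fields := map[string]float64{}
	flatten("", obj, fields)
	fmt.Println(fields) // map[a:0.5 b_d:0.1 b_e:5]
}
```
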
From 5ffc9fd379bcce9185346decf89fd1eab3b64d5e Mon Sep 17 00:00:00 2001
From: James 
Date: Tue, 4 Apr 2017 20:37:44 -0400
Subject: [PATCH 0182/1302] fix postgresql connection leak (#2611)

---
 CHANGELOG.md                                  |  2 +-
 Godeps                                        |  2 +-
 plugins/inputs/postgresql/connect.go          | 22 ----------------
 plugins/inputs/postgresql/postgresql.go       | 16 ++++++++----
 .../postgresql_extensible.go                  | 25 +++++++++++--------
 5 files changed, 28 insertions(+), 39 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e7da095d4..043a51a69 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -92,7 +92,7 @@ be deprecated eventually.
 - [#2576](https://github.com/influxdata/telegraf/pull/2576): Add write timeout to Riemann output
 - [#2596](https://github.com/influxdata/telegraf/pull/2596): fix timestamp parsing on prometheus plugin
 - [#2610](https://github.com/influxdata/telegraf/pull/2610): Fix deadlock when output cannot write
-
+- [#2410](https://github.com/influxdata/telegraf/issues/2410): Fix connection leak in postgresql.
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/Godeps b/Godeps
index 9717cec2f..ab72be8f2 100644
--- a/Godeps
+++ b/Godeps
@@ -24,7 +24,7 @@ github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
 github.com/influxdata/tail e9ef7e826dafcb3093b40b989fefa90eeb9a8ca1
 github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
 github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
-github.com/jackc/pgx c8080fc4a1bfa44bf90383ad0fdce2f68b7d313c
+github.com/jackc/pgx b84338d7d62598f75859b2b146d830b22f1b9ec8
 github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
 github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893
 github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
diff --git a/plugins/inputs/postgresql/connect.go b/plugins/inputs/postgresql/connect.go
index 77858cda2..011ae32e0 100644
--- a/plugins/inputs/postgresql/connect.go
+++ b/plugins/inputs/postgresql/connect.go
@@ -1,15 +1,11 @@
 package postgresql
 
 import (
-	"database/sql"
 	"fmt"
 	"net"
 	"net/url"
 	"sort"
 	"strings"
-
-	"github.com/jackc/pgx"
-	"github.com/jackc/pgx/stdlib"
 )
 
 // pulled from lib/pq
@@ -79,21 +75,3 @@ func ParseURL(uri string) (string, error) {
 	sort.Strings(kvs) // Makes testing easier (not a performance concern)
 	return strings.Join(kvs, " "), nil
 }
-
-func Connect(address string) (*sql.DB, error) {
-	if strings.HasPrefix(address, "postgres://") || strings.HasPrefix(address, "postgresql://") {
-		return sql.Open("pgx", address)
-	}
-
-	config, err := pgx.ParseDSN(address)
-	if err != nil {
-		return nil, err
-	}
-
-	pool, err := pgx.NewConnPool(pgx.ConnPoolConfig{ConnConfig: config})
-	if err != nil {
-		return nil, err
-	}
-
-	return stdlib.OpenFromConnPool(pool)
-}
diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go
index 7c854dfd3..832c433ed 100644
--- a/plugins/inputs/postgresql/postgresql.go
+++ b/plugins/inputs/postgresql/postgresql.go
@@ -2,11 +2,15 @@ package postgresql
 
 import (
 	"bytes"
+	"database/sql"
 	"fmt"
 	"regexp"
 	"sort"
 	"strings"
 
+	// register in driver.
+	_ "github.com/jackc/pgx/stdlib"
+
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
@@ -62,17 +66,19 @@ func (p *Postgresql) IgnoredColumns() map[string]bool {
 var localhost = "host=localhost sslmode=disable"
 
 func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
-	var query string
+	var (
+		err   error
+		db    *sql.DB
+		query string
+	)
 
 	if p.Address == "" || p.Address == "localhost" {
 		p.Address = localhost
 	}
 
-	db, err := Connect(p.Address)
-	if err != nil {
+	if db, err = sql.Open("pgx", p.Address); err != nil {
 		return err
 	}
-
 	defer db.Close()
 
 	if len(p.Databases) == 0 && len(p.IgnoredDatabases) == 0 {
@@ -107,7 +113,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
 			return err
 		}
 	}
-	//return rows.Err()
+
 	query = `SELECT * FROM pg_stat_bgwriter`
 
 	bg_writer_row, err := db.Query(query)
diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go
index 00729bf75..b8d3be625 100644
--- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go
+++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go
@@ -2,11 +2,15 @@ package postgresql_extensible
 
 import (
 	"bytes"
+	"database/sql"
 	"fmt"
 	"log"
 	"regexp"
 	"strings"
 
+	// register in driver.
+	_ "github.com/jackc/pgx/stdlib"
+
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/plugins/inputs"
 	"github.com/influxdata/telegraf/plugins/inputs/postgresql"
@@ -112,23 +116,24 @@ func (p *Postgresql) IgnoredColumns() map[string]bool {
 var localhost = "host=localhost sslmode=disable"
 
 func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
-
-	var sql_query string
-	var query_addon string
-	var db_version int
-	var query string
-	var tag_value string
-	var meas_name string
+	var (
+		err         error
+		db          *sql.DB
+		sql_query   string
+		query_addon string
+		db_version  int
+		query       string
+		tag_value   string
+		meas_name   string
+	)
 
 	if p.Address == "" || p.Address == "localhost" {
 		p.Address = localhost
 	}
 
-	db, err := postgresql.Connect(p.Address)
-	if err != nil {
+	if db, err = sql.Open("pgx", p.Address); err != nil {
 		return err
 	}
-
 	defer db.Close()
 
 	// Retrieving the database version

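The shape of this fix is the standard `database/sql` pattern: `sql.Open` with the registered `pgx` driver returns a lazily-connected pool, and `defer db.Close()` releases it on every return path, which the removed per-gather `Connect` helper did not guarantee. A minimal sketch with a placeholder DSN:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/jackc/pgx/stdlib" // registers the "pgx" database/sql driver
)

func gather(address string) error {
	db, err := sql.Open("pgx", address) // lazy: no connection is dialed yet
	if err != nil {
		return err
	}
	defer db.Close() // released on every return path, including errors

	rows, err := db.Query(`SELECT * FROM pg_stat_bgwriter`)
	if err != nil {
		return err
	}
	defer rows.Close() // open cursors pin connections, so close them too

	// ... scan rows here ...
	return rows.Err()
}

func main() {
	if err := gather("host=localhost sslmode=disable"); err != nil {
		log.Fatal(err)
	}
}
```
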
From c9f8308f27aa849332f08acf1930a0781849d882 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Thu, 6 Apr 2017 12:06:08 -0700
Subject: [PATCH 0183/1302] Update filtering documentation (#2631)

---
 docs/CONFIGURATION.md | 57 +++++++++++++++++++++++++------------------
 1 file changed, 33 insertions(+), 24 deletions(-)

diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md
index ff4814b82..ad6e903fc 100644
--- a/docs/CONFIGURATION.md
+++ b/docs/CONFIGURATION.md
@@ -124,31 +124,40 @@ is not specified then processor execution order will be random.
 Filters can be configured per input, output, processor, or aggregator,
 see below for examples.
 
-* **namepass**: An array of strings that is used to filter metrics generated by the
-current input. Each string in the array is tested as a glob match against
-measurement names and if it matches, the field is emitted.
-* **namedrop**: The inverse of pass, if a measurement name matches, it is not emitted.
-* **fieldpass**: An array of strings that is used to filter metrics generated by the
-current input. Each string in the array is tested as a glob match against field names
-and if it matches, the field is emitted. fieldpass is not available for outputs.
-* **fielddrop**: The inverse of pass, if a field name matches, it is not emitted.
-fielddrop is not available for outputs.
-* **tagpass**: tag names and arrays of strings that are used to filter
-measurements by the current input. Each string in the array is tested as a glob
-match against the tag name, and if it matches the measurement is emitted.
-* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not
-emitted. This is tested on measurements that have passed the tagpass test.
-* **tagexclude**: tagexclude can be used to exclude a tag from measurement(s).
-As opposed to tagdrop, which will drop an entire measurement based on it's
-tags, tagexclude simply strips the given tag keys from the measurement. This
-can be used on inputs & outputs, but it is _recommended_ to be used on inputs,
-as it is more efficient to filter out tags at the ingestion point.
-* **taginclude**: taginclude is the inverse of tagexclude. It will only include
-the tag keys in the final measurement.
+* **namepass**:
+An array of glob pattern strings.  Only points whose measurement name matches
+a pattern in this list are emitted.
+* **namedrop**:
+The inverse of `namepass`.  If a match is found the point is discarded. This
+is tested on points after they have passed the `namepass` test.
+* **fieldpass**:
+An array of glob pattern strings.  Only fields whose field key matches a
+pattern in this list are emitted.  Not available for outputs.
+* **fielddrop**:
+The inverse of `fieldpass`. Fields with a field key matching one of the
+patterns will be discarded from the point.  Not available for outputs.
+* **tagpass**:
+A table mapping tag keys to arrays of glob pattern strings.  Only points
+that contain a tag key in the table and a tag value matching one of its
+patterns are emitted.
+* **tagdrop**:
+The inverse of `tagpass`.  If a match is found the point is discarded. This
+is tested on points after they have passed the `tagpass` test.
+* **taginclude**:
+An array of glob pattern strings.  Only tags with a tag key matching one of
+the patterns are emitted.  In contrast to `tagpass`, which will pass an entire
+point based on its tag, `taginclude` removes all non matching tags from the
+point.  This filter can be used on both inputs & outputs, but it is
+_recommended_ to be used on inputs, as it is more efficient to filter out tags
+at the ingestion point.
+* **tagexclude**:
+The inverse of `taginclude`. Tags with a tag key matching one of the patterns
+will be discarded from the point.
 
-**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
-the plugin definition, otherwise subsequent plugin config options will be
-interpreted as part of the tagpass/tagdrop map.
+**NOTE** Due to the way TOML is parsed, `tagpass` and `tagdrop` parameters
+must be defined at the _end_ of the plugin definition, otherwise subsequent
+plugin config options will be interpreted as part of the tagpass/tagdrop
+tables.
 
 #### Input Configuration Examples
 

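The pass/drop pairs described above compose in a fixed order: a point must survive the `*pass` test before the corresponding `*drop` test is applied. A toy sketch of that ordering using `filepath.Match` globs (hypothetical helpers, not Telegraf's internal filter code):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// matchAny reports whether name matches any of the glob patterns.
func matchAny(patterns []string, name string) bool {
	for _, p := range patterns {
		if ok, _ := filepath.Match(p, name); ok {
			return true
		}
	}
	return false
}

// keepName applies namepass first, then namedrop, mirroring the documented order.
func keepName(namepass, namedrop []string, name string) bool {
	if len(namepass) > 0 && !matchAny(namepass, name) {
		return false
	}
	if len(namedrop) > 0 && matchAny(namedrop, name) {
		return false
	}
	return true
}

func main() {
	fmt.Println(keepName([]string{"cpu*"}, []string{"cpu_time*"}, "cpu_usage"))     // true
	fmt.Println(keepName([]string{"cpu*"}, []string{"cpu_time*"}, "cpu_time_user")) // false
}
```
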
From 92fa20cef2da6c502c93bf5b1607c7ef9bca5742 Mon Sep 17 00:00:00 2001
From: Victor Yunevich 
Date: Fri, 7 Apr 2017 00:40:34 +0300
Subject: [PATCH 0184/1302] ipmi_sensor: allow @ symbol in password (#2633)

---
 CHANGELOG.md                                  |  1 +
 plugins/inputs/ipmi_sensor/connection.go      |  2 +-
 plugins/inputs/ipmi_sensor/connection_test.go | 42 +++++++++++++++++++
 3 files changed, 44 insertions(+), 1 deletion(-)
 create mode 100644 plugins/inputs/ipmi_sensor/connection_test.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 043a51a69..333963bd5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -67,6 +67,7 @@ be deprecated eventually.
 
 ### Bugfixes
 
+- [#2633](https://github.com/influxdata/telegraf/pull/2633): ipmi_sensor: allow @ symbol in password
 - [#2077](https://github.com/influxdata/telegraf/issues/2077): SQL Server Input - Arithmetic overflow error converting numeric to data type int.
 - [#2262](https://github.com/influxdata/telegraf/issues/2262): Flush jitter can inhibit metric collection.
 - [#2318](https://github.com/influxdata/telegraf/issues/2318): haproxy input - Add missing fields.
diff --git a/plugins/inputs/ipmi_sensor/connection.go b/plugins/inputs/ipmi_sensor/connection.go
index 432b4aa02..b93cda7d4 100644
--- a/plugins/inputs/ipmi_sensor/connection.go
+++ b/plugins/inputs/ipmi_sensor/connection.go
@@ -18,7 +18,7 @@ type Connection struct {
 
 func NewConnection(server string) *Connection {
 	conn := &Connection{}
-	inx1 := strings.Index(server, "@")
+	inx1 := strings.LastIndex(server, "@")
 	inx2 := strings.Index(server, "(")
 	inx3 := strings.Index(server, ")")
 
diff --git a/plugins/inputs/ipmi_sensor/connection_test.go b/plugins/inputs/ipmi_sensor/connection_test.go
new file mode 100644
index 000000000..13a62061d
--- /dev/null
+++ b/plugins/inputs/ipmi_sensor/connection_test.go
@@ -0,0 +1,42 @@
+package ipmi_sensor
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+type conTest struct {
+	Got  string
+	Want *Connection
+}
+
+func TestNewConnection(t *testing.T) {
+	testData := []struct {
+		addr string
+		con  *Connection
+	}{
+		{
+			"USERID:PASSW0RD@lan(192.168.1.1)",
+			&Connection{
+				Hostname:  "192.168.1.1",
+				Username:  "USERID",
+				Password:  "PASSW0RD",
+				Interface: "lan",
+			},
+		},
+		{
+			"USERID:PASS:!@#$%^&*(234)_+W0RD@lan(192.168.1.1)",
+			&Connection{
+				Hostname:  "192.168.1.1",
+				Username:  "USERID",
+				Password:  "PASS:!@#$%^&*(234)_+W0RD",
+				Interface: "lan",
+			},
+		},
+	}
+
+	for _, v := range testData {
+		assert.Equal(t, v.con, NewConnection(v.addr))
+	}
+}

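The one-character fix (`Index` → `LastIndex`) works because the username cannot contain `@` while the password may; splitting on the last `@` therefore recovers the password intact. A standalone sketch of that parsing rule, assuming well-formed input (not the plugin's full `NewConnection`):

```go
package main

import (
	"fmt"
	"strings"
)

// parseServer splits "USER:PASS@iface(host)" on the *last* "@" so that
// passwords containing "@" survive. Input is assumed well-formed.
func parseServer(s string) (user, pass, iface, host string) {
	at := strings.LastIndex(s, "@")
	creds, rest := s[:at], s[at+1:] // rest is e.g. "lan(192.168.1.1)"

	if c := strings.Index(creds, ":"); c != -1 {
		user, pass = creds[:c], creds[c+1:]
	}
	if open := strings.Index(rest, "("); open != -1 && strings.HasSuffix(rest, ")") {
		iface, host = rest[:open], rest[open+1:len(rest)-1]
	}
	return
}

func main() {
	fmt.Println(parseServer("USERID:PASS:w@rd@lan(192.168.1.1)"))
	// USERID PASS:w@rd lan 192.168.1.1
}
```
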
From 7cc4ca23418b2ccb8caa96dc1f10125d7b6a0e8d Mon Sep 17 00:00:00 2001
From: Rajaseelan Ganeswaran 
Date: Fri, 7 Apr 2017 05:44:02 +0800
Subject: [PATCH 0185/1302] Add sample config stanza for CPU (#2620)

---
 plugins/inputs/system/CPU_README.md | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/plugins/inputs/system/CPU_README.md b/plugins/inputs/system/CPU_README.md
index 26eb7ffbe..01d57855b 100644
--- a/plugins/inputs/system/CPU_README.md
+++ b/plugins/inputs/system/CPU_README.md
@@ -4,6 +4,18 @@
 - **totalcpu** boolean: If true, include `cpu-total` data
 - **percpu** boolean: If true, include data on a per-cpu basis `cpu0, cpu1, etc.`
 
+
+#### Configuration:
+```toml
+[[inputs.cpu]]
+  ## Whether to report per-cpu stats or not
+  percpu = true
+  ## Whether to report total system cpu stats or not
+  totalcpu = true
+  ## If true, collect raw CPU time metrics.
+  collect_cpu_time = false
+```
+
 #### Description
 
 The CPU plugin collects standard CPU metrics as defined in `man proc`. All

From aa722fac9b6585069d405a9bc1772d4900d971b0 Mon Sep 17 00:00:00 2001
From: Vladimir S 
Date: Sat, 8 Apr 2017 01:39:43 +0300
Subject: [PATCH 0186/1302] Add dmcache input plugin (#1667)

---
 CHANGELOG.md                               |   1 +
 plugins/inputs/all/all.go                  |   1 +
 plugins/inputs/dmcache/README.md           |  47 +++++
 plugins/inputs/dmcache/dmcache.go          |  33 ++++
 plugins/inputs/dmcache/dmcache_linux.go    | 190 +++++++++++++++++++++
 plugins/inputs/dmcache/dmcache_notlinux.go |  15 ++
 plugins/inputs/dmcache/dmcache_test.go     | 169 ++++++++++++++++++
 7 files changed, 456 insertions(+)
 create mode 100644 plugins/inputs/dmcache/README.md
 create mode 100644 plugins/inputs/dmcache/dmcache.go
 create mode 100644 plugins/inputs/dmcache/dmcache_linux.go
 create mode 100644 plugins/inputs/dmcache/dmcache_notlinux.go
 create mode 100644 plugins/inputs/dmcache/dmcache_test.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 333963bd5..46d8b57d5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -64,6 +64,7 @@ be deprecated eventually.
 - [#2587](https://github.com/influxdata/telegraf/pull/2587): Add json timestamp units configurability
 - [#2597](https://github.com/influxdata/telegraf/issues/2597): Add support for Linux sysctl-fs metrics.
 - [#2425](https://github.com/influxdata/telegraf/pull/2425): Support to include/exclude docker container labels as tags
+- [#1667](https://github.com/influxdata/telegraf/pull/1667): dmcache input plugin
 
 ### Bugfixes
 
diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index a9147c53e..983179e90 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -15,6 +15,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/couchbase"
 	_ "github.com/influxdata/telegraf/plugins/inputs/couchdb"
 	_ "github.com/influxdata/telegraf/plugins/inputs/disque"
+	_ "github.com/influxdata/telegraf/plugins/inputs/dmcache"
 	_ "github.com/influxdata/telegraf/plugins/inputs/dns_query"
 	_ "github.com/influxdata/telegraf/plugins/inputs/docker"
 	_ "github.com/influxdata/telegraf/plugins/inputs/dovecot"
diff --git a/plugins/inputs/dmcache/README.md b/plugins/inputs/dmcache/README.md
new file mode 100644
index 000000000..536d3f518
--- /dev/null
+++ b/plugins/inputs/dmcache/README.md
@@ -0,0 +1,47 @@
+# DMCache Input Plugin
+
+This plugin provides native collection of dmsetup-based statistics for dm-cache.
+
+This plugin requires sudo; make sure that the telegraf user is able to execute sudo without a password.
+
+`sudo /sbin/dmsetup status --target cache` is the full command that telegraf will run; you can run it manually for debugging purposes.
+
+### Configuration
+
+```toml
+[[inputs.dmcache]]
+  ## Whether to report per-device stats or not
+  per_device = true
+```
+
+### Measurements & Fields:
+
+- dmcache
+    - length
+    - target
+    - metadata_blocksize
+    - metadata_used
+    - metadata_total
+    - cache_blocksize
+    - cache_used
+    - cache_total
+    - read_hits
+    - read_misses
+    - write_hits
+    - write_misses
+    - demotions
+    - promotions
+    - dirty
+
+### Tags:
+
+- All measurements have the following tags:
+    - device
+
+### Example Output:
+
+```
+$ ./telegraf --test --config /etc/telegraf/telegraf.conf --input-filter dmcache
+* Plugin: inputs.dmcache, Collection 1
+> dmcache,device=example cache_blocksize=0i,read_hits=995134034411520i,read_misses=916807089127424i,write_hits=195107267543040i,metadata_used=12861440i,write_misses=563725346013184i,promotions=3265223720960i,dirty=0i,metadata_blocksize=0i,cache_used=1099511627776i,cache_total=0i,length=0i,metadata_total=1073741824i,demotions=3265223720960i 1491482035000000000
+```
diff --git a/plugins/inputs/dmcache/dmcache.go b/plugins/inputs/dmcache/dmcache.go
new file mode 100644
index 000000000..25a398194
--- /dev/null
+++ b/plugins/inputs/dmcache/dmcache.go
@@ -0,0 +1,33 @@
+package dmcache
+
+import (
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+type DMCache struct {
+	PerDevice        bool `toml:"per_device"`
+	getCurrentStatus func() ([]string, error)
+}
+
+var sampleConfig = `
+  ## Whether to report per-device stats or not
+  per_device = true
+`
+
+func (c *DMCache) SampleConfig() string {
+	return sampleConfig
+}
+
+func (c *DMCache) Description() string {
+	return "Provide a native collection for dmsetup based statistics for dm-cache"
+}
+
+func init() {
+	inputs.Add("dmcache", func() telegraf.Input {
+		return &DMCache{
+			PerDevice:        true,
+			getCurrentStatus: dmSetupStatus,
+		}
+	})
+}
diff --git a/plugins/inputs/dmcache/dmcache_linux.go b/plugins/inputs/dmcache/dmcache_linux.go
new file mode 100644
index 000000000..7ac1c96ca
--- /dev/null
+++ b/plugins/inputs/dmcache/dmcache_linux.go
@@ -0,0 +1,190 @@
+// +build linux
+
+package dmcache
+
+import (
+	"os/exec"
+	"strconv"
+	"strings"
+
+	"errors"
+
+	"github.com/influxdata/telegraf"
+)
+
+const metricName = "dmcache"
+
+type cacheStatus struct {
+	device            string
+	length            int
+	target            string
+	metadataBlocksize int
+	metadataUsed      int
+	metadataTotal     int
+	cacheBlocksize    int
+	cacheUsed         int
+	cacheTotal        int
+	readHits          int
+	readMisses        int
+	writeHits         int
+	writeMisses       int
+	demotions         int
+	promotions        int
+	dirty             int
+}
+
+func (c *DMCache) Gather(acc telegraf.Accumulator) error {
+	outputLines, err := c.getCurrentStatus()
+	if err != nil {
+		return err
+	}
+
+	totalStatus := cacheStatus{}
+
+	for _, s := range outputLines {
+		status, err := parseDMSetupStatus(s)
+		if err != nil {
+			return err
+		}
+
+		if c.PerDevice {
+			tags := map[string]string{"device": status.device}
+			acc.AddFields(metricName, toFields(status), tags)
+		}
+		aggregateStats(&totalStatus, status)
+	}
+
+	acc.AddFields(metricName, toFields(totalStatus), map[string]string{"device": "all"})
+
+	return nil
+}
+
+func parseDMSetupStatus(line string) (cacheStatus, error) {
+	var err error
+	parseError := errors.New("Output from dmsetup could not be parsed")
+	status := cacheStatus{}
+	values := strings.Fields(line)
+	if len(values) < 15 {
+		return cacheStatus{}, parseError
+	}
+
+	status.device = strings.TrimRight(values[0], ":")
+	status.length, err = strconv.Atoi(values[2])
+	if err != nil {
+		return cacheStatus{}, err
+	}
+	status.target = values[3]
+	status.metadataBlocksize, err = strconv.Atoi(values[4])
+	if err != nil {
+		return cacheStatus{}, err
+	}
+	metadata := strings.Split(values[5], "/")
+	if len(metadata) != 2 {
+		return cacheStatus{}, parseError
+	}
+	status.metadataUsed, err = strconv.Atoi(metadata[0])
+	if err != nil {
+		return cacheStatus{}, err
+	}
+	status.metadataTotal, err = strconv.Atoi(metadata[1])
+	if err != nil {
+		return cacheStatus{}, err
+	}
+	status.cacheBlocksize, err = strconv.Atoi(values[6])
+	if err != nil {
+		return cacheStatus{}, err
+	}
+	cache := strings.Split(values[7], "/")
+	if len(cache) != 2 {
+		return cacheStatus{}, parseError
+	}
+	status.cacheUsed, err = strconv.Atoi(cache[0])
+	if err != nil {
+		return cacheStatus{}, err
+	}
+	status.cacheTotal, err = strconv.Atoi(cache[1])
+	if err != nil {
+		return cacheStatus{}, err
+	}
+	status.readHits, err = strconv.Atoi(values[8])
+	if err != nil {
+		return cacheStatus{}, err
+	}
+	status.readMisses, err = strconv.Atoi(values[9])
+	if err != nil {
+		return cacheStatus{}, err
+	}
+	status.writeHits, err = strconv.Atoi(values[10])
+	if err != nil {
+		return cacheStatus{}, err
+	}
+	status.writeMisses, err = strconv.Atoi(values[11])
+	if err != nil {
+		return cacheStatus{}, err
+	}
+	status.demotions, err = strconv.Atoi(values[12])
+	if err != nil {
+		return cacheStatus{}, err
+	}
+	status.promotions, err = strconv.Atoi(values[13])
+	if err != nil {
+		return cacheStatus{}, err
+	}
+	status.dirty, err = strconv.Atoi(values[14])
+	if err != nil {
+		return cacheStatus{}, err
+	}
+
+	return status, nil
+}
+
+func aggregateStats(totalStatus *cacheStatus, status cacheStatus) {
+	totalStatus.length += status.length
+	totalStatus.metadataBlocksize += status.metadataBlocksize
+	totalStatus.metadataUsed += status.metadataUsed
+	totalStatus.metadataTotal += status.metadataTotal
+	totalStatus.cacheBlocksize += status.cacheBlocksize
+	totalStatus.cacheUsed += status.cacheUsed
+	totalStatus.cacheTotal += status.cacheTotal
+	totalStatus.readHits += status.readHits
+	totalStatus.readMisses += status.readMisses
+	totalStatus.writeHits += status.writeHits
+	totalStatus.writeMisses += status.writeMisses
+	totalStatus.demotions += status.demotions
+	totalStatus.promotions += status.promotions
+	totalStatus.dirty += status.dirty
+}
+
+func toFields(status cacheStatus) map[string]interface{} {
+	fields := make(map[string]interface{})
+	fields["length"] = status.length
+	fields["metadata_blocksize"] = status.metadataBlocksize
+	fields["metadata_used"] = status.metadataUsed
+	fields["metadata_total"] = status.metadataTotal
+	fields["cache_blocksize"] = status.cacheBlocksize
+	fields["cache_used"] = status.cacheUsed
+	fields["cache_total"] = status.cacheTotal
+	fields["read_hits"] = status.readHits
+	fields["read_misses"] = status.readMisses
+	fields["write_hits"] = status.writeHits
+	fields["write_misses"] = status.writeMisses
+	fields["demotions"] = status.demotions
+	fields["promotions"] = status.promotions
+	fields["dirty"] = status.dirty
+	return fields
+}
+
+func dmSetupStatus() ([]string, error) {
+	out, err := exec.Command("/bin/sh", "-c", "sudo /sbin/dmsetup status --target cache").Output()
+	if err != nil {
+		return nil, err
+	}
+	if string(out) == "No devices found\n" {
+		return []string{}, nil
+	}
+
+	outString := strings.TrimRight(string(out), "\n")
+	status := strings.Split(outString, "\n")
+
+	return status, nil
+}
diff --git a/plugins/inputs/dmcache/dmcache_notlinux.go b/plugins/inputs/dmcache/dmcache_notlinux.go
new file mode 100644
index 000000000..ee1065638
--- /dev/null
+++ b/plugins/inputs/dmcache/dmcache_notlinux.go
@@ -0,0 +1,15 @@
+// +build !linux
+
+package dmcache
+
+import (
+	"github.com/influxdata/telegraf"
+)
+
+func (c *DMCache) Gather(acc telegraf.Accumulator) error {
+	return nil
+}
+
+func dmSetupStatus() ([]string, error) {
+	return []string{}, nil
+}
diff --git a/plugins/inputs/dmcache/dmcache_test.go b/plugins/inputs/dmcache/dmcache_test.go
new file mode 100644
index 000000000..c5989c413
--- /dev/null
+++ b/plugins/inputs/dmcache/dmcache_test.go
@@ -0,0 +1,169 @@
+package dmcache
+
+import (
+	"errors"
+	"testing"
+
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/require"
+)
+
+var (
+	measurement              = "dmcache"
+	badFormatOutput          = []string{"cs-1: 0 4883791872 cache 8 1018/1501122 512 7/464962 139 352643 "}
+	good2DevicesFormatOutput = []string{
+		"cs-1: 0 4883791872 cache 8 1018/1501122 512 7/464962 139 352643 15 46 0 7 0 1 writeback 2 migration_threshold 2048 mq 10 random_threshold 4 sequential_threshold 512 discard_promote_adjustment 1 read_promote_adjustment 4 write_promote_adjustment 8",
+		"cs-2: 0 4294967296 cache 8 72352/1310720 128 26/24327168 2409 286 265 524682 0 0 0 1 writethrough 2 migration_threshold 2048 mq 10 random_threshold 4 sequential_threshold 512 discard_promote_adjustment 1 read_promote_adjustment 4 write_promote_adjustment 8",
+	}
+)
+
+func TestPerDeviceGoodOutput(t *testing.T) {
+	var acc testutil.Accumulator
+	var plugin = &DMCache{
+		PerDevice: true,
+		getCurrentStatus: func() ([]string, error) {
+			return good2DevicesFormatOutput, nil
+		},
+	}
+
+	err := plugin.Gather(&acc)
+	require.NoError(t, err)
+
+	tags1 := map[string]string{
+		"device": "cs-1",
+	}
+	fields1 := map[string]interface{}{
+		"length":             4883791872,
+		"metadata_blocksize": 8,
+		"metadata_used":      1018,
+		"metadata_total":     1501122,
+		"cache_blocksize":    512,
+		"cache_used":         7,
+		"cache_total":        464962,
+		"read_hits":          139,
+		"read_misses":        352643,
+		"write_hits":         15,
+		"write_misses":       46,
+		"demotions":          0,
+		"promotions":         7,
+		"dirty":              0,
+	}
+	acc.AssertContainsTaggedFields(t, measurement, fields1, tags1)
+
+	tags2 := map[string]string{
+		"device": "cs-2",
+	}
+	fields2 := map[string]interface{}{
+		"length":             4294967296,
+		"metadata_blocksize": 8,
+		"metadata_used":      72352,
+		"metadata_total":     1310720,
+		"cache_blocksize":    128,
+		"cache_used":         26,
+		"cache_total":        24327168,
+		"read_hits":          2409,
+		"read_misses":        286,
+		"write_hits":         265,
+		"write_misses":       524682,
+		"demotions":          0,
+		"promotions":         0,
+		"dirty":              0,
+	}
+	acc.AssertContainsTaggedFields(t, measurement, fields2, tags2)
+
+	tags3 := map[string]string{
+		"device": "all",
+	}
+
+	fields3 := map[string]interface{}{
+		"length":             9178759168,
+		"metadata_blocksize": 16,
+		"metadata_used":      73370,
+		"metadata_total":     2811842,
+		"cache_blocksize":    640,
+		"cache_used":         33,
+		"cache_total":        24792130,
+		"read_hits":          2548,
+		"read_misses":        352929,
+		"write_hits":         280,
+		"write_misses":       524728,
+		"demotions":          0,
+		"promotions":         7,
+		"dirty":              0,
+	}
+	acc.AssertContainsTaggedFields(t, measurement, fields3, tags3)
+}
+
+func TestNotPerDeviceGoodOutput(t *testing.T) {
+	var acc testutil.Accumulator
+	var plugin = &DMCache{
+		PerDevice: false,
+		getCurrentStatus: func() ([]string, error) {
+			return good2DevicesFormatOutput, nil
+		},
+	}
+
+	err := plugin.Gather(&acc)
+	require.NoError(t, err)
+
+	tags := map[string]string{
+		"device": "all",
+	}
+
+	fields := map[string]interface{}{
+		"length":             9178759168,
+		"metadata_blocksize": 16,
+		"metadata_used":      73370,
+		"metadata_total":     2811842,
+		"cache_blocksize":    640,
+		"cache_used":         33,
+		"cache_total":        24792130,
+		"read_hits":          2548,
+		"read_misses":        352929,
+		"write_hits":         280,
+		"write_misses":       524728,
+		"demotions":          0,
+		"promotions":         7,
+		"dirty":              0,
+	}
+	acc.AssertContainsTaggedFields(t, measurement, fields, tags)
+}
+
+func TestNoDevicesOutput(t *testing.T) {
+	var acc testutil.Accumulator
+	var plugin = &DMCache{
+		PerDevice: true,
+		getCurrentStatus: func() ([]string, error) {
+			return []string{}, nil
+		},
+	}
+
+	err := plugin.Gather(&acc)
+	require.NoError(t, err)
+}
+
+func TestErrorDuringGettingStatus(t *testing.T) {
+	var acc testutil.Accumulator
+	var plugin = &DMCache{
+		PerDevice: true,
+		getCurrentStatus: func() ([]string, error) {
+			return nil, errors.New("dmsetup doesn't exist")
+		},
+	}
+
+	err := plugin.Gather(&acc)
+	require.Error(t, err)
+}
+
+func TestBadFormatOfStatus(t *testing.T) {
+	var acc testutil.Accumulator
+	var plugin = &DMCache{
+		PerDevice: true,
+		getCurrentStatus: func() ([]string, error) {
+			return badFormatOutput, nil
+		},
+	}
+
+	err := plugin.Gather(&acc)
+	require.Error(t, err)
+}

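For readers unfamiliar with `dmsetup status --target cache` output, the fields parsed by `parseDMSetupStatus` are purely positional. A short sketch mapping the first test line to the indices the parser reads:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Positional layout parsed by parseDMSetupStatus above:
	// device: start length target meta_blocksize meta_used/meta_total
	// cache_blocksize cache_used/cache_total read_hits read_misses
	// write_hits write_misses demotions promotions dirty ...
	line := "cs-1: 0 4883791872 cache 8 1018/1501122 512 7/464962 139 352643 15 46 0 7 0"
	f := strings.Fields(line)
	fmt.Println("device:", strings.TrimRight(f[0], ":")) // cs-1
	fmt.Println("length:", f[2], "target:", f[3])        // 4883791872 cache
	fmt.Println("metadata used/total:", f[5])            // 1018/1501122
	fmt.Println("read_hits:", f[8], "dirty:", f[14])     // 139 0
}
```
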
From 07c428ef89e225a480a7835f32890f4db3cd534d Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Mon, 10 Apr 2017 14:33:17 -0700
Subject: [PATCH 0187/1302] Use random port in http_listener tests

---
 plugins/inputs/http_listener/http_listener.go |  2 +
 .../http_listener/http_listener_test.go       | 59 +++++++++++--------
 2 files changed, 37 insertions(+), 24 deletions(-)

diff --git a/plugins/inputs/http_listener/http_listener.go b/plugins/inputs/http_listener/http_listener.go
index 0f426f809..f0ad5752e 100644
--- a/plugins/inputs/http_listener/http_listener.go
+++ b/plugins/inputs/http_listener/http_listener.go
@@ -35,6 +35,7 @@ type HTTPListener struct {
 	WriteTimeout   internal.Duration
 	MaxBodySize    int64
 	MaxLineSize    int
+	Port           int
 
 	mu sync.Mutex
 	wg sync.WaitGroup
@@ -124,6 +125,7 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
 		return err
 	}
 	h.listener = listener
+	h.Port = listener.Addr().(*net.TCPAddr).Port
 
 	h.wg.Add(1)
 	go func() {
diff --git a/plugins/inputs/http_listener/http_listener_test.go b/plugins/inputs/http_listener/http_listener_test.go
index 7e6fbc8ab..41c0e9db8 100644
--- a/plugins/inputs/http_listener/http_listener_test.go
+++ b/plugins/inputs/http_listener/http_listener_test.go
@@ -4,6 +4,8 @@ import (
 	"bytes"
 	"io/ioutil"
 	"net/http"
+	"net/url"
+	"strconv"
 	"sync"
 	"testing"
 
@@ -30,11 +32,21 @@ cpu_load_short,host=server06 value=12.0 1422568543702900257
 
 func newTestHTTPListener() *HTTPListener {
 	listener := &HTTPListener{
-		ServiceAddress: ":8186",
+		ServiceAddress: ":0",
 	}
 	return listener
 }
 
+func createURL(listener *HTTPListener, path string, rawquery string) string {
+	u := url.URL{
+		Scheme:   "http",
+		Host:     "localhost:" + strconv.Itoa(listener.Port),
+		Path:     path,
+		RawQuery: rawquery,
+	}
+	return u.String()
+}
+
 func TestWriteHTTP(t *testing.T) {
 	listener := newTestHTTPListener()
 
@@ -43,7 +55,7 @@ func TestWriteHTTP(t *testing.T) {
 	defer listener.Stop()
 
 	// post single message to listener
-	resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(testMsg)))
+	resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
 	require.NoError(t, err)
 	require.EqualValues(t, 204, resp.StatusCode)
 
@@ -54,7 +66,7 @@ func TestWriteHTTP(t *testing.T) {
 	)
 
 	// post multiple message to listener
-	resp, err = http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(testMsgs)))
+	resp, err = http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs)))
 	require.NoError(t, err)
 	require.EqualValues(t, 204, resp.StatusCode)
 
@@ -69,7 +81,7 @@ func TestWriteHTTP(t *testing.T) {
 	}
 
 	// Post a gigantic metric to the listener and verify that an error is returned:
-	resp, err = http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(hugeMetric)))
+	resp, err = http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric)))
 	require.NoError(t, err)
 	require.EqualValues(t, 400, resp.StatusCode)
 
@@ -89,7 +101,7 @@ func TestWriteHTTPNoNewline(t *testing.T) {
 	defer listener.Stop()
 
 	// post single message to listener
-	resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(testMsgNoNewline)))
+	resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline)))
 	require.NoError(t, err)
 	require.EqualValues(t, 204, resp.StatusCode)
 
@@ -102,7 +114,7 @@ func TestWriteHTTPNoNewline(t *testing.T) {
 
 func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) {
 	listener := &HTTPListener{
-		ServiceAddress: ":8296",
+		ServiceAddress: ":0",
 		MaxLineSize:    128 * 1000,
 	}
 
@@ -111,14 +123,14 @@ func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) {
 	defer listener.Stop()
 
 	// Post a gigantic metric to the listener and verify that it writes OK this time:
-	resp, err := http.Post("http://localhost:8296/write?db=mydb", "", bytes.NewBuffer([]byte(hugeMetric)))
+	resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric)))
 	require.NoError(t, err)
 	require.EqualValues(t, 204, resp.StatusCode)
 }
 
 func TestWriteHTTPVerySmallMaxBody(t *testing.T) {
 	listener := &HTTPListener{
-		ServiceAddress: ":8297",
+		ServiceAddress: ":0",
 		MaxBodySize:    4096,
 	}
 
@@ -126,14 +138,14 @@ func TestWriteHTTPVerySmallMaxBody(t *testing.T) {
 	require.NoError(t, listener.Start(acc))
 	defer listener.Stop()
 
-	resp, err := http.Post("http://localhost:8297/write", "", bytes.NewBuffer([]byte(hugeMetric)))
+	resp, err := http.Post(createURL(listener, "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric)))
 	require.NoError(t, err)
 	require.EqualValues(t, 413, resp.StatusCode)
 }
 
 func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) {
 	listener := &HTTPListener{
-		ServiceAddress: ":8298",
+		ServiceAddress: ":0",
 		MaxLineSize:    70,
 	}
 
@@ -141,7 +153,7 @@ func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) {
 	require.NoError(t, listener.Start(acc))
 	defer listener.Stop()
 
-	resp, err := http.Post("http://localhost:8298/write", "", bytes.NewBuffer([]byte(testMsgs)))
+	resp, err := http.Post(createURL(listener, "/write", ""), "", bytes.NewBuffer([]byte(testMsgs)))
 	require.NoError(t, err)
 	require.EqualValues(t, 204, resp.StatusCode)
 
@@ -158,7 +170,7 @@ func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) {
 
 func TestWriteHTTPLargeLinesSkipped(t *testing.T) {
 	listener := &HTTPListener{
-		ServiceAddress: ":8300",
+		ServiceAddress: ":0",
 		MaxLineSize:    100,
 	}
 
@@ -166,7 +178,7 @@ func TestWriteHTTPLargeLinesSkipped(t *testing.T) {
 	require.NoError(t, listener.Start(acc))
 	defer listener.Stop()
 
-	resp, err := http.Post("http://localhost:8300/write", "", bytes.NewBuffer([]byte(hugeMetric+testMsgs)))
+	resp, err := http.Post(createURL(listener, "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs)))
 	require.NoError(t, err)
 	require.EqualValues(t, 400, resp.StatusCode)
 
@@ -183,9 +195,7 @@ func TestWriteHTTPLargeLinesSkipped(t *testing.T) {
 
 // test that writing gzipped data works
 func TestWriteHTTPGzippedData(t *testing.T) {
-	listener := &HTTPListener{
-		ServiceAddress: ":8299",
-	}
+	listener := newTestHTTPListener()
 
 	acc := &testutil.Accumulator{}
 	require.NoError(t, listener.Start(acc))
@@ -194,7 +204,7 @@ func TestWriteHTTPGzippedData(t *testing.T) {
 	data, err := ioutil.ReadFile("./testdata/testmsgs.gz")
 	require.NoError(t, err)
 
-	req, err := http.NewRequest("POST", "http://localhost:8299/write", bytes.NewBuffer(data))
+	req, err := http.NewRequest("POST", createURL(listener, "/write", ""), bytes.NewBuffer(data))
 	require.NoError(t, err)
 	req.Header.Set("Content-Encoding", "gzip")
 
@@ -216,7 +226,7 @@ func TestWriteHTTPGzippedData(t *testing.T) {
 
 // writes 25,000 metrics to the listener with 10 different writers
 func TestWriteHTTPHighTraffic(t *testing.T) {
-	listener := &HTTPListener{ServiceAddress: ":8286"}
+	listener := newTestHTTPListener()
 
 	acc := &testutil.Accumulator{}
 	require.NoError(t, listener.Start(acc))
@@ -229,7 +239,7 @@ func TestWriteHTTPHighTraffic(t *testing.T) {
 		go func(innerwg *sync.WaitGroup) {
 			defer innerwg.Done()
 			for i := 0; i < 500; i++ {
-				resp, err := http.Post("http://localhost:8286/write?db=mydb", "", bytes.NewBuffer([]byte(testMsgs)))
+				resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs)))
 				require.NoError(t, err)
 				require.EqualValues(t, 204, resp.StatusCode)
 			}
@@ -251,7 +261,7 @@ func TestReceive404ForInvalidEndpoint(t *testing.T) {
 	defer listener.Stop()
 
 	// post single message to listener
-	resp, err := http.Post("http://localhost:8186/foobar", "", bytes.NewBuffer([]byte(testMsg)))
+	resp, err := http.Post(createURL(listener, "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg)))
 	require.NoError(t, err)
 	require.EqualValues(t, 404, resp.StatusCode)
 }
@@ -264,7 +274,7 @@ func TestWriteHTTPInvalid(t *testing.T) {
 	defer listener.Stop()
 
 	// post single message to listener
-	resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(badMsg)))
+	resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(badMsg)))
 	require.NoError(t, err)
 	require.EqualValues(t, 400, resp.StatusCode)
 }
@@ -277,7 +287,7 @@ func TestWriteHTTPEmpty(t *testing.T) {
 	defer listener.Stop()
 
 	// post single message to listener
-	resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(emptyMsg)))
+	resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(emptyMsg)))
 	require.NoError(t, err)
 	require.EqualValues(t, 204, resp.StatusCode)
 }
@@ -290,12 +300,13 @@ func TestQueryAndPingHTTP(t *testing.T) {
 	defer listener.Stop()
 
 	// post query to listener
-	resp, err := http.Post("http://localhost:8186/query?db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22", "", nil)
+	resp, err := http.Post(
+		createURL(listener, "/query", "db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22"), "", nil)
 	require.NoError(t, err)
 	require.EqualValues(t, 200, resp.StatusCode)
 
 	// post ping to listener
-	resp, err = http.Post("http://localhost:8186/ping", "", nil)
+	resp, err = http.Post(createURL(listener, "/ping", ""), "", nil)
 	require.NoError(t, err)
 	require.EqualValues(t, 204, resp.StatusCode)
 }

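The port-0 trick used throughout these tests is the standard Go idiom for avoiding fixed ports: bind to `:0`, then read the kernel-assigned port back from the listener, exactly as `Start` now does via `listener.Addr()`. A minimal sketch:

```go
package main

import (
	"fmt"
	"log"
	"net"
)

func main() {
	l, err := net.Listen("tcp", ":0") // port 0 asks the OS for any free port
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	port := l.Addr().(*net.TCPAddr).Port // recover the assigned port
	fmt.Printf("listening on http://localhost:%d\n", port)
}
```
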
From 62b5c1f7e765c80a1c1d95c91b964207ef4d5dde Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Mon, 10 Apr 2017 16:39:40 -0700
Subject: [PATCH 0188/1302] Add support for precision in http_listener (#2644)

---
 CHANGELOG.md                                  |  1 +
 metric/parse.go                               | 50 +++++++++++++++++--
 metric/parse_test.go                          | 21 ++++++++
 plugins/inputs/http_listener/README.md        | 11 +++-
 plugins/inputs/http_listener/http_listener.go | 12 +++--
 .../http_listener/http_listener_test.go       | 19 +++++++
 plugins/parsers/influx/parser.go              |  6 +--
 7 files changed, 107 insertions(+), 13 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 46d8b57d5..9aab18fcd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -65,6 +65,7 @@ be deprecated eventually.
 - [#2597](https://github.com/influxdata/telegraf/issues/2597): Add support for Linux sysctl-fs metrics.
 - [#2425](https://github.com/influxdata/telegraf/pull/2425): Support to include/exclude docker container labels as tags
 - [#1667](https://github.com/influxdata/telegraf/pull/1667): dmcache input plugin
+- [#2637](https://github.com/influxdata/telegraf/issues/2637): Add support for precision in http_listener
 
 ### Bugfixes
 
diff --git a/metric/parse.go b/metric/parse.go
index 15b88e552..92dc4918b 100644
--- a/metric/parse.go
+++ b/metric/parse.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
+	"strconv"
 	"time"
 
 	"github.com/influxdata/telegraf"
@@ -40,10 +41,18 @@ const (
 )
 
 func Parse(buf []byte) ([]telegraf.Metric, error) {
-	return ParseWithDefaultTime(buf, time.Now())
+	return ParseWithDefaultTimePrecision(buf, time.Now(), "")
 }
 
 func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
+	return ParseWithDefaultTimePrecision(buf, t, "")
+}
+
+func ParseWithDefaultTimePrecision(
+	buf []byte,
+	t time.Time,
+	precision string,
+) ([]telegraf.Metric, error) {
 	if len(buf) == 0 {
 		return []telegraf.Metric{}, nil
 	}
@@ -63,7 +72,7 @@ func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
 			continue
 		}
 
-		m, err := parseMetric(buf[i:i+j], t)
+		m, err := parseMetric(buf[i:i+j], t, precision)
 		if err != nil {
 			i += j + 1 // increment i past the previous newline
 			errStr += " " + err.Error()
@@ -80,7 +89,10 @@ func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
 	return metrics, nil
 }
 
-func parseMetric(buf []byte, defaultTime time.Time) (telegraf.Metric, error) {
+func parseMetric(buf []byte,
+	defaultTime time.Time,
+	precision string,
+) (telegraf.Metric, error) {
 	var dTime string
 	// scan the first block which is measurement[,tag1=value1,tag2=value=2...]
 	pos, key, err := scanKey(buf, 0)
@@ -114,9 +126,23 @@ func parseMetric(buf []byte, defaultTime time.Time) (telegraf.Metric, error) {
 		return nil, err
 	}
 
+	// apply precision multiplier
+	var nsec int64
+	multiplier := getPrecisionMultiplier(precision)
+	if multiplier > 1 {
+		tsint, err := parseIntBytes(ts, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		nsec = multiplier * tsint
+		ts = []byte(strconv.FormatInt(nsec, 10))
+	}
+
 	m := &metric{
 		fields: fields,
 		t:      ts,
+		nsec:   nsec,
 	}
 
 	// parse out the measurement name
@@ -628,3 +654,21 @@ func makeError(reason string, buf []byte, i int) error {
 	return fmt.Errorf("metric parsing error, reason: [%s], buffer: [%s], index: [%d]",
 		reason, buf, i)
 }
+
+// getPrecisionMultiplier will return a multiplier for the precision specified.
+func getPrecisionMultiplier(precision string) int64 {
+	d := time.Nanosecond
+	switch precision {
+	case "u":
+		d = time.Microsecond
+	case "ms":
+		d = time.Millisecond
+	case "s":
+		d = time.Second
+	case "m":
+		d = time.Minute
+	case "h":
+		d = time.Hour
+	}
+	return int64(d)
+}
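
As a worked example (not part of the patch): `getPrecisionMultiplier("s")` returns `int64(time.Second)`, i.e. 1e9, so a seconds-precision timestamp is scaled to nanoseconds exactly as `parseMetric` does above.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	multiplier := int64(time.Second) // what getPrecisionMultiplier("s") returns
	tsint := int64(1491847420)       // timestamp sent with precision=s
	nsec := multiplier * tsint
	fmt.Println(nsec) // 1491847420000000000, the value TestParsePrecision expects
}
```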
diff --git a/metric/parse_test.go b/metric/parse_test.go
index 40bcf60b8..89ade9f56 100644
--- a/metric/parse_test.go
+++ b/metric/parse_test.go
@@ -364,6 +364,27 @@ func TestParseNegativeTimestamps(t *testing.T) {
 	}
 }
 
+func TestParsePrecision(t *testing.T) {
+	for _, tt := range []struct {
+		line      string
+		precision string
+		expected  int64
+	}{
+		{"test v=42 1491847420", "s", 1491847420000000000},
+		{"test v=42 1491847420123", "ms", 1491847420123000000},
+		{"test v=42 1491847420123456", "u", 1491847420123456000},
+		{"test v=42 1491847420123456789", "ns", 1491847420123456789},
+
+		{"test v=42 1491847420123456789", "1s", 1491847420123456789},
+		{"test v=42 1491847420123456789", "asdf", 1491847420123456789},
+	} {
+		metrics, err := ParseWithDefaultTimePrecision(
+			[]byte(tt.line+"\n"), time.Now(), tt.precision)
+		assert.NoError(t, err, tt)
+		assert.Equal(t, tt.expected, metrics[0].UnixNano())
+	}
+}
+
 func TestParseMaxKeyLength(t *testing.T) {
 	key := ""
 	for {
diff --git a/plugins/inputs/http_listener/README.md b/plugins/inputs/http_listener/README.md
index 9643f6a2e..994df654a 100644
--- a/plugins/inputs/http_listener/README.md
+++ b/plugins/inputs/http_listener/README.md
@@ -2,11 +2,18 @@
 
 The HTTP listener is a service input plugin that listens for messages sent via HTTP POST.
 The plugin expects messages in the InfluxDB line-protocol ONLY, other Telegraf input data formats are not supported.
-The intent of the plugin is to allow Telegraf to serve as a proxy/router for the /write endpoint of the InfluxDB HTTP API.
+The intent of the plugin is to allow Telegraf to serve as a proxy/router for the `/write` endpoint of the InfluxDB HTTP API.
+
+The `/write` endpoint supports the `precision` query parameter, which can be set to one of `ns`, `u`, `ms`, `s`, `m`, or `h`. All other query parameters are ignored and defer to the configuration of the output plugins.
+
 When chaining Telegraf instances using this plugin, CREATE DATABASE requests receive a 200 OK response with message body `{"results":[]}` but they are not relayed. The output configuration of the Telegraf instance which ultimately submits data to InfluxDB determines the destination database.
 
 See: [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx).
-Example:  curl -i -XPOST 'http://localhost:8186/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000'
+
+**Example:**
+```
+curl -i -XPOST 'http://localhost:8186/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000'
+```
 
 ### Configuration:
 
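
An illustrative request against the new parameter (mirroring the README example above, with the timestamp now given in seconds):

```
curl -i -XPOST 'http://localhost:8186/write?precision=s' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562'
```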
diff --git a/plugins/inputs/http_listener/http_listener.go b/plugins/inputs/http_listener/http_listener.go
index f0ad5752e..5ef260304 100644
--- a/plugins/inputs/http_listener/http_listener.go
+++ b/plugins/inputs/http_listener/http_listener.go
@@ -207,10 +207,12 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
 	}
 	now := time.Now()
 
+	precision := req.URL.Query().Get("precision")
+
 	// Handle gzip request bodies
 	body := req.Body
-	var err error
 	if req.Header.Get("Content-Encoding") == "gzip" {
+		var err error
 		body, err = gzip.NewReader(req.Body)
 		defer body.Close()
 		if err != nil {
@@ -263,7 +265,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
 
 		if err == io.ErrUnexpectedEOF {
 			// finished reading the request body
-			if err := h.parse(buf[:n+bufStart], now); err != nil {
+			if err := h.parse(buf[:n+bufStart], now, precision); err != nil {
 				log.Println("E! " + err.Error())
 				return400 = true
 			}
@@ -288,7 +290,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
 			bufStart = 0
 			continue
 		}
-		if err := h.parse(buf[:i+1], now); err != nil {
+		if err := h.parse(buf[:i+1], now, precision); err != nil {
 			log.Println("E! " + err.Error())
 			return400 = true
 		}
@@ -301,8 +303,8 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
 	}
 }
 
-func (h *HTTPListener) parse(b []byte, t time.Time) error {
-	metrics, err := h.parser.ParseWithDefaultTime(b, t)
+func (h *HTTPListener) parse(b []byte, t time.Time, precision string) error {
+	metrics, err := h.parser.ParseWithDefaultTimePrecision(b, t, precision)
 
 	for _, m := range metrics {
 		h.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
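
With this change the query-string precision is threaded from `serveWrite` through `parse` into the parser. A self-contained sketch of calling the new `metric` entry point directly (hypothetical standalone usage, not code from the patch):

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	// A line whose timestamp is in seconds, as a client posting with
	// ?precision=s would send it.
	metrics, err := metric.ParseWithDefaultTimePrecision(
		[]byte("cpu value=0.64 1434055562\n"), time.Now(), "s")
	if err != nil {
		panic(err)
	}
	fmt.Println(metrics[0].UnixNano()) // 1434055562000000000
}
```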
diff --git a/plugins/inputs/http_listener/http_listener_test.go b/plugins/inputs/http_listener/http_listener_test.go
index 41c0e9db8..654f2f83d 100644
--- a/plugins/inputs/http_listener/http_listener_test.go
+++ b/plugins/inputs/http_listener/http_listener_test.go
@@ -8,6 +8,7 @@ import (
 	"strconv"
 	"sync"
 	"testing"
+	"time"
 
 	"github.com/influxdata/telegraf/testutil"
 
@@ -311,5 +312,23 @@ func TestQueryAndPingHTTP(t *testing.T) {
 	require.EqualValues(t, 204, resp.StatusCode)
 }
 
+func TestWriteWithPrecision(t *testing.T) {
+	listener := newTestHTTPListener()
+
+	acc := &testutil.Accumulator{}
+	require.NoError(t, listener.Start(acc))
+	defer listener.Stop()
+
+	msg := "xyzzy value=42 1422568543\n"
+	resp, err := http.Post(
+		createURL(listener, "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg)))
+	require.NoError(t, err)
+	require.EqualValues(t, 204, resp.StatusCode)
+
+	acc.Wait(1)
+	require.Equal(t, 1, len(acc.Metrics))
+	require.Equal(t, time.Unix(0, 1422568543000000000), acc.Metrics[0].Time)
+}
+
 const hugeMetric = `super_long_metric,foo=bar clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentat
ion_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connec
tions_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_key
s=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_
backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1
,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,key
space_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_pa
rtial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501
128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pu
bsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i
,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys
=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i
,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_ch
ildren=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,l
atest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync
_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_
rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,
pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used
_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous
_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i
 `
diff --git a/plugins/parsers/influx/parser.go b/plugins/parsers/influx/parser.go
index c15c503f7..0abb330e8 100644
--- a/plugins/parsers/influx/parser.go
+++ b/plugins/parsers/influx/parser.go
@@ -15,13 +15,13 @@ type InfluxParser struct {
 	DefaultTags map[string]string
 }
 
-func (p *InfluxParser) ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
+func (p *InfluxParser) ParseWithDefaultTimePrecision(buf []byte, t time.Time, precision string) ([]telegraf.Metric, error) {
 	if !bytes.HasSuffix(buf, []byte("\n")) {
 		buf = append(buf, '\n')
 	}
 	// parse even if the buffer begins with a newline
 	buf = bytes.TrimPrefix(buf, []byte("\n"))
-	metrics, err := metric.ParseWithDefaultTime(buf, t)
+	metrics, err := metric.ParseWithDefaultTimePrecision(buf, t, precision)
 	if len(p.DefaultTags) > 0 {
 		for _, m := range metrics {
 			for k, v := range p.DefaultTags {
@@ -41,7 +41,7 @@ func (p *InfluxParser) ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf
 // a non-nil error will be returned in addition to the metrics that parsed
 // successfully.
 func (p *InfluxParser) Parse(buf []byte) ([]telegraf.Metric, error) {
-	return p.ParseWithDefaultTime(buf, time.Now())
+	return p.ParseWithDefaultTimePrecision(buf, time.Now(), "")
 }
 
 func (p *InfluxParser) ParseLine(line string) (telegraf.Metric, error) {
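Editor's note: a minimal caller-side sketch of the new signature. The precision string `"s"` is an assumption based on InfluxDB conventions; the patch itself only shows `""` (full precision) being passed through from `Parse`.

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/plugins/parsers/influx"
)

func main() {
	p := &influx.InfluxParser{}
	// A line with no trailing timestamp receives the default time; with a
	// precision of "s" (assumed InfluxDB-style precision string) that
	// default is truncated to whole seconds.
	metrics, err := p.ParseWithDefaultTimePrecision([]byte("cpu usage=0.5"), time.Now(), "s")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(metrics[0].Time())
}
```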

From 516dffa4c4579fffd6a2f757e866707e22272f6f Mon Sep 17 00:00:00 2001
From: Patrick Hemmer 
Date: Mon, 10 Apr 2017 19:45:02 -0400
Subject: [PATCH 0189/1302] set default measurement name on snmp input (#2639)

---
 CHANGELOG.md                | 1 +
 plugins/inputs/snmp/snmp.go | 1 +
 2 files changed, 2 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9aab18fcd..3a1698214 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -96,6 +96,7 @@ be deprecated eventually.
 - [#2596](https://github.com/influxdata/telegraf/pull/2596): fix timestamp parsing on prometheus plugin
 - [#2610](https://github.com/influxdata/telegraf/pull/2610): Fix deadlock when output cannot write
 - [#2410](https://github.com/influxdata/telegraf/issues/2410): Fix connection leak in postgresql.
+- [#2628](https://github.com/influxdata/telegraf/issues/2628): Set default measurement name for snmp input.
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go
index 5394e57db..2aef729b3 100644
--- a/plugins/inputs/snmp/snmp.go
+++ b/plugins/inputs/snmp/snmp.go
@@ -314,6 +314,7 @@ func Errorf(err error, msg string, format ...interface{}) error {
 func init() {
 	inputs.Add("snmp", func() telegraf.Input {
 		return &Snmp{
+			Name:           "snmp",
 			Retries:        3,
 			MaxRepetitions: 10,
 			Timeout:        internal.Duration{Duration: 5 * time.Second},

From f55af7d21f7e6f124f69009fb4b9a8ff2c91d52a Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Tue, 11 Apr 2017 11:41:09 -0700
Subject: [PATCH 0190/1302] Use name filter for IOCounters in diskio (#2649)

Use IOCountersForNames for disk counters.
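A minimal sketch of the gopsutil call this patch switches to, against the pinned revision in Godeps (the device names are illustrative assumptions; an empty names slice is expected to return counters for all devices):

```go
package main

import (
	"fmt"

	"github.com/shirou/gopsutil/disk"
)

func main() {
	// With IOCountersForNames the device filter is applied inside gopsutil,
	// so stats are only gathered for the requested devices.
	counters, err := disk.IOCountersForNames([]string{"sda", "sdb"})
	if err != nil {
		fmt.Println(err)
		return
	}
	for name, io := range counters {
		fmt.Printf("%s: read=%d write=%d\n", name, io.ReadBytes, io.WriteBytes)
	}
}
```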
---
 CHANGELOG.md                     |  1 +
 Godeps                           |  2 +-
 plugins/inputs/system/disk.go    | 15 +--------------
 plugins/inputs/system/mock_PS.go |  2 +-
 plugins/inputs/system/ps.go      |  6 +++---
 5 files changed, 7 insertions(+), 19 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3a1698214..12381152c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -97,6 +97,7 @@ be deprecated eventually.
 - [#2610](https://github.com/influxdata/telegraf/pull/2610): Fix deadlock when output cannot write
 - [#2410](https://github.com/influxdata/telegraf/issues/2410): Fix connection leak in postgresql.
 - [#2628](https://github.com/influxdata/telegraf/issues/2628): Set default measurement name for snmp input.
+- [#2649](https://github.com/influxdata/telegraf/pull/2649): Improve performance of diskio with many disks
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/Godeps b/Godeps
index ab72be8f2..2e04c0cdd 100644
--- a/Godeps
+++ b/Godeps
@@ -44,7 +44,7 @@ github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4
 github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
 github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
 github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
-github.com/shirou/gopsutil d371ba1293cb48fedc6850526ea48b3846c54f2c
+github.com/shirou/gopsutil dfbb3e40da8d6fcd1aa0d87003e965fe0ca745ea
 github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
 github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
 github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
diff --git a/plugins/inputs/system/disk.go b/plugins/inputs/system/disk.go
index 3f6d83c1c..004466f83 100644
--- a/plugins/inputs/system/disk.go
+++ b/plugins/inputs/system/disk.go
@@ -125,25 +125,12 @@ func (_ *DiskIOStats) SampleConfig() string {
 }
 
 func (s *DiskIOStats) Gather(acc telegraf.Accumulator) error {
-	diskio, err := s.ps.DiskIO()
+	diskio, err := s.ps.DiskIO(s.Devices)
 	if err != nil {
 		return fmt.Errorf("error getting disk io info: %s", err)
 	}
 
-	var restrictDevices bool
-	devices := make(map[string]bool)
-	if len(s.Devices) != 0 {
-		restrictDevices = true
-		for _, dev := range s.Devices {
-			devices[dev] = true
-		}
-	}
-
 	for _, io := range diskio {
-		_, member := devices[io.Name]
-		if restrictDevices && !member {
-			continue
-		}
 		tags := map[string]string{}
 		tags["name"] = s.diskName(io.Name)
 		for t, v := range s.diskTags(io.Name) {
diff --git a/plugins/inputs/system/mock_PS.go b/plugins/inputs/system/mock_PS.go
index e9f96a6c7..a83a8b803 100644
--- a/plugins/inputs/system/mock_PS.go
+++ b/plugins/inputs/system/mock_PS.go
@@ -61,7 +61,7 @@ func (m *MockPS) NetProto() ([]net.ProtoCountersStat, error) {
 	return r0, r1
 }
 
-func (m *MockPS) DiskIO() (map[string]disk.IOCountersStat, error) {
+func (m *MockPS) DiskIO(names []string) (map[string]disk.IOCountersStat, error) {
 	ret := m.Called()
 
 	r0 := ret.Get(0).(map[string]disk.IOCountersStat)
diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go
index b0e021e40..d25327812 100644
--- a/plugins/inputs/system/ps.go
+++ b/plugins/inputs/system/ps.go
@@ -17,7 +17,7 @@ type PS interface {
 	DiskUsage(mountPointFilter []string, fstypeExclude []string) ([]*disk.UsageStat, []*disk.PartitionStat, error)
 	NetIO() ([]net.IOCountersStat, error)
 	NetProto() ([]net.ProtoCountersStat, error)
-	DiskIO() (map[string]disk.IOCountersStat, error)
+	DiskIO(names []string) (map[string]disk.IOCountersStat, error)
 	VMStat() (*mem.VirtualMemoryStat, error)
 	SwapStat() (*mem.SwapMemoryStat, error)
 	NetConnections() ([]net.ConnectionStat, error)
@@ -120,8 +120,8 @@ func (s *systemPS) NetConnections() ([]net.ConnectionStat, error) {
 	return net.Connections("all")
 }
 
-func (s *systemPS) DiskIO() (map[string]disk.IOCountersStat, error) {
-	m, err := disk.IOCounters()
+func (s *systemPS) DiskIO(names []string) (map[string]disk.IOCountersStat, error) {
+	m, err := disk.IOCountersForNames(names)
 	if err == internal.NotImplementedError {
 		return nil, nil
 	}

From 0193cbee513bbd66b61f36a533241b9b212758fb Mon Sep 17 00:00:00 2001
From: Nick Irvine 
Date: Tue, 11 Apr 2017 12:05:39 -0700
Subject: [PATCH 0191/1302] Add max_message_len in kafka_consumer input (#2636)

---
 CHANGELOG.md                                  |  1 +
 plugins/inputs/kafka_consumer/README.md       |  4 +++
 .../inputs/kafka_consumer/kafka_consumer.go   | 31 ++++++++++++-------
 .../kafka_consumer/kafka_consumer_test.go     | 18 +++++++++++
 4 files changed, 43 insertions(+), 11 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 12381152c..10934f7fd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -66,6 +66,7 @@ be deprecated eventually.
 - [#2425](https://github.com/influxdata/telegraf/pull/2425): Support to include/exclude docker container labels as tags
 - [#1667](https://github.com/influxdata/telegraf/pull/1667): dmcache input plugin
 - [#2637](https://github.com/influxdata/telegraf/issues/2637): Add support for precision in http_listener
+- [#2636](https://github.com/influxdata/telegraf/pull/2636): Add `max_message_len` option to `kafka_consumer` input
 
 ### Bugfixes
 
diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md
index afdb51e32..6a95a7c54 100644
--- a/plugins/inputs/kafka_consumer/README.md
+++ b/plugins/inputs/kafka_consumer/README.md
@@ -28,6 +28,10 @@ from the same topic in parallel.
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
   data_format = "influx"
+
+  ## Maximum length of a message to consume, in bytes (default 0/unlimited);
+  ## larger messages are dropped
+  max_message_len = 65536
 ```
 
 ## Testing
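A self-contained sketch of the option's semantics, matching the guard added to the receiver loop in the plugin diff below: a zero limit means unlimited, and an overlong message is rejected before it reaches the parser.

```go
package main

import "fmt"

// dropOverlong mirrors the length guard (a simplified sketch, not the
// plugin's code verbatim).
func dropOverlong(msg []byte, maxMessageLen int) bool {
	return maxMessageLen != 0 && len(msg) > maxMessageLen
}

func main() {
	fmt.Println(dropOverlong(make([]byte, 65537), 65536)) // true: dropped
	fmt.Println(dropOverlong([]byte("cpu usage=0.5"), 0)) // false: unlimited
}
```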
diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go
index 6f1f4020b..2f6933db0 100644
--- a/plugins/inputs/kafka_consumer/kafka_consumer.go
+++ b/plugins/inputs/kafka_consumer/kafka_consumer.go
@@ -17,6 +17,7 @@ import (
 type Kafka struct {
 	ConsumerGroup   string
 	Topics          []string
+	MaxMessageLen   int
 	ZookeeperPeers  []string
 	ZookeeperChroot string
 	Consumer        *consumergroup.ConsumerGroup
@@ -58,10 +59,14 @@ var sampleConfig = `
   offset = "oldest"
 
   ## Data format to consume.
-  ## Each data format has it's own unique set of configuration options, read
+  ## Each data format has its own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
   data_format = "influx"
+
+  ## Maximum length of a message to consume, in bytes (default 0/unlimited);
+  ## larger messages are dropped
+  max_message_len = 65536
 `
 
 func (k *Kafka) SampleConfig() string {
@@ -130,17 +135,21 @@ func (k *Kafka) receiver() {
 			return
 		case err := <-k.errs:
 			if err != nil {
-				k.acc.AddError(fmt.Errorf("Kafka Consumer Error: %s\n", err))
+				k.acc.AddError(fmt.Errorf("Consumer Error: %s\n", err))
 			}
 		case msg := <-k.in:
-			metrics, err := k.parser.Parse(msg.Value)
-			if err != nil {
-				k.acc.AddError(fmt.Errorf("E! Kafka Message Parse Error\nmessage: %s\nerror: %s",
-					string(msg.Value), err.Error()))
-			}
-
-			for _, metric := range metrics {
-				k.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time())
+			if k.MaxMessageLen != 0 && len(msg.Value) > k.MaxMessageLen {
+				k.acc.AddError(fmt.Errorf("Message longer than max_message_len (%d > %d)",
+					len(msg.Value), k.MaxMessageLen))
+			} else {
+				metrics, err := k.parser.Parse(msg.Value)
+				if err != nil {
+					k.acc.AddError(fmt.Errorf("Message Parse Error\nmessage: %s\nerror: %s",
+						string(msg.Value), err.Error()))
+				}
+				for _, metric := range metrics {
+					k.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time())
+				}
 			}
 
 			if !k.doNotCommitMsgs {
@@ -159,7 +168,7 @@ func (k *Kafka) Stop() {
 	defer k.Unlock()
 	close(k.done)
 	if err := k.Consumer.Close(); err != nil {
-		k.acc.AddError(fmt.Errorf("E! Error closing kafka consumer: %s\n", err.Error()))
+		k.acc.AddError(fmt.Errorf("Error closing consumer: %s\n", err.Error()))
 	}
 }
 
diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go
index e1c24adbe..04498261c 100644
--- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go
+++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go
@@ -1,6 +1,7 @@
 package kafka_consumer
 
 import (
+	"strings"
 	"testing"
 
 	"github.com/influxdata/telegraf/plugins/parsers"
@@ -62,6 +63,23 @@ func TestRunParserInvalidMsg(t *testing.T) {
 	assert.Equal(t, acc.NFields(), 0)
 }
 
+// Test that overlong messages are dropped
+func TestDropOverlongMsg(t *testing.T) {
+	const maxMessageLen = 64 * 1024
+	k, in := newTestKafka()
+	k.MaxMessageLen = maxMessageLen
+	acc := testutil.Accumulator{}
+	k.acc = &acc
+	defer close(k.done)
+	overlongMsg := strings.Repeat("v", maxMessageLen+1)
+
+	go k.receiver()
+	in <- saramaMsg(overlongMsg)
+	acc.WaitError(1)
+
+	assert.Equal(t, acc.NFields(), 0)
+}
+
 // Test that the parser parses kafka messages into points
 func TestRunParserAndGather(t *testing.T) {
 	k, in := newTestKafka()

From 2c98e5ae66127c87d44f5a2ac5b819879a4b76e5 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Wed, 12 Apr 2017 10:41:26 -0700
Subject: [PATCH 0192/1302] Add collectd parser (#2654)

---
 CHANGELOG.md                                  |   1 +
 Godeps                                        |   1 +
 README.md                                     |  10 +
 docs/DATA_FORMATS_INPUT.md                    |  41 +++
 docs/LICENSE_OF_DEPENDENCIES.md               |   2 +-
 internal/config/config.go                     |  31 ++
 logger/logger.go                              |  11 +-
 logger/logger_test.go                         |  13 +
 .../inputs/socket_listener/socket_listener.go |   4 +-
 plugins/parsers/collectd/parser.go            | 165 ++++++++++
 plugins/parsers/collectd/parser_test.go       | 298 ++++++++++++++++++
 plugins/parsers/registry.go                   |  19 ++
 12 files changed, 592 insertions(+), 4 deletions(-)
 create mode 100644 plugins/parsers/collectd/parser.go
 create mode 100644 plugins/parsers/collectd/parser_test.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 10934f7fd..a2d9fc68e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -67,6 +67,7 @@ be deprecated eventually.
 - [#1667](https://github.com/influxdata/telegraf/pull/1667): dmcache input plugin
 - [#2637](https://github.com/influxdata/telegraf/issues/2637): Add support for precision in http_listener
 - [#2636](https://github.com/influxdata/telegraf/pull/2636): Add `max_message_len` option to `kafka_consumer` input
+- [#1100](https://github.com/influxdata/telegraf/issues/1100): Add collectd parser
 
 ### Bugfixes
 
diff --git a/Godeps b/Godeps
index 2e04c0cdd..a41d028c8 100644
--- a/Godeps
+++ b/Godeps
@@ -1,3 +1,4 @@
+collectd.org 2ce144541b8903101fb8f1483cc0497a68798122
 github.com/Shopify/sarama 574d3147eee384229bf96a5d12c207fe7b5234f3
 github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
 github.com/aerospike/aerospike-client-go 95e1ad7791bdbca44707fedbb29be42024900d9c
diff --git a/README.md b/README.md
index 55154e36a..f46c2e298 100644
--- a/README.md
+++ b/README.md
@@ -195,6 +195,16 @@ Telegraf can also collect metrics via the following service plugins:
   * [mandrill](./plugins/inputs/webhooks/mandrill)
   * [rollbar](./plugins/inputs/webhooks/rollbar)
 
+Telegraf is able to parse the following input data formats into metrics; these
+formats may be used with input plugins supporting the `data_format` option:
+
+* [InfluxDB Line Protocol](./docs/DATA_FORMATS_INPUT.md#influx)
+* [JSON](./docs/DATA_FORMATS_INPUT.md#json)
+* [Graphite](./docs/DATA_FORMATS_INPUT.md#graphite)
+* [Value](./docs/DATA_FORMATS_INPUT.md#value)
+* [Nagios](./docs/DATA_FORMATS_INPUT.md#nagios)
+* [Collectd](./docs/DATA_FORMATS_INPUT.md#collectd)
+
 ## Processor Plugins
 
 * [printer](./plugins/processors/printer)
diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md
index f2a635d89..59287e4a4 100644
--- a/docs/DATA_FORMATS_INPUT.md
+++ b/docs/DATA_FORMATS_INPUT.md
@@ -7,6 +7,7 @@ Telegraf is able to parse the following input data formats into metrics:
 1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite)
 1. [Value](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#value), ie: 45 or "booyah"
 1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only)
+1. [Collectd](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#collectd)
 
 Telegraf metrics, like InfluxDB
 [points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
@@ -438,3 +439,43 @@ Note: Nagios Input Data Formats is only supported in `exec` input plugin.
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
   data_format = "nagios"
 ```
+
+# Collectd:
+
+The collectd format parses the collectd binary network protocol.  Tags are
+created for host, instance, type, and type instance.  All collectd values are
+added as float64 fields.
+
+For more information about the binary network protocol see
+[here](https://collectd.org/wiki/index.php/Binary_protocol).
+
+You can control the cryptographic settings with parser options.  Create an
+authentication file and set `collectd_auth_file` to the path of the file, then
+set the desired security level in `collectd_security_level`.
+
+Additional information including client setup can be found
+[here](https://collectd.org/wiki/index.php/Networking_introduction#Cryptographic_setup).
+
+You can also change the path to the TypesDB file, or load additional TypesDB
+files, using `collectd_typesdb`.
+
+#### Collectd Configuration:
+
+```toml
+[[inputs.socket_listener]]
+  service_address = "udp://127.0.0.1:25826"
+  name_prefix = "collectd_"
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "collectd"
+
+  ## Authentication file for cryptographic security levels
+  collectd_auth_file = "/etc/collectd/auth_file"
+  ## One of none (default), sign, or encrypt
+  collectd_security_level = "encrypt"
+  ## Path to TypesDB specifications
+  collectd_typesdb = ["/usr/share/collectd/types.db"]
+```
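For orientation, a hedged sketch of driving the new parser directly rather than through `socket_listener` (the types.db path is an assumption for illustration; the config above wires the same options up for you):

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/plugins/parsers/collectd"
)

func main() {
	// An empty auth file falls back to /etc/collectd/auth_file, and an
	// unrecognized security level falls back to "none".
	parser, err := collectd.NewCollectdParser("", "none", []string{"/usr/share/collectd/types.db"})
	if err != nil {
		fmt.Println(err)
		return
	}
	// pkt would hold a raw collectd binary-protocol packet, e.g. read from
	// a UDP socket; each value in the packet becomes one telegraf metric.
	var pkt []byte
	metrics, err := parser.Parse(pkt)
	fmt.Println(len(metrics), err)
}
```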
diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md
index 5bb1bd036..a367aa7fb 100644
--- a/docs/LICENSE_OF_DEPENDENCIES.md
+++ b/docs/LICENSE_OF_DEPENDENCIES.md
@@ -1,4 +1,5 @@
 # List
+- collectd.org [MIT LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
 - github.com/Shopify/sarama [MIT LICENSE](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE)
 - github.com/Sirupsen/logrus [MIT LICENSE](https://github.com/Sirupsen/logrus/blob/master/LICENSE)
 - github.com/armon/go-metrics [MIT LICENSE](https://github.com/armon/go-metrics/blob/master/LICENSE)
@@ -30,4 +31,3 @@
 - gopkg.in/dancannon/gorethink.v1 [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
 - gopkg.in/mgo.v2 [BSD LICENSE](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
 - golang.org/x/crypto/ [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
-
diff --git a/internal/config/config.go b/internal/config/config.go
index 013e81c12..f8c304179 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -1230,6 +1230,34 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
 		}
 	}
 
+	if node, ok := tbl.Fields["collectd_auth_file"]; ok {
+		if kv, ok := node.(*ast.KeyValue); ok {
+			if str, ok := kv.Value.(*ast.String); ok {
+				c.CollectdAuthFile = str.Value
+			}
+		}
+	}
+
+	if node, ok := tbl.Fields["collectd_security_level"]; ok {
+		if kv, ok := node.(*ast.KeyValue); ok {
+			if str, ok := kv.Value.(*ast.String); ok {
+				c.CollectdSecurityLevel = str.Value
+			}
+		}
+	}
+
+	if node, ok := tbl.Fields["collectd_typesdb"]; ok {
+		if kv, ok := node.(*ast.KeyValue); ok {
+			if ary, ok := kv.Value.(*ast.Array); ok {
+				for _, elem := range ary.Value {
+					if str, ok := elem.(*ast.String); ok {
+						c.CollectdTypesDB = append(c.CollectdTypesDB, str.Value)
+					}
+				}
+			}
+		}
+	}
+
 	c.MetricName = name
 
 	delete(tbl.Fields, "data_format")
@@ -1237,6 +1265,9 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
 	delete(tbl.Fields, "templates")
 	delete(tbl.Fields, "tag_keys")
 	delete(tbl.Fields, "data_type")
+	delete(tbl.Fields, "collectd_auth_file")
+	delete(tbl.Fields, "collectd_security_level")
+	delete(tbl.Fields, "collectd_typesdb")
 
 	return parsers.NewParser(c)
 }
diff --git a/logger/logger.go b/logger/logger.go
index 49613c27d..7ad1c8069 100644
--- a/logger/logger.go
+++ b/logger/logger.go
@@ -4,11 +4,14 @@ import (
 	"io"
 	"log"
 	"os"
+	"regexp"
 	"time"
 
 	"github.com/influxdata/wlog"
 )
 
+var prefixRegex = regexp.MustCompile("^[DIWE]!")
+
 // newTelegrafWriter returns a logging-wrapped writer.
 func newTelegrafWriter(w io.Writer) io.Writer {
 	return &telegrafLog{
@@ -21,7 +24,13 @@ type telegrafLog struct {
 }
 
 func (t *telegrafLog) Write(b []byte) (n int, err error) {
-	return t.writer.Write(append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...))
+	var line []byte
+	if !prefixRegex.Match(b) {
+		line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" I! "), b...)
+	} else {
+		line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...)
+	}
+	return t.writer.Write(line)
 }
 
 // SetupLogging configures the logging output.
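The effect of the new prefix check, sketched from the caller's side (the output shapes in the comments are illustrative assumptions, timestamps included):

```go
package main

import "log"

func main() {
	// With telegraf's writer installed by SetupLogging, a line without a
	// D!/I!/W!/E! prefix now gets a default "I!" level; prefixed lines
	// pass through unchanged.
	log.Printf("TEST")    // 2017-04-12T00:00:00Z I! TEST
	log.Printf("E! TEST") // 2017-04-12T00:00:00Z E! TEST
}
```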
diff --git a/logger/logger_test.go b/logger/logger_test.go
index 8c0826e65..09c7c82eb 100644
--- a/logger/logger_test.go
+++ b/logger/logger_test.go
@@ -51,6 +51,19 @@ func TestErrorWriteLogToFile(t *testing.T) {
 	assert.Equal(t, f[19:], []byte("Z E! TEST\n"))
 }
 
+func TestAddDefaultLogLevel(t *testing.T) {
+	tmpfile, err := ioutil.TempFile("", "")
+	assert.NoError(t, err)
+	defer func() { os.Remove(tmpfile.Name()) }()
+
+	SetupLogging(true, false, tmpfile.Name())
+	log.Printf("TEST")
+
+	f, err := ioutil.ReadFile(tmpfile.Name())
+	assert.NoError(t, err)
+	assert.Equal(t, f[19:], []byte("Z I! TEST\n"))
+}
+
 func BenchmarkTelegrafLogWrite(b *testing.B) {
 	var msg = []byte("test")
 	var buf bytes.Buffer
diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go
index b5c0202cc..4a9a470a7 100644
--- a/plugins/inputs/socket_listener/socket_listener.go
+++ b/plugins/inputs/socket_listener/socket_listener.go
@@ -71,7 +71,7 @@ func (ssl *streamSocketListener) read(c net.Conn) {
 	for scnr.Scan() {
 		metrics, err := ssl.Parse(scnr.Bytes())
 		if err != nil {
-			ssl.AddError(fmt.Errorf("unable to parse incoming line"))
+			ssl.AddError(fmt.Errorf("unable to parse incoming line: %s", err))
 			//TODO rate limit
 			continue
 		}
@@ -105,7 +105,7 @@ func (psl *packetSocketListener) listen() {
 
 		metrics, err := psl.Parse(buf[:n])
 		if err != nil {
-			psl.AddError(fmt.Errorf("unable to parse incoming packet"))
+			psl.AddError(fmt.Errorf("unable to parse incoming packet: %s", err))
 			//TODO rate limit
 			continue
 		}
diff --git a/plugins/parsers/collectd/parser.go b/plugins/parsers/collectd/parser.go
new file mode 100644
index 000000000..20525610c
--- /dev/null
+++ b/plugins/parsers/collectd/parser.go
@@ -0,0 +1,165 @@
+package collectd
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"os"
+
+	"collectd.org/api"
+	"collectd.org/network"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
+)
+
+const (
+	DefaultAuthFile = "/etc/collectd/auth_file"
+)
+
+type CollectdParser struct {
+	// DefaultTags will be added to every parsed metric
+	DefaultTags map[string]string
+
+	popts network.ParseOpts
+}
+
+func (p *CollectdParser) SetParseOpts(popts *network.ParseOpts) {
+	p.popts = *popts
+}
+
+func NewCollectdParser(
+	authFile string,
+	securityLevel string,
+	typesDB []string,
+) (*CollectdParser, error) {
+	popts := network.ParseOpts{}
+
+	switch securityLevel {
+	case "none":
+		popts.SecurityLevel = network.None
+	case "sign":
+		popts.SecurityLevel = network.Sign
+	case "encrypt":
+		popts.SecurityLevel = network.Encrypt
+	default:
+		popts.SecurityLevel = network.None
+	}
+
+	if authFile == "" {
+		authFile = DefaultAuthFile
+	}
+	popts.PasswordLookup = network.NewAuthFile(authFile)
+
+	for _, path := range typesDB {
+		db, err := LoadTypesDB(path)
+		if err != nil {
+			return nil, err
+		}
+
+		if popts.TypesDB != nil {
+			popts.TypesDB.Merge(db)
+		} else {
+			popts.TypesDB = db
+		}
+	}
+
+	parser := CollectdParser{popts: popts}
+	return &parser, nil
+}
+
+func (p *CollectdParser) Parse(buf []byte) ([]telegraf.Metric, error) {
+	valueLists, err := network.Parse(buf, p.popts)
+	if err != nil {
+		return nil, fmt.Errorf("Collectd parser error: %s", err)
+	}
+
+	metrics := []telegraf.Metric{}
+	for _, valueList := range valueLists {
+		metrics = append(metrics, UnmarshalValueList(valueList)...)
+	}
+
+	if len(p.DefaultTags) > 0 {
+		for _, m := range metrics {
+			for k, v := range p.DefaultTags {
+				// only set the default tag if it doesn't already exist:
+				if !m.HasTag(k) {
+					m.AddTag(k, v)
+				}
+			}
+		}
+	}
+
+	return metrics, nil
+}
+
+func (p *CollectdParser) ParseLine(line string) (telegraf.Metric, error) {
+	metrics, err := p.Parse([]byte(line))
+	if err != nil {
+		return nil, err
+	}
+
+	if len(metrics) != 1 {
+		return nil, errors.New("Line contains multiple metrics")
+	}
+
+	return metrics[0], nil
+}
+
+func (p *CollectdParser) SetDefaultTags(tags map[string]string) {
+	p.DefaultTags = tags
+}
+
+// UnmarshalValueList translates a ValueList into a Telegraf metric.
+func UnmarshalValueList(vl *api.ValueList) []telegraf.Metric {
+	timestamp := vl.Time.UTC()
+
+	var metrics []telegraf.Metric
+	for i := range vl.Values {
+		// Measurement name joins the plugin name and data source name.
+		name := fmt.Sprintf("%s_%s", vl.Identifier.Plugin, vl.DSName(i))
+		tags := make(map[string]string)
+		fields := make(map[string]interface{})
+
+		// Convert interface back to actual type, then to float64
+		switch value := vl.Values[i].(type) {
+		case api.Gauge:
+			fields["value"] = float64(value)
+		case api.Derive:
+			fields["value"] = float64(value)
+		case api.Counter:
+			fields["value"] = float64(value)
+		}
+
+		if vl.Identifier.Host != "" {
+			tags["host"] = vl.Identifier.Host
+		}
+		if vl.Identifier.PluginInstance != "" {
+			tags["instance"] = vl.Identifier.PluginInstance
+		}
+		if vl.Identifier.Type != "" {
+			tags["type"] = vl.Identifier.Type
+		}
+		if vl.Identifier.TypeInstance != "" {
+			tags["type_instance"] = vl.Identifier.TypeInstance
+		}
+
+		// Drop invalid points
+		m, err := metric.New(name, tags, fields, timestamp)
+		if err != nil {
+			log.Printf("E! Dropping metric %v: %v", name, err)
+			continue
+		}
+
+		metrics = append(metrics, m)
+	}
+	return metrics
+}
+
+func LoadTypesDB(path string) (*api.TypesDB, error) {
+	reader, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	return api.NewTypesDB(reader)
+}
diff --git a/plugins/parsers/collectd/parser_test.go b/plugins/parsers/collectd/parser_test.go
new file mode 100644
index 000000000..3aad04013
--- /dev/null
+++ b/plugins/parsers/collectd/parser_test.go
@@ -0,0 +1,298 @@
+package collectd
+
+import (
+	"context"
+	"testing"
+
+	"collectd.org/api"
+	"collectd.org/network"
+	"github.com/stretchr/testify/require"
+
+	"github.com/influxdata/telegraf"
+)
+
+type AuthMap struct {
+	Passwd map[string]string
+}
+
+func (p *AuthMap) Password(user string) (string, error) {
+	return p.Passwd[user], nil
+}
+
+type metricData struct {
+	name   string
+	tags   map[string]string
+	fields map[string]interface{}
+}
+
+type testCase struct {
+	vl       []api.ValueList
+	expected []metricData
+}
+
+var singleMetric = testCase{
+	[]api.ValueList{
+		api.ValueList{
+			Identifier: api.Identifier{
+				Host:           "xyzzy",
+				Plugin:         "cpu",
+				PluginInstance: "1",
+				Type:           "cpu",
+				TypeInstance:   "user",
+			},
+			Values: []api.Value{
+				api.Counter(42),
+			},
+			DSNames: []string(nil),
+		},
+	},
+	[]metricData{
+		metricData{
+			"cpu_value",
+			map[string]string{
+				"type_instance": "user",
+				"host":          "xyzzy",
+				"instance":      "1",
+				"type":          "cpu",
+			},
+			map[string]interface{}{
+				"value": float64(42),
+			},
+		},
+	},
+}
+
+var multiMetric = testCase{
+	[]api.ValueList{
+		api.ValueList{
+			Identifier: api.Identifier{
+				Host:           "xyzzy",
+				Plugin:         "cpu",
+				PluginInstance: "0",
+				Type:           "cpu",
+				TypeInstance:   "user",
+			},
+			Values: []api.Value{
+				api.Derive(42),
+				api.Gauge(42),
+			},
+			DSNames: []string(nil),
+		},
+	},
+	[]metricData{
+		metricData{
+			"cpu_0",
+			map[string]string{
+				"type_instance": "user",
+				"host":          "xyzzy",
+				"instance":      "0",
+				"type":          "cpu",
+			},
+			map[string]interface{}{
+				"value": float64(42),
+			},
+		},
+		metricData{
+			"cpu_1",
+			map[string]string{
+				"type_instance": "user",
+				"host":          "xyzzy",
+				"instance":      "0",
+				"type":          "cpu",
+			},
+			map[string]interface{}{
+				"value": float64(42),
+			},
+		},
+	},
+}
+
+func TestNewCollectdParser(t *testing.T) {
+	parser, err := NewCollectdParser("", "", []string{})
+	require.Nil(t, err)
+	require.Equal(t, parser.popts.SecurityLevel, network.None)
+	require.NotNil(t, parser.popts.PasswordLookup)
+	require.Nil(t, parser.popts.TypesDB)
+}
+
+func TestParse(t *testing.T) {
+	cases := []testCase{singleMetric, multiMetric}
+
+	for _, tc := range cases {
+		buf, err := writeValueList(tc.vl)
+		require.Nil(t, err)
+		bytes, err := buf.Bytes()
+		require.Nil(t, err)
+
+		parser := &CollectdParser{}
+		require.Nil(t, err)
+		metrics, err := parser.Parse(bytes)
+		require.Nil(t, err)
+
+		assertEqualMetrics(t, tc.expected, metrics)
+	}
+}
+
+func TestParse_DefaultTags(t *testing.T) {
+	buf, err := writeValueList(singleMetric.vl)
+	require.Nil(t, err)
+	bytes, err := buf.Bytes()
+	require.Nil(t, err)
+
+	parser := &CollectdParser{}
+	parser.SetDefaultTags(map[string]string{
+		"foo": "bar",
+	})
+	require.Nil(t, err)
+	metrics, err := parser.Parse(bytes)
+	require.Nil(t, err)
+
+	require.Equal(t, "bar", metrics[0].Tags()["foo"])
+}
+
+func TestParse_SignSecurityLevel(t *testing.T) {
+	parser := &CollectdParser{}
+	popts := &network.ParseOpts{
+		SecurityLevel: network.Sign,
+		PasswordLookup: &AuthMap{
+			map[string]string{
+				"user0": "bar",
+			},
+		},
+	}
+	parser.SetParseOpts(popts)
+
+	// Signed data
+	buf, err := writeValueList(singleMetric.vl)
+	require.Nil(t, err)
+	buf.Sign("user0", "bar")
+	bytes, err := buf.Bytes()
+	require.Nil(t, err)
+
+	metrics, err := parser.Parse(bytes)
+	require.Nil(t, err)
+	assertEqualMetrics(t, singleMetric.expected, metrics)
+
+	// Encrypted data
+	buf, err = writeValueList(singleMetric.vl)
+	require.Nil(t, err)
+	buf.Encrypt("user0", "bar")
+	bytes, err = buf.Bytes()
+	require.Nil(t, err)
+
+	metrics, err = parser.Parse(bytes)
+	require.Nil(t, err)
+	assertEqualMetrics(t, singleMetric.expected, metrics)
+
+	// Plain text data skipped
+	buf, err = writeValueList(singleMetric.vl)
+	require.Nil(t, err)
+	bytes, err = buf.Bytes()
+	require.Nil(t, err)
+
+	metrics, err = parser.Parse(bytes)
+	require.Nil(t, err)
+	require.Equal(t, []telegraf.Metric{}, metrics)
+
+	// Wrong password error
+	buf, err = writeValueList(singleMetric.vl)
+	require.Nil(t, err)
+	buf.Sign("x", "y")
+	bytes, err = buf.Bytes()
+	require.Nil(t, err)
+
+	metrics, err = parser.Parse(bytes)
+	require.NotNil(t, err)
+}
+
+func TestParse_EncryptSecurityLevel(t *testing.T) {
+	parser := &CollectdParser{}
+	popts := &network.ParseOpts{
+		SecurityLevel: network.Encrypt,
+		PasswordLookup: &AuthMap{
+			map[string]string{
+				"user0": "bar",
+			},
+		},
+	}
+	parser.SetParseOpts(popts)
+
+	// Signed data skipped
+	buf, err := writeValueList(singleMetric.vl)
+	require.Nil(t, err)
+	buf.Sign("user0", "bar")
+	bytes, err := buf.Bytes()
+	require.Nil(t, err)
+
+	metrics, err := parser.Parse(bytes)
+	require.Nil(t, err)
+	require.Equal(t, []telegraf.Metric{}, metrics)
+
+	// Encrypted data
+	buf, err = writeValueList(singleMetric.vl)
+	require.Nil(t, err)
+	buf.Encrypt("user0", "bar")
+	bytes, err = buf.Bytes()
+	require.Nil(t, err)
+
+	metrics, err = parser.Parse(bytes)
+	require.Nil(t, err)
+	assertEqualMetrics(t, singleMetric.expected, metrics)
+
+	// Plain text data skipped
+	buf, err = writeValueList(singleMetric.vl)
+	require.Nil(t, err)
+	bytes, err = buf.Bytes()
+	require.Nil(t, err)
+
+	metrics, err = parser.Parse(bytes)
+	require.Nil(t, err)
+	require.Equal(t, []telegraf.Metric{}, metrics)
+
+	// Wrong password error
+	buf, err = writeValueList(singleMetric.vl)
+	require.Nil(t, err)
+	buf.Sign("x", "y")
+	bytes, err = buf.Bytes()
+	require.Nil(t, err)
+
+	metrics, err = parser.Parse(bytes)
+	require.NotNil(t, err)
+}
+
+func TestParseLine(t *testing.T) {
+	buf, err := writeValueList(singleMetric.vl)
+	require.Nil(t, err)
+	bytes, err := buf.Bytes()
+	require.Nil(t, err)
+
+	parser, err := NewCollectdParser("", "", []string{})
+	require.Nil(t, err)
+	metric, err := parser.ParseLine(string(bytes))
+	require.Nil(t, err)
+
+	assertEqualMetrics(t, singleMetric.expected, []telegraf.Metric{metric})
+}
+
+func writeValueList(valueLists []api.ValueList) (*network.Buffer, error) {
+	buffer := network.NewBuffer(0)
+
+	ctx := context.Background()
+	for _, vl := range valueLists {
+		err := buffer.Write(ctx, &vl)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return buffer, nil
+}
+
+func assertEqualMetrics(t *testing.T, expected []metricData, received []telegraf.Metric) {
+	require.Equal(t, len(expected), len(received))
+	for i, m := range received {
+		require.Equal(t, expected[i].name, m.Name())
+		require.Equal(t, expected[i].tags, m.Tags())
+		require.Equal(t, expected[i].fields, m.Fields())
+	}
+}
diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go
index 360d795bc..bda6aeba3 100644
--- a/plugins/parsers/registry.go
+++ b/plugins/parsers/registry.go
@@ -5,6 +5,7 @@ import (
 
 	"github.com/influxdata/telegraf"
 
+	"github.com/influxdata/telegraf/plugins/parsers/collectd"
 	"github.com/influxdata/telegraf/plugins/parsers/graphite"
 	"github.com/influxdata/telegraf/plugins/parsers/influx"
 	"github.com/influxdata/telegraf/plugins/parsers/json"
@@ -53,6 +54,13 @@ type Config struct {
 	// MetricName applies to JSON & value. This will be the name of the measurement.
 	MetricName string
 
+	// Authentication file for collectd
+	CollectdAuthFile string
+	// One of none (default), sign, or encrypt
+	CollectdSecurityLevel string
+	// Dataset specification for collectd
+	CollectdTypesDB []string
+
 	// DataType only applies to value, this will be the type to parse value to
 	DataType string
 
@@ -78,6 +86,9 @@ func NewParser(config *Config) (Parser, error) {
 	case "graphite":
 		parser, err = NewGraphiteParser(config.Separator,
 			config.Templates, config.DefaultTags)
+	case "collectd":
+		parser, err = NewCollectdParser(config.CollectdAuthFile,
+			config.CollectdSecurityLevel, config.CollectdTypesDB)
 	default:
 		err = fmt.Errorf("Invalid data format: %s", config.DataFormat)
 	}
@@ -124,3 +135,11 @@ func NewValueParser(
 		DefaultTags: defaultTags,
 	}, nil
 }
+
+func NewCollectdParser(
+	authFile string,
+	securityLevel string,
+	typesDB []string,
+) (Parser, error) {
+	return collectd.NewCollectdParser(authFile, securityLevel, typesDB)
+}

From 360b10c4deecb5a3b5f9322cb5702c42ed205671 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Wed, 12 Apr 2017 10:42:11 -0700
Subject: [PATCH 0193/1302] Clarify precision documentation (#2655)

---
 internal/config/config.go | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/internal/config/config.go b/internal/config/config.go
index f8c304179..61263f49a 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -85,8 +85,8 @@ type AgentConfig struct {
 	//     ie, if Interval=10s then always collect on :00, :10, :20, etc.
 	RoundInterval bool
 
-	// By default, precision will be set to the same timestamp order as the
-	// collection interval, with the maximum being 1s.
+	// By default or when set to "0s", precision will be set to the same
+	// timestamp order as the collection interval, with the maximum being 1s.
 	//   ie, when interval = "10s", precision will be "1s"
 	//       when interval = "250ms", precision will be "1ms"
 	// Precision will NOT be used for service inputs. It is up to each individual
@@ -230,10 +230,13 @@ var header = `# Telegraf Configuration
   ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
   flush_jitter = "0s"
 
-  ## By default, precision will be set to the same timestamp order as the
-  ## collection interval, with the maximum being 1s.
-  ## Precision will NOT be used for service inputs, such as logparser and statsd.
-  ## Valid values are "ns", "us" (or "µs"), "ms", "s".
+  ## By default or when set to "0s", precision will be set to the same
+  ## timestamp order as the collection interval, with the maximum being 1s.
+  ##   ie, when interval = "10s", precision will be "1s"
+  ##       when interval = "250ms", precision will be "1ms"
+  ## Precision will NOT be used for service inputs. It is up to each individual
+  ## service input to set the timestamp at the appropriate precision.
+  ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
   precision = ""
 
   ## Logging configuration:

From 49ab4e26f8ea6d22d52958fc29c8e8908fa8fca0 Mon Sep 17 00:00:00 2001
From: Jesús Roncero 
Date: Wed, 12 Apr 2017 20:04:44 +0100
Subject: [PATCH 0194/1302] Nagios plugin documentation fix (#2659)

---
 docs/DATA_FORMATS_INPUT.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md
index 59287e4a4..8f80b560e 100644
--- a/docs/DATA_FORMATS_INPUT.md
+++ b/docs/DATA_FORMATS_INPUT.md
@@ -428,13 +428,13 @@ Note: Nagios Input Data Formats is only supported in `exec` input plugin.
 ```toml
 [[inputs.exec]]
   ## Commands array
-  commands = ["/usr/lib/nagios/plugins/check_load", "-w 5,6,7 -c 7,8,9"]
+  commands = ["/usr/lib/nagios/plugins/check_load -w 5,6,7 -c 7,8,9"]
 
   ## measurement name suffix (for separating different commands)
   name_suffix = "_mycollector"
 
   ## Data format to consume.
-  ## Each data format has it's own unique set of configuration options, read
+  ## Each data format has its own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
   data_format = "nagios"

From 3e0c55bff9cdbc9dec34c49d8d65972812ad9d9a Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Wed, 12 Apr 2017 17:10:17 -0700
Subject: [PATCH 0195/1302] Update grok version (#2662)

---
 Godeps                                     |  2 +-
 plugins/inputs/logparser/grok/grok_test.go | 20 ++++++++++++++++++++
 2 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/Godeps b/Godeps
index a41d028c8..0a7cc30a5 100644
--- a/Godeps
+++ b/Godeps
@@ -49,7 +49,7 @@ github.com/shirou/gopsutil dfbb3e40da8d6fcd1aa0d87003e965fe0ca745ea
 github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
 github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
 github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
-github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2
+github.com/vjeantet/grok d73e972b60935c7fec0b4ffbc904ed39ecaf7efe
 github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee
 github.com/wvanbergen/kazoo-go 968957352185472eacb69215fa3dbfcfdbac1096
 github.com/yuin/gopher-lua 66c871e454fcf10251c61bf8eff02d0978cae75a
diff --git a/plugins/inputs/logparser/grok/grok_test.go b/plugins/inputs/logparser/grok/grok_test.go
index 4e0ead6e9..64fb20c43 100644
--- a/plugins/inputs/logparser/grok/grok_test.go
+++ b/plugins/inputs/logparser/grok/grok_test.go
@@ -687,3 +687,23 @@ func TestTsModder_Rollover(t *testing.T) {
 	}
 	assert.Equal(t, reftime.Add(time.Nanosecond*1000), modt)
 }
+
+func TestShortPatternRegression(t *testing.T) {
+	p := &Parser{
+		Patterns: []string{"%{TS_UNIX:timestamp:ts-unix} %{NUMBER:value:int}"},
+		CustomPatterns: `
+		  TS_UNIX %{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ} %{YEAR}
+		`,
+	}
+	require.NoError(t, p.Compile())
+
+	metric, err := p.ParseLine(`Wed Apr 12 13:10:34 PST 2017 42`)
+	require.NoError(t, err)
+	require.NotNil(t, metric)
+
+	require.Equal(t,
+		map[string]interface{}{
+			"value": int64(42),
+		},
+		metric.Fields())
+}

From 9388fff1f7366da864c1331ec3f7810a7a308282 Mon Sep 17 00:00:00 2001
From: Chris Goffinet 
Date: Wed, 12 Apr 2017 20:40:10 -0400
Subject: [PATCH 0196/1302] Fixed content-type header in output plugin OpenTSDB
 (#2663)

---
 plugins/outputs/opentsdb/opentsdb_http.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/outputs/opentsdb/opentsdb_http.go b/plugins/outputs/opentsdb/opentsdb_http.go
index 912ca670a..e74e74f03 100644
--- a/plugins/outputs/opentsdb/opentsdb_http.go
+++ b/plugins/outputs/opentsdb/opentsdb_http.go
@@ -134,7 +134,7 @@ func (o *openTSDBHttp) flush() error {
 	if err != nil {
 		return fmt.Errorf("Error when building request: %s", err.Error())
 	}
-	req.Header.Set("Content-Type", "applicaton/json")
+	req.Header.Set("Content-Type", "application/json")
 	req.Header.Set("Content-Encoding", "gzip")
 
 	if o.Debug {

From 45c9b867f63b665c0edbf15dc71bf650100a7709 Mon Sep 17 00:00:00 2001
From: Gregory Kman 
Date: Wed, 12 Apr 2017 19:46:48 -0500
Subject: [PATCH 0197/1302] Update ping-input-plugin Readme (#2651)

---
 plugins/inputs/ping/README.md | 35 +++++++++++++++++++++++------------
 plugins/inputs/ping/ping.go   |  2 +-
 2 files changed, 24 insertions(+), 13 deletions(-)

diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md
index 38558a33c..b02345e8e 100644
--- a/plugins/inputs/ping/README.md
+++ b/plugins/inputs/ping/README.md
@@ -1,20 +1,27 @@
-# Ping input plugin
+# Ping Input Plugin
 
 This input plugin measures the round-trip response time.
 
-## Windows:
 ### Configuration:
+
 ```
-	## urls to ping
-	urls = ["www.google.com"] # required
-	
-	## number of pings to send per collection (ping -n <COUNT>)
-	count = 4 # required
-	
-	## Ping timeout, in seconds. 0 means default timeout (ping -w <TIMEOUT>)
-	Timeout = 0
+# NOTE: this plugin forks the ping command. You may need to set capabilities
+# via setcap cap_net_raw+p /bin/ping
+[[inputs.ping]]
+## List of urls to ping
+urls = ["www.google.com"] # required
+## number of pings to send per collection (ping -c <COUNT>)
+# count = 1
+## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
+# ping_interval = 1.0
+## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
+# timeout = 1.0
+## interface to send ping from (ping -I <INTERFACE>)
+# interface = ""
 ```
+
 ### Measurements & Fields:
+
 - packets_transmitted ( from ping output )
+- reply_received ( increasing only on valid metric from echo reply; e.g. a 'Destination net unreachable' reply will increment packets_received but not reply_received )
 - packets_received ( from ping output )
@@ -25,12 +32,16 @@ This input plugin will measures the round-trip
     - average_response_ms ( compute from minimum_response_ms and maximum_response_ms )
     - minimum_response_ms ( from ping output )
     - maximum_response_ms ( from ping output )
-	
+
 ### Tags:
-- server
+
+- host
+- url
 
 ### Example Output:
+
 ```
+$ ./telegraf -config telegraf.conf -input-filter ping -test
 * Plugin: ping, Collection 1
 ping,host=WIN-PBAPLP511R7,url=www.google.com average_response_ms=7i,maximum_response_ms=9i,minimum_response_ms=7i,packets_received=4i,packets_transmitted=4i,percent_packet_loss=0,percent_reply_loss=0,reply_received=4i 1469879119000000000
 ```
diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go
index 32264eec7..f5256750d 100644
--- a/plugins/inputs/ping/ping.go
+++ b/plugins/inputs/ping/ping.go
@@ -49,7 +49,7 @@ const sampleConfig = `
   ## NOTE: this plugin forks the ping command. You may need to set capabilities
   ## via setcap cap_net_raw+p /bin/ping
   #
-  ## urls to ping
+  ## List of urls to ping
   urls = ["www.google.com"] # required
   ## number of pings to send per collection (ping -c <COUNT>)
   # count = 1

From dff216c44d5fae38db0ae78a5dc67a320b5c5397 Mon Sep 17 00:00:00 2001
From: ingosus 
Date: Thu, 13 Apr 2017 22:59:28 +0300
Subject: [PATCH 0198/1302] Feature #1820: add testing without outputs (#2446)

---
 CHANGELOG.md             | 1 +
 cmd/telegraf/telegraf.go | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a2d9fc68e..dd3ee39df 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -68,6 +68,7 @@ be deprecated eventually.
 - [#2637](https://github.com/influxdata/telegraf/issues/2637): Add support for precision in http_listener
 - [#2636](https://github.com/influxdata/telegraf/pull/2636): Add `message_len_max` option to `kafka_consumer` input
 - [#1100](https://github.com/influxdata/telegraf/issues/1100): Add collectd parser
+- [#1820](https://github.com/influxdata/telegraf/issues/1820): Easier plugin testing without outputs
 
 ### Bugfixes
 
diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go
index 40e90a1ec..af11e6682 100644
--- a/cmd/telegraf/telegraf.go
+++ b/cmd/telegraf/telegraf.go
@@ -144,7 +144,7 @@ func reloadLoop(
 				log.Fatal("E! " + err.Error())
 			}
 		}
-		if len(c.Outputs) == 0 {
+		if !*fTest && len(c.Outputs) == 0 {
 			log.Fatalf("E! Error: no outputs found, did you provide a valid config file?")
 		}
 		if len(c.Inputs) == 0 {

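With this change, a configuration containing only inputs can be exercised in test mode, where it previously failed with the "no outputs found" error. A usage sketch (the config file name is hypothetical):

```
# inputs-only.conf defines [[inputs.*]] sections but no [[outputs.*]]
telegraf -config inputs-only.conf -test
```
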
From cadd845b36e7ed745e136e4bc42ff4caab6b1c82 Mon Sep 17 00:00:00 2001
From: calerogers 
Date: Thu, 13 Apr 2017 15:53:02 -0700
Subject: [PATCH 0199/1302] Irqstat input plugin (#2494)

closes #2469
---
 CHANGELOG.md                                 |   1 +
 README.md                                    |   1 +
 plugins/inputs/all/all.go                    |   1 +
 plugins/inputs/interrupts/README.md          |  35 +++++
 plugins/inputs/interrupts/interrupts.go      | 140 +++++++++++++++++++
 plugins/inputs/interrupts/interrupts_test.go |  59 ++++++++
 6 files changed, 237 insertions(+)
 create mode 100644 plugins/inputs/interrupts/README.md
 create mode 100644 plugins/inputs/interrupts/interrupts.go
 create mode 100644 plugins/inputs/interrupts/interrupts_test.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index dd3ee39df..d4792790f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -41,6 +41,7 @@ be deprecated eventually.
 
 ### Features
 
+- [#2494](https://github.com/influxdata/telegraf/pull/2494): Add interrupts input plugin.
 - [#2094](https://github.com/influxdata/telegraf/pull/2094): Add generic socket listener & writer.
 - [#2204](https://github.com/influxdata/telegraf/pull/2204): Extend http_response to support searching for a substring in response. Return 1 if found, else 0.
 - [#2137](https://github.com/influxdata/telegraf/pull/2137): Added userstats to mysql input plugin.
diff --git a/README.md b/README.md
index f46c2e298..2dc6997d6 100644
--- a/README.md
+++ b/README.md
@@ -123,6 +123,7 @@ configuration options.
 * [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
 * [internal](./plugins/inputs/internal)
 * [influxdb](./plugins/inputs/influxdb)
+* [interrupts](./plugins/inputs/interrupts)
 * [ipmi_sensor](./plugins/inputs/ipmi_sensor)
 * [iptables](./plugins/inputs/iptables)
 * [jolokia](./plugins/inputs/jolokia)
diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index 983179e90..f7207da84 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -30,6 +30,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
 	_ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
 	_ "github.com/influxdata/telegraf/plugins/inputs/internal"
+	_ "github.com/influxdata/telegraf/plugins/inputs/interrupts"
 	_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
 	_ "github.com/influxdata/telegraf/plugins/inputs/iptables"
 	_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
diff --git a/plugins/inputs/interrupts/README.md b/plugins/inputs/interrupts/README.md
new file mode 100644
index 000000000..aec30094e
--- /dev/null
+++ b/plugins/inputs/interrupts/README.md
@@ -0,0 +1,35 @@
+# Interrupts Input Plugin
+
+The interrupts plugin gathers metrics about IRQs from `/proc/interrupts` and `/proc/softirqs`.
+
+### Configuration
+```
+[[inputs.interrupts]]
+  ## A list of IRQs to include for metric ingestion, if not specified
+  ## will default to collecting all IRQs.
+  include = ["0", "1", "30", "NET_RX"]
+```
+
+### Measurements
+There are two measurements reported by this plugin.
+- `interrupts` gathers metrics from the `/proc/interrupts` file
+- `soft_interrupts` gathers metrics from the `/proc/softirqs` file
+
+### Fields
+- CPUx: the number of interrupts for the IRQ handled by that CPU
+- total: total number of interrupts for all CPUs
+
+### Tags
+- irq: the IRQ
+- type: the type of interrupt
+- device: the name of the device that is located at that IRQ
+
+### Example Output
+```
+./telegraf -config ~/interrupts_config.conf -test
+* Plugin: inputs.interrupts, Collection 1
+> interrupts,irq=0,type=IO-APIC,device=2-edge\ timer,host=hostname CPU0=23i,total=23i 1489346531000000000
+> interrupts,irq=1,host=hostname,type=IO-APIC,device=1-edge\ i8042 CPU0=9i,total=9i 1489346531000000000
+> interrupts,irq=30,type=PCI-MSI,device=65537-edge\ virtio1-input.0,host=hostname CPU0=1i,total=1i 1489346531000000000
+> soft_interrupts,irq=NET_RX,host=hostname CPU0=280879i,total=280879i 1489346531000000000
+```
diff --git a/plugins/inputs/interrupts/interrupts.go b/plugins/inputs/interrupts/interrupts.go
new file mode 100644
index 000000000..1feb6441c
--- /dev/null
+++ b/plugins/inputs/interrupts/interrupts.go
@@ -0,0 +1,140 @@
+package interrupts
+
+import (
+	"bufio"
+	"fmt"
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/inputs"
+	"io/ioutil"
+	"strconv"
+	"strings"
+)
+
+type Interrupts struct{}
+
+type IRQ struct {
+	ID     string
+	Type   string
+	Device string
+	Total  int64
+	Cpus   []int64
+}
+
+func NewIRQ(id string) *IRQ {
+	return &IRQ{ID: id, Cpus: []int64{}}
+}
+
+const sampleConfig = `
+  ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.
+  # [inputs.interrupts.tagdrop]
+    # irq = [ "NET_RX", "TASKLET" ]
+`
+
+func (s *Interrupts) Description() string {
+	return "This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs."
+}
+
+func (s *Interrupts) SampleConfig() string {
+	return sampleConfig
+}
+
+func parseInterrupts(irqdata string) ([]IRQ, error) {
+	var irqs []IRQ
+	var cpucount int
+	scanner := bufio.NewScanner(strings.NewReader(irqdata))
+	ok := scanner.Scan()
+	if ok {
+		cpus := strings.Fields(scanner.Text())
+		if cpus[0] == "CPU0" {
+			cpucount = len(cpus)
+		}
+	} else if scanner.Err() != nil {
+		return irqs, fmt.Errorf("Reading %s: %s", scanner.Text(), scanner.Err())
+	}
+	for scanner.Scan() {
+		fields := strings.Fields(scanner.Text())
+		if !strings.HasSuffix(fields[0], ":") {
+			continue
+		}
+		irqid := strings.TrimRight(fields[0], ":")
+		irq := NewIRQ(irqid)
+		irqvals := fields[1:len(fields)]
+		for i := 0; i < cpucount; i++ {
+			if i < len(irqvals) {
+				irqval, err := strconv.ParseInt(irqvals[i], 10, 64)
+				if err != nil {
+					return irqs, fmt.Errorf("Unable to parse %q from %q: %s", irqvals[i], scanner.Text(), err)
+				}
+				irq.Cpus = append(irq.Cpus, irqval)
+			}
+		}
+		for _, irqval := range irq.Cpus {
+			irq.Total += irqval
+		}
+		_, err := strconv.ParseInt(irqid, 10, 64)
+		if err == nil && len(fields) >= cpucount+2 {
+			irq.Type = fields[cpucount+1]
+			irq.Device = strings.Join(fields[cpucount+2:], " ")
+		} else if len(fields) > cpucount {
+			irq.Type = strings.Join(fields[cpucount+1:], " ")
+		}
+		irqs = append(irqs, *irq)
+	}
+	return irqs, nil
+}
+
+func fileToString(path string) (string, error) {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return "", err
+	}
+	content := string(data)
+	return content, nil
+}
+
+func gatherTagsFields(irq IRQ) (map[string]string, map[string]interface{}) {
+	tags := map[string]string{"irq": irq.ID, "type": irq.Type, "device": irq.Device}
+	fields := map[string]interface{}{"total": irq.Total}
+	for i := 0; i < len(irq.Cpus); i++ {
+		cpu := fmt.Sprintf("CPU%d", i)
+		fields[cpu] = irq.Cpus[i]
+	}
+	return tags, fields
+}
+
+func (s *Interrupts) Gather(acc telegraf.Accumulator) error {
+	irqdata, err := fileToString("/proc/interrupts")
+	if err != nil {
+		acc.AddError(fmt.Errorf("Reading %s: %s", "/proc/interrupts", err))
+	}
+	irqs, err := parseInterrupts(irqdata)
+	if err != nil {
+		acc.AddError(fmt.Errorf("Parsing %s: %s", "/proc/interrupts", err))
+	} else {
+		for _, irq := range irqs {
+			tags, fields := gatherTagsFields(irq)
+			acc.AddFields("interrupts", fields, tags)
+		}
+	}
+
+	irqdata, err = fileToString("/proc/softirqs")
+	if err != nil {
+		acc.AddError(fmt.Errorf("Reading %s: %s", "/proc/softirqs", err))
+	}
+	irqs, err = parseInterrupts(irqdata)
+	if err != nil {
+		acc.AddError(fmt.Errorf("Parsing %s: %s", "/proc/softirqs", err))
+	} else {
+		for _, irq := range irqs {
+			tags, fields := gatherTagsFields(irq)
+			acc.AddFields("softirqs", fields, tags)
+		}
+	}
+	return nil
+}
+
+func init() {
+	inputs.Add("interrupts", func() telegraf.Input {
+		return &Interrupts{}
+	})
+}
diff --git a/plugins/inputs/interrupts/interrupts_test.go b/plugins/inputs/interrupts/interrupts_test.go
new file mode 100644
index 000000000..d968eb094
--- /dev/null
+++ b/plugins/inputs/interrupts/interrupts_test.go
@@ -0,0 +1,59 @@
+package interrupts
+
+import (
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"testing"
+)
+
+func TestParseInterrupts(t *testing.T) {
+	interruptStr := `           CPU0       CPU1
+  0:        134          0   IO-APIC-edge      timer
+  1:          7          3   IO-APIC-edge      i8042
+NMI:          0          0   Non-maskable interrupts
+LOC: 2338608687 2334309625   Local timer interrupts
+MIS:          0
+NET_RX:     867028		225
+TASKLET:	205			0`
+
+	parsed := []IRQ{
+		IRQ{
+			ID: "0", Type: "IO-APIC-edge", Device: "timer",
+			Cpus: []int64{int64(134), int64(0)}, Total: int64(134),
+		},
+		IRQ{
+			ID: "1", Type: "IO-APIC-edge", Device: "i8042",
+			Cpus: []int64{int64(7), int64(3)}, Total: int64(10),
+		},
+		IRQ{
+			ID: "NMI", Type: "Non-maskable interrupts",
+			Cpus: []int64{int64(0), int64(0)}, Total: int64(0),
+		},
+		IRQ{
+			ID: "LOC", Type: "Local timer interrupts",
+			Cpus:  []int64{int64(2338608687), int64(2334309625)},
+			Total: int64(4672918312),
+		},
+		IRQ{
+			ID: "MIS", Cpus: []int64{int64(0)}, Total: int64(0),
+		},
+		IRQ{
+			ID: "NET_RX", Cpus: []int64{int64(867028), int64(225)},
+			Total: int64(867253),
+		},
+		IRQ{
+			ID: "TASKLET", Cpus: []int64{int64(205), int64(0)},
+			Total: int64(205),
+		},
+	}
+	got, err := parseInterrupts(interruptStr)
+	require.Equal(t, nil, err)
+	require.NotEqual(t, 0, len(got))
+	require.Equal(t, len(got), len(parsed))
+	for i := 0; i < len(parsed); i++ {
+		assert.Equal(t, parsed[i], got[i])
+		for k := 0; k < len(parsed[i].Cpus); k++ {
+			assert.Equal(t, parsed[i].Cpus[k], got[i].Cpus[k])
+		}
+	}
+}

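As a sketch of the parsing logic above, a hypothetical test showing how a single /proc/interrupts row becomes an IRQ value. This uses the string-based parseInterrupts signature introduced in this patch; the following patch switches it to an io.Reader:

```go
package interrupts

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestParseSingleRow(t *testing.T) {
	// Header row establishes the CPU count; the data row follows.
	data := "           CPU0       CPU1\n" +
		"  0:        134          0   IO-APIC-edge      timer"
	irqs, err := parseInterrupts(data)
	require.NoError(t, err)
	require.Len(t, irqs, 1)
	// A numeric IRQ line keeps its type and device separate.
	require.Equal(t, "0", irqs[0].ID)
	require.Equal(t, "IO-APIC-edge", irqs[0].Type)
	require.Equal(t, "timer", irqs[0].Device)
	require.Equal(t, int64(134), irqs[0].Total)
}
```
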
From a12e082dbe875fec6bac5e027248694bbccb72b3 Mon Sep 17 00:00:00 2001
From: calerogers 
Date: Fri, 14 Apr 2017 13:40:36 -0700
Subject: [PATCH 0200/1302] Refactor interrupts plugin code (#2670)

---
 plugins/inputs/interrupts/README.md          |  6 +-
 plugins/inputs/interrupts/interrupts.go      | 63 +++++++-------------
 plugins/inputs/interrupts/interrupts_test.go |  5 +-
 3 files changed, 29 insertions(+), 45 deletions(-)

diff --git a/plugins/inputs/interrupts/README.md b/plugins/inputs/interrupts/README.md
index aec30094e..f823aae07 100644
--- a/plugins/inputs/interrupts/README.md
+++ b/plugins/inputs/interrupts/README.md
@@ -5,9 +5,9 @@ The interrupts plugin gathers metrics about IRQs from `/proc/interrupts` and `/p
 ### Configuration
 ```
 [[inputs.interrupts]]
-  ## A list of IRQs to include for metric ingestion, if not specified
-  ## will default to collecting all IRQs.
-  include = ["0", "1", "30", "NET_RX"]
+  ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.
+  # [inputs.interrupts.tagdrop]
+    # irq = [ "NET_RX", "TASKLET" ]
 ```
 
 ### Measurements
diff --git a/plugins/inputs/interrupts/interrupts.go b/plugins/inputs/interrupts/interrupts.go
index 1feb6441c..75cbf3be1 100644
--- a/plugins/inputs/interrupts/interrupts.go
+++ b/plugins/inputs/interrupts/interrupts.go
@@ -5,7 +5,8 @@ import (
 	"fmt"
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/plugins/inputs"
-	"io/ioutil"
+	"io"
+	"os"
 	"strconv"
 	"strings"
 )
@@ -38,18 +39,16 @@ func (s *Interrupts) SampleConfig() string {
 	return sampleConfig
 }
 
-func parseInterrupts(irqdata string) ([]IRQ, error) {
+func parseInterrupts(r io.Reader) ([]IRQ, error) {
 	var irqs []IRQ
 	var cpucount int
-	scanner := bufio.NewScanner(strings.NewReader(irqdata))
-	ok := scanner.Scan()
-	if ok {
+	scanner := bufio.NewScanner(r)
+	if scanner.Scan() {
 		cpus := strings.Fields(scanner.Text())
-		if cpus[0] == "CPU0" {
-			cpucount = len(cpus)
+		if cpus[0] != "CPU0" {
+			return nil, fmt.Errorf("Expected first line to start with CPU0, but was %s", scanner.Text())
 		}
-	} else if scanner.Err() != nil {
-		return irqs, fmt.Errorf("Reading %s: %s", scanner.Text(), scanner.Err())
+		cpucount = len(cpus)
 	}
 	for scanner.Scan() {
 		fields := strings.Fields(scanner.Text())
@@ -80,16 +79,10 @@ func parseInterrupts(irqdata string) ([]IRQ, error) {
 		}
 		irqs = append(irqs, *irq)
 	}
-	return irqs, nil
-}
-
-func fileToString(path string) (string, error) {
-	data, err := ioutil.ReadFile(path)
-	if err != nil {
-		return "", err
+	if scanner.Err() != nil {
+		return nil, fmt.Errorf("Error scanning file: %s", scanner.Err())
 	}
-	content := string(data)
-	return content, nil
+	return irqs, nil
 }
 
 func gatherTagsFields(irq IRQ) (map[string]string, map[string]interface{}) {
@@ -103,31 +96,21 @@ func gatherTagsFields(irq IRQ) (map[string]string, map[string]interface{}) {
 }
 
 func (s *Interrupts) Gather(acc telegraf.Accumulator) error {
-	irqdata, err := fileToString("/proc/interrupts")
-	if err != nil {
-		acc.AddError(fmt.Errorf("Reading %s: %s", "/proc/interrupts", err))
-	}
-	irqs, err := parseInterrupts(irqdata)
-	if err != nil {
-		acc.AddError(fmt.Errorf("Parsing %s: %s", "/proc/interrupts", err))
-	} else {
-		for _, irq := range irqs {
-			tags, fields := gatherTagsFields(irq)
-			acc.AddFields("interrupts", fields, tags)
+	for measurement, file := range map[string]string{"interrupts": "/proc/interrupts", "soft_interrupts": "/proc/softirqs"} {
+		f, err := os.Open(file)
+		if err != nil {
+			acc.AddError(fmt.Errorf("Could not open file: %s", file))
+			continue
+		}
+		defer f.Close()
+		irqs, err := parseInterrupts(f)
+		if err != nil {
+			acc.AddError(fmt.Errorf("Parsing %s: %s", file, err))
+			continue
 		}
-	}
-
-	irqdata, err = fileToString("/proc/softirqs")
-	if err != nil {
-		acc.AddError(fmt.Errorf("Reading %s: %s", "/proc/softirqs", err))
-	}
-	irqs, err = parseInterrupts(irqdata)
-	if err != nil {
-		acc.AddError(fmt.Errorf("Parsing %s: %s", "/proc/softirqs", err))
-	} else {
 		for _, irq := range irqs {
 			tags, fields := gatherTagsFields(irq)
-			acc.AddFields("softirqs", fields, tags)
+			acc.AddFields(measurement, fields, tags)
 		}
 	}
 	return nil
diff --git a/plugins/inputs/interrupts/interrupts_test.go b/plugins/inputs/interrupts/interrupts_test.go
index d968eb094..6c76c8504 100644
--- a/plugins/inputs/interrupts/interrupts_test.go
+++ b/plugins/inputs/interrupts/interrupts_test.go
@@ -1,6 +1,7 @@
 package interrupts
 
 import (
+	"bytes"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"testing"
@@ -15,7 +16,7 @@ LOC: 2338608687 2334309625   Local timer interrupts
 MIS:          0
 NET_RX:     867028		225
 TASKLET:	205			0`
-
+	f := bytes.NewBufferString(interruptStr)
 	parsed := []IRQ{
 		IRQ{
 			ID: "0", Type: "IO-APIC-edge", Device: "timer",
@@ -46,7 +47,7 @@ TASKLET:	205			0`
 			Total: int64(205),
 		},
 	}
-	got, err := parseInterrupts(interruptStr)
+	got, err := parseInterrupts(f)
 	require.Equal(t, nil, err)
 	require.NotEqual(t, 0, len(got))
 	require.Equal(t, len(got), len(parsed))

From b90a5b48a1e322a8484ae496311ff7e95e4eb5a2 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Fri, 14 Apr 2017 13:47:43 -0700
Subject: [PATCH 0201/1302] Improve logparser README (#2664)

---
 plugins/inputs/logparser/README.md    | 142 ++++++++++++++++++++++----
 plugins/inputs/logparser/grok/grok.go |   1 +
 2 files changed, 121 insertions(+), 22 deletions(-)

diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md
index 5973d9f42..177d77a98 100644
--- a/plugins/inputs/logparser/README.md
+++ b/plugins/inputs/logparser/README.md
@@ -1,6 +1,6 @@
-# logparser Input Plugin
+# Logparser Input Plugin
 
-The logparser plugin streams and parses the given logfiles. Currently it only
+The `logparser` plugin streams and parses the given logfiles. Currently it
 has the capability of parsing "grok" patterns from logfiles, which also supports
 regex patterns.
 
@@ -37,35 +37,28 @@ regex patterns.
     '''
 ```
 
-## Grok Parser
-
-The grok parser uses a slightly modified version of logstash "grok" patterns,
-with the format
-
-```
-%{<capture_syntax>[:<semantic_name>][:<modifier>]}
-```
-
-Telegraf has many of it's own
-[built-in patterns](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/logparser/grok/patterns/influx-patterns),
-as well as supporting
-[logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns).
-
+### Grok Parser
 
 The best way to get acquainted with grok patterns is to read the logstash docs,
 which are available here:
   https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html
 
+The Telegraf grok parser uses a slightly modified version of logstash "grok"
+patterns, with the format
 
-If you need help building patterns to match your logs,
-you will find the http://grokdebug.herokuapp.com application quite useful!
+```
+%{<capture_syntax>[:<semantic_name>][:<modifier>]}
+```
 
+The `capture_syntax` defines the grok pattern that's used to parse the input
+line and the `semantic_name` is used to name the field or tag.  The extension
+`modifier` controls the data type that the parsed item is converted to or
+other special handling.
 
 By default all named captures are converted into string fields.
-Modifiers can be used to convert captures to other types or tags.
 Timestamp modifiers can be used to convert captures to the timestamp of the
- parsed metric.
-
+parsed metric.  If no timestamp is parsed the metric will be created using the
+current time.
 
 - Available modifiers:
   - string   (default if nothing is specified)
@@ -91,7 +84,112 @@ Timestamp modifiers can be used to convert captures to the timestamp of the
   - ts-epochnano     (nanoseconds since unix epoch)
   - ts-"CUSTOM"
 
-
 CUSTOM time layouts must be within quotes and be the representation of the
 "reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`
 See https://golang.org/pkg/time/#Parse for more details.
+
+Telegraf has many of its own
+[built-in patterns](./grok/patterns/influx-patterns),
+as well as supporting
+[logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns).
+
+If you need help building patterns to match your logs,
+you will find the https://grokdebug.herokuapp.com application quite useful!
+
+#### Timestamp Examples
+
+This example input and config parses a file using a custom timestamp conversion:
+
+```
+2017-02-21 13:10:34 value=42
+```
+
+```toml
+[[inputs.logparser]]
+  [inputs.logparser.grok]
+    patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} value=%{NUMBER:value:int}']
+```
+
+This example parses a file using a built-in conversion and a custom pattern:
+
+```
+Wed Apr 12 13:10:34 PST 2017 value=42
+```
+
+```toml
+[[inputs.logparser]]
+  [inputs.logparser.grok]
+    patterns = ["%{TS_UNIX:timestamp:ts-unix} value=%{NUMBER:value:int}"]
+    custom_patterns = '''
+      TS_UNIX %{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ} %{YEAR}
+    '''
+```
+
+#### TOML Escaping
+
+When saving patterns to the configuration file, keep in mind the different TOML
+[string](https://github.com/toml-lang/toml#string) types and the escaping
+rules for each.  These escaping rules must be applied in addition to the
+escaping required by the grok syntax.  Using the multi-line literal string
+syntax with `'''` may be useful.
+
+The following config examples will parse this input file:
+
+```
+|42|\uD83D\uDC2F|'telegraf'|
+```
+
+Since `|` is a special character in the grok language, we must escape it to
+get a literal `|`.  With a basic TOML string, special characters such as
+backslash must be escaped, requiring us to escape the backslash a second time.
+
+```toml
+[[inputs.logparser]]
+  [inputs.logparser.grok]
+    patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"]
+    custom_patterns = "UNICODE_ESCAPE (?:\\\\u[0-9A-F]{4})+"
+```
+
+We cannot use a literal TOML string for the pattern, because we cannot match a
+`'` within it.  However, it works well for the custom pattern.
+```toml
+[[inputs.logparser]]
+  [inputs.logparser.grok]
+    patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"]
+    custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+'
+```
+
+A multi-line literal string allows us to encode the pattern:
+```toml
+[[inputs.logparser]]
+  [inputs.logparser.grok]
+    patterns = ['''
+	  \|%{NUMBER:value:int}\|%{UNICODE_ESCAPE:escape}\|'%{WORD:name}'\|
+	''']
+    custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+'
+```
+
+### Tips for creating patterns
+
+Writing complex patterns can be difficult; here is some advice for writing a
+new pattern or testing a pattern developed [online](https://grokdebug.herokuapp.com).
+
+Create a file output that writes to stdout, and disable other outputs while
+testing.  This will allow you to see the captured metrics.  Keep in mind that
+the file output will only print once per `flush_interval`.
+
+```toml
+[[outputs.file]]
+  files = ["stdout"]
+```
+
+- Start with a file containing only a single line of your input.
+- Remove all but the first token or piece of the line.
+- Add the section of your pattern to match this piece to your configuration file.
+- Verify that the metric is parsed successfully by running Telegraf.
+- If successful, add the next token, update the pattern and retest.
+- Continue one token at a time until the entire line is successfully parsed.
+
+### Additional Resources
+
+- https://www.influxdata.com/telegraf-correlate-log-metrics-data-performance-bottlenecks/
diff --git a/plugins/inputs/logparser/grok/grok.go b/plugins/inputs/logparser/grok/grok.go
index 7131b8249..f684e9339 100644
--- a/plugins/inputs/logparser/grok/grok.go
+++ b/plugins/inputs/logparser/grok/grok.go
@@ -168,6 +168,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
 	}
 
 	if len(values) == 0 {
+		log.Printf("D! Grok no match found for: %q", line)
 		return nil, nil
 	}
 

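For illustration, a hedged config sketch combining type conversion with the tag modifier mentioned above, assuming the standard logstash IP/WORD/NUMBER patterns; the file path and log format are hypothetical:

```toml
# Parses lines like: 1.2.3.4 GET 200 0.023
[[inputs.logparser]]
  files = ["/var/log/example.log"]
  [inputs.logparser.grok]
    patterns = ['%{IP:client:tag} %{WORD:method:tag} %{NUMBER:status:int} %{NUMBER:took:float}']
```
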
From b968759d1015f47bf60de5ec05e762a994692110 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Thu, 13 Apr 2017 18:56:04 -0700
Subject: [PATCH 0202/1302] Use variadic disk.IOCounters() function

---
 Godeps                      | 2 +-
 plugins/inputs/system/ps.go | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Godeps b/Godeps
index 0a7cc30a5..510f5b1ed 100644
--- a/Godeps
+++ b/Godeps
@@ -45,7 +45,7 @@ github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4
 github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
 github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
 github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
-github.com/shirou/gopsutil dfbb3e40da8d6fcd1aa0d87003e965fe0ca745ea
+github.com/shirou/gopsutil 70693b6a3da51a8a686d31f1b346077bbc066062
 github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
 github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
 github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go
index d25327812..20e01742a 100644
--- a/plugins/inputs/system/ps.go
+++ b/plugins/inputs/system/ps.go
@@ -121,7 +121,7 @@ func (s *systemPS) NetConnections() ([]net.ConnectionStat, error) {
 }
 
 func (s *systemPS) DiskIO(names []string) (map[string]disk.IOCountersStat, error) {
-	m, err := disk.IOCountersForNames(names)
+	m, err := disk.IOCounters(names...)
 	if err == internal.NotImplementedError {
 		return nil, nil
 	}

From dc5779e2a7f60bd28db4161115d0e2b871b27e76 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Fri, 14 Apr 2017 17:32:14 -0700
Subject: [PATCH 0203/1302] Rename heap_objects_bytes to heap_objects in
 internal plugin. (#2674)

* Rename heap_objects_bytes to heap_objects in internal plugin.

This field does not contain bytes

fixes #2671
---
 CHANGELOG.md                        | 1 +
 plugins/inputs/internal/internal.go | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d4792790f..989e3f7a9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -102,6 +102,7 @@ be deprecated eventually.
 - [#2410](https://github.com/influxdata/telegraf/issues/2410): Fix connection leak in postgresql.
 - [#2628](https://github.com/influxdata/telegraf/issues/2628): Set default measurement name for snmp input.
 - [#2649](https://github.com/influxdata/telegraf/pull/2649): Improve performance of diskio with many disks
+- [#2671](https://github.com/influxdata/telegraf/issues/2671): The internal input plugin uses the wrong units for `heap_objects`
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/plugins/inputs/internal/internal.go b/plugins/inputs/internal/internal.go
index f6123edd5..8b5286f56 100644
--- a/plugins/inputs/internal/internal.go
+++ b/plugins/inputs/internal/internal.go
@@ -48,7 +48,7 @@ func (s *Self) Gather(acc telegraf.Accumulator) error {
 			"heap_idle_bytes":     m.HeapIdle,     // bytes in idle spans
 			"heap_in_use_bytes":   m.HeapInuse,    // bytes in non-idle span
 			"heap_released_bytes": m.HeapReleased, // bytes released to the OS
-			"heap_objects_bytes":  m.HeapObjects,  // total number of allocated objects
+			"heap_objects":        m.HeapObjects,  // total number of allocated objects
 			"num_gc":              m.NumGC,
 		}
 		acc.AddFields("internal_memstats", fields, map[string]string{})

From 58ee96267900a5451efd9eb488b7f4e72fe4694d Mon Sep 17 00:00:00 2001
From: François de Metz 
Date: Mon, 17 Apr 2017 18:42:03 +0000
Subject: [PATCH 0204/1302] GitHub webhooks: check signature (#2493)

---
 CHANGELOG.md                                  |  1 +
 etc/telegraf.conf                             |  1 +
 plugins/inputs/webhooks/github/README.md      |  2 ++
 .../inputs/webhooks/github/github_webhooks.go | 28 ++++++++++++++--
 .../webhooks/github/github_webhooks_test.go   | 33 +++++++++++++++++++
 plugins/inputs/webhooks/webhooks.go           |  1 +
 6 files changed, 63 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 989e3f7a9..6715ef3bb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -70,6 +70,7 @@ be deprecated eventually.
 - [#2636](https://github.com/influxdata/telegraf/pull/2636): Add `message_len_max` option to `kafka_consumer` input
 - [#1100](https://github.com/influxdata/telegraf/issues/1100): Add collectd parser
 - [#1820](https://github.com/influxdata/telegraf/issues/1820): Easier plugin testing without outputs
+- [#2493](https://github.com/influxdata/telegraf/pull/2493): Check signature in the GitHub webhook plugin
 
 ### Bugfixes
 
diff --git a/etc/telegraf.conf b/etc/telegraf.conf
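A sketch of the documented rule (not Telegraf's actual implementation): precision defaults to the timestamp order of the collection interval, capped at one second:

```go
package main

import (
	"fmt"
	"time"
)

// defaultPrecision mirrors the documented behavior: the precision
// matches the order of magnitude of the interval, with a 1s maximum.
func defaultPrecision(interval time.Duration) time.Duration {
	switch {
	case interval >= time.Second:
		return time.Second
	case interval >= time.Millisecond:
		return time.Millisecond
	case interval >= time.Microsecond:
		return time.Microsecond
	default:
		return time.Nanosecond
	}
}

func main() {
	fmt.Println(defaultPrecision(10 * time.Second))       // 1s
	fmt.Println(defaultPrecision(250 * time.Millisecond)) // 1ms
}
```
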
index 63e41d7bb..07ae5ac8f 100644
--- a/etc/telegraf.conf
+++ b/etc/telegraf.conf
@@ -2382,6 +2382,7 @@
 #
 #   [inputs.webhooks.github]
 #     path = "/github"
+#     # secret = ""
 #
 #   [inputs.webhooks.mandrill]
 #     path = "/mandrill"
diff --git a/plugins/inputs/webhooks/github/README.md b/plugins/inputs/webhooks/github/README.md
index 68594cd78..908d92a63 100644
--- a/plugins/inputs/webhooks/github/README.md
+++ b/plugins/inputs/webhooks/github/README.md
@@ -2,6 +2,8 @@
 
 You should configure your Organization's Webhooks to point at the `webhooks` service. To do this go to `github.com/{my_organization}` and click `Settings > Webhooks > Add webhook`. In the resulting menu set `Payload URL` to `http://:1619/github`, `Content type` to `application/json` and under the section `Which events would you like to trigger this webhook?` select 'Send me everything'. By default all of the events will write to the `github_webhooks` measurement, this is configurable by setting the `measurement_name` in the config file.
 
+You can also add a secret that will be used by telegraf to verify the authenticity of the requests.
+
 ## Events
 
 The titles of the following sections are links to the full payloads and details for each event. The body contains what information from the event is persisted. The format is as follows:
diff --git a/plugins/inputs/webhooks/github/github_webhooks.go b/plugins/inputs/webhooks/github/github_webhooks.go
index a31c6fdf2..0bb792bf5 100644
--- a/plugins/inputs/webhooks/github/github_webhooks.go
+++ b/plugins/inputs/webhooks/github/github_webhooks.go
@@ -1,6 +1,9 @@
 package github
 
 import (
+	"crypto/hmac"
+	"crypto/sha1"
+	"encoding/hex"
 	"encoding/json"
 	"io/ioutil"
 	"log"
@@ -11,8 +14,9 @@ import (
 )
 
 type GithubWebhook struct {
-	Path string
-	acc  telegraf.Accumulator
+	Path   string
+	Secret string
+	acc    telegraf.Accumulator
 }
 
 func (gh *GithubWebhook) Register(router *mux.Router, acc telegraf.Accumulator) {
@@ -23,12 +27,19 @@ func (gh *GithubWebhook) Register(router *mux.Router, acc telegraf.Accumulator)
 
 func (gh *GithubWebhook) eventHandler(w http.ResponseWriter, r *http.Request) {
 	defer r.Body.Close()
-	eventType := r.Header["X-Github-Event"][0]
+	eventType := r.Header.Get("X-Github-Event")
 	data, err := ioutil.ReadAll(r.Body)
 	if err != nil {
 		w.WriteHeader(http.StatusBadRequest)
 		return
 	}
+
+	if gh.Secret != "" && !checkSignature(gh.Secret, data, r.Header.Get("X-Hub-Signature")) {
+		log.Printf("E! Failed to verify the github webhook signature\n")
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
 	e, err := NewEvent(data, eventType)
 	if err != nil {
 		w.WriteHeader(http.StatusBadRequest)
@@ -108,3 +119,14 @@ func NewEvent(data []byte, name string) (Event, error) {
 	}
 	return nil, &newEventError{"Not a recognized event type"}
 }
+
+func checkSignature(secret string, data []byte, signature string) bool {
+	return hmac.Equal([]byte(signature), []byte(generateSignature(secret, data)))
+}
+
+func generateSignature(secret string, data []byte) string {
+	mac := hmac.New(sha1.New, []byte(secret))
+	mac.Write(data)
+	result := mac.Sum(nil)
+	return "sha1=" + hex.EncodeToString(result)
+}
diff --git a/plugins/inputs/webhooks/github/github_webhooks_test.go b/plugins/inputs/webhooks/github/github_webhooks_test.go
index 0ec991726..65041e4a0 100644
--- a/plugins/inputs/webhooks/github/github_webhooks_test.go
+++ b/plugins/inputs/webhooks/github/github_webhooks_test.go
@@ -21,6 +21,19 @@ func GithubWebhookRequest(event string, jsonString string, t *testing.T) {
 	}
 }
 
+func GithubWebhookRequestWithSignature(event string, jsonString string, t *testing.T, signature string, expectedStatus int) {
+	var acc testutil.Accumulator
+	gh := &GithubWebhook{Path: "/github", Secret: "signature", acc: &acc}
+	req, _ := http.NewRequest("POST", "/github", strings.NewReader(jsonString))
+	req.Header.Add("X-Github-Event", event)
+	req.Header.Add("X-Hub-Signature", signature)
+	w := httptest.NewRecorder()
+	gh.eventHandler(w, req)
+	if w.Code != expectedStatus {
+		t.Errorf("POST "+event+" returned HTTP status code %v.\nExpected %v", w.Code, expectedStatus)
+	}
+}
+
 func TestCommitCommentEvent(t *testing.T) {
 	GithubWebhookRequest("commit_comment", CommitCommentEventJSON(), t)
 }
@@ -100,3 +113,23 @@ func TestTeamAddEvent(t *testing.T) {
 func TestWatchEvent(t *testing.T) {
 	GithubWebhookRequest("watch", WatchEventJSON(), t)
 }
+
+func TestEventWithSignatureFail(t *testing.T) {
+	GithubWebhookRequestWithSignature("watch", WatchEventJSON(), t, "signature", http.StatusBadRequest)
+}
+
+func TestEventWithSignatureSuccess(t *testing.T) {
+	GithubWebhookRequestWithSignature("watch", WatchEventJSON(), t, generateSignature("signature", []byte(WatchEventJSON())), http.StatusOK)
+}
+
+func TestCheckSignatureSuccess(t *testing.T) {
+	if !checkSignature("my_little_secret", []byte("random-signature-body"), "sha1=3dca279e731c97c38e3019a075dee9ebbd0a99f0") {
+		t.Errorf("check signature failed")
+	}
+}
+
+func TestCheckSignatureFailed(t *testing.T) {
+	if checkSignature("m_little_secret", []byte("random-signature-body"), "sha1=3dca279e731c97c38e3019a075dee9ebbd0a99f0") {
+		t.Errorf("check signature failed")
+	}
+}
diff --git a/plugins/inputs/webhooks/webhooks.go b/plugins/inputs/webhooks/webhooks.go
index fcddbebd7..bc8519d7a 100644
--- a/plugins/inputs/webhooks/webhooks.go
+++ b/plugins/inputs/webhooks/webhooks.go
@@ -47,6 +47,7 @@ func (wb *Webhooks) SampleConfig() string {
 
   [inputs.webhooks.github]
     path = "/github"
+    # secret = ""
 
   [inputs.webhooks.mandrill]
     path = "/mandrill"

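For senders that need to supply a valid signature, a minimal sketch using the same scheme as generateSignature above: "sha1=" plus the hex-encoded HMAC-SHA1 of the request body, keyed with the shared secret. The payload and secret are illustrative:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
)

// sign computes the X-Hub-Signature header value the plugin expects.
func sign(secret string, body []byte) string {
	mac := hmac.New(sha1.New, []byte(secret))
	mac.Write(body)
	return "sha1=" + hex.EncodeToString(mac.Sum(nil))
}

func main() {
	body := []byte(`{"action":"started"}`) // illustrative payload
	fmt.Println(sign("my_little_secret", body))
}
```
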
From 70b3e763e79d2c9dfaed228f9eaf9591655a1505 Mon Sep 17 00:00:00 2001
From: Ross McDonald 
Date: Mon, 17 Apr 2017 15:49:36 -0500
Subject: [PATCH 0205/1302] Add input for receiving papertrail webhooks (#2038)

---
 CHANGELOG.md                                  |   1 +
 plugins/inputs/webhooks/README.md             |   1 +
 plugins/inputs/webhooks/papertrail/README.md  |  32 ++++
 .../webhooks/papertrail/papertrail_test.go    | 181 ++++++++++++++++++
 .../papertrail/papertrail_webhooks.go         |  79 ++++++++
 .../papertrail/papertrail_webhooks_models.go  |  41 ++++
 plugins/inputs/webhooks/webhooks.go           |  13 +-
 plugins/inputs/webhooks/webhooks_test.go      |   7 +
 8 files changed, 351 insertions(+), 4 deletions(-)
 create mode 100644 plugins/inputs/webhooks/papertrail/README.md
 create mode 100644 plugins/inputs/webhooks/papertrail/papertrail_test.go
 create mode 100644 plugins/inputs/webhooks/papertrail/papertrail_webhooks.go
 create mode 100644 plugins/inputs/webhooks/papertrail/papertrail_webhooks_models.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6715ef3bb..0c7b7c2fd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -71,6 +71,7 @@ be deprecated eventually.
 - [#1100](https://github.com/influxdata/telegraf/issues/1100): Add collectd parser
 - [#1820](https://github.com/influxdata/telegraf/issues/1820): easier plugin testing without outputs
 - [#2493](https://github.com/influxdata/telegraf/pull/2493): Check signature in the GitHub webhook plugin
+- [#2038](https://github.com/influxdata/telegraf/issues/2038): Add papertrail support to webhooks
 
 ### Bugfixes
 
diff --git a/plugins/inputs/webhooks/README.md b/plugins/inputs/webhooks/README.md
index bc7714e9e..8b789e338 100644
--- a/plugins/inputs/webhooks/README.md
+++ b/plugins/inputs/webhooks/README.md
@@ -19,6 +19,7 @@ $ sudo service telegraf start
 - [Github](github/)
 - [Mandrill](mandrill/)
 - [Rollbar](rollbar/)
+- [Papertrail](papertrail/)
 
 ## Adding new webhooks plugin
 
diff --git a/plugins/inputs/webhooks/papertrail/README.md b/plugins/inputs/webhooks/papertrail/README.md
new file mode 100644
index 000000000..a3463dcaa
--- /dev/null
+++ b/plugins/inputs/webhooks/papertrail/README.md
@@ -0,0 +1,32 @@
+# papertrail webhooks
+
+Enables Telegraf to act as a [Papertrail Webhook](http://help.papertrailapp.com/kb/how-it-works/web-hooks/).
+
+## Events
+
+[Full documentation](http://help.papertrailapp.com/kb/how-it-works/web-hooks/#callback).
+
+Events from Papertrail come in two forms:
+
+* The [event-based callback](http://help.papertrailapp.com/kb/how-it-works/web-hooks/#callback):
+
+  * A point is created per event, with the timestamp as `received_at`
+  * Each point has a counter field (`count`), which is set to `1` (signifying the event occurred)
+  * Each event "hostname" object is converted to a `host` tag
+  * The "saved_search" name in the payload is added as an `event` tag
+
+* The [count-based callback](http://help.papertrailapp.com/kb/how-it-works/web-hooks/#count-only-webhooks)
+
+  * A point is created per timeseries object per count, with the timestamp as the "timeseries" key (the unix epoch of the event)
+  * Each point has a counter field (`count`), which is set to the value of each "timeseries" object
+  * Each count "source_name" object is converted to a `host` tag
+  * The "saved_search" name in the payload is added as an `event` tag
+
+The current functionality is very basic; however, it allows you to
+track the number of events by host and saved search.
+
+When an event is received, any point will look similar to:
+
+```
+papertrail,host=myserver.example.com,event=saved_search_name count=3i 1453248892000000000
+```
diff --git a/plugins/inputs/webhooks/papertrail/papertrail_test.go b/plugins/inputs/webhooks/papertrail/papertrail_test.go
new file mode 100644
index 000000000..14b8aec89
--- /dev/null
+++ b/plugins/inputs/webhooks/papertrail/papertrail_test.go
@@ -0,0 +1,181 @@
+package papertrail
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"strings"
+	"testing"
+
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/require"
+)
+
+const (
+	contentType = "application/x-www-form-urlencoded"
+)
+
+func post(pt *PapertrailWebhook, contentType string, body string) *httptest.ResponseRecorder {
+	req, _ := http.NewRequest("POST", "/", strings.NewReader(body))
+	req.Header.Set("Content-Type", contentType)
+	w := httptest.NewRecorder()
+	pt.eventHandler(w, req)
+	return w
+}
+
+func TestWrongContentType(t *testing.T) {
+	var acc testutil.Accumulator
+	pt := &PapertrailWebhook{Path: "/papertrail", acc: &acc}
+	form := url.Values{}
+	form.Set("payload", sampleEventPayload)
+	data := form.Encode()
+
+	resp := post(pt, "", data)
+	require.Equal(t, http.StatusUnsupportedMediaType, resp.Code)
+}
+
+func TestMissingPayload(t *testing.T) {
+	var acc testutil.Accumulator
+	pt := &PapertrailWebhook{Path: "/papertrail", acc: &acc}
+
+	resp := post(pt, contentType, "")
+	require.Equal(t, http.StatusBadRequest, resp.Code)
+}
+
+func TestPayloadNotJSON(t *testing.T) {
+	var acc testutil.Accumulator
+	pt := &PapertrailWebhook{Path: "/papertrail", acc: &acc}
+
+	resp := post(pt, contentType, "payload={asdf]")
+	require.Equal(t, http.StatusBadRequest, resp.Code)
+}
+
+func TestPayloadInvalidJSON(t *testing.T) {
+	var acc testutil.Accumulator
+	pt := &PapertrailWebhook{Path: "/papertrail", acc: &acc}
+
+	resp := post(pt, contentType, `payload={"value": 42}`)
+	require.Equal(t, http.StatusBadRequest, resp.Code)
+}
+
+func TestEventPayload(t *testing.T) {
+	var acc testutil.Accumulator
+	pt := &PapertrailWebhook{Path: "/papertrail", acc: &acc}
+
+	form := url.Values{}
+	form.Set("payload", sampleEventPayload)
+	resp := post(pt, contentType, form.Encode())
+	require.Equal(t, http.StatusOK, resp.Code)
+
+	fields := map[string]interface{}{
+		"count": uint64(1),
+	}
+
+	tags1 := map[string]string{
+		"event": "Important stuff",
+		"host":  "abc",
+	}
+	tags2 := map[string]string{
+		"event": "Important stuff",
+		"host":  "def",
+	}
+
+	acc.AssertContainsTaggedFields(t, "papertrail", fields, tags1)
+	acc.AssertContainsTaggedFields(t, "papertrail", fields, tags2)
+}
+
+func TestCountPayload(t *testing.T) {
+	var acc testutil.Accumulator
+	pt := &PapertrailWebhook{Path: "/papertrail", acc: &acc}
+	form := url.Values{}
+	form.Set("payload", sampleCountPayload)
+	resp := post(pt, contentType, form.Encode())
+	require.Equal(t, http.StatusOK, resp.Code)
+
+	fields1 := map[string]interface{}{
+		"count": uint64(5),
+	}
+	fields2 := map[string]interface{}{
+		"count": uint64(3),
+	}
+
+	tags1 := map[string]string{
+		"event": "Important stuff",
+		"host":  "arthur",
+	}
+	tags2 := map[string]string{
+		"event": "Important stuff",
+		"host":  "ford",
+	}
+
+	acc.AssertContainsTaggedFields(t, "papertrail", fields1, tags1)
+	acc.AssertContainsTaggedFields(t, "papertrail", fields2, tags2)
+}
+
+const sampleEventPayload = `{
+  "events": [
+    {
+      "id": 7711561783320576,
+      "received_at": "2011-05-18T20:30:02-07:00",
+      "display_received_at": "May 18 20:30:02",
+      "source_ip": "208.75.57.121",
+      "source_name": "abc",
+      "source_id": 2,
+      "hostname": "abc",
+      "program": "CROND",
+      "severity": "Info",
+      "facility": "Cron",
+      "message": "message body"
+    },
+    {
+      "id": 7711562567655424,
+      "received_at": "2011-05-18T20:30:02-07:00",
+      "display_received_at": "May 18 20:30:02",
+      "source_ip": "208.75.57.120",
+      "source_name": "server1",
+      "source_id": 19,
+      "hostname": "def",
+      "program": "CROND",
+      "severity": "Info",
+      "facility": "Cron",
+      "message": "A short event"
+    }
+  ],
+  "saved_search": {
+    "id": 42,
+    "name": "Important stuff",
+    "query": "cron OR server1",
+    "html_edit_url": "https://papertrailapp.com/searches/42/edit",
+    "html_search_url": "https://papertrailapp.com/searches/42"
+  },
+  "max_id": "7711582041804800",
+  "min_id": "7711561783320576"
+}`
+
+const sampleCountPayload = `{
+   "counts": [
+     {
+       "source_name": "arthur",
+       "source_id": 4,
+       "timeseries": {
+         "1453248895": 5
+       }
+     },
+     {
+       "source_name": "ford",
+       "source_id": 3,
+       "timeseries": {
+         "1453248927": 3
+       }
+     }
+   ],
+   "saved_search": {
+     "id": 42,
+     "name": "Important stuff",
+     "query": "cron OR server1",
+     "html_edit_url": "https://papertrailapp.com/searches/42/edit",
+     "html_search_url": "https://papertrailapp.com/searches/42"
+   },
+   "max_id": "7711582041804800",
+   "min_id": "7711561783320576"
+}`
diff --git a/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go b/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go
new file mode 100644
index 000000000..42453c130
--- /dev/null
+++ b/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go
@@ -0,0 +1,79 @@
+package papertrail
+
+import (
+	"encoding/json"
+	"log"
+	"net/http"
+	"time"
+
+	"github.com/gorilla/mux"
+	"github.com/influxdata/telegraf"
+)
+
+type PapertrailWebhook struct {
+	Path string
+	acc  telegraf.Accumulator
+}
+
+func (pt *PapertrailWebhook) Register(router *mux.Router, acc telegraf.Accumulator) {
+	router.HandleFunc(pt.Path, pt.eventHandler).Methods("POST")
+	log.Printf("I! Started the papertrail_webhook on %s", pt.Path)
+	pt.acc = acc
+}
+
+func (pt *PapertrailWebhook) eventHandler(w http.ResponseWriter, r *http.Request) {
+	if r.Header.Get("Content-Type") != "application/x-www-form-urlencoded" {
+		http.Error(w, "Unsupported Media Type", http.StatusUnsupportedMediaType)
+		return
+	}
+
+	data := r.PostFormValue("payload")
+	if data == "" {
+		http.Error(w, "Bad Request", http.StatusBadRequest)
+		return
+	}
+
+	var payload Payload
+	err := json.Unmarshal([]byte(data), &payload)
+	if err != nil {
+		http.Error(w, "Bad Request", http.StatusBadRequest)
+		return
+	}
+
+	if payload.Events != nil {
+
+		// Handle event-based payload
+		for _, e := range payload.Events {
+			// Warning: Duplicate event timestamps will overwrite each other
+			tags := map[string]string{
+				"host":  e.Hostname,
+				"event": payload.SavedSearch.Name,
+			}
+			fields := map[string]interface{}{
+				"count": uint64(1),
+			}
+			pt.acc.AddFields("papertrail", fields, tags, e.ReceivedAt)
+		}
+
+	} else if payload.Counts != nil {
+
+		// Handle count-based payload
+		for _, c := range payload.Counts {
+			for ts, count := range *c.TimeSeries {
+				tags := map[string]string{
+					"host":  c.SourceName,
+					"event": payload.SavedSearch.Name,
+				}
+				fields := map[string]interface{}{
+					"count": count,
+				}
+				pt.acc.AddFields("papertrail", fields, tags, time.Unix(ts, 0))
+			}
+		}
+	} else {
+		http.Error(w, "Bad Request", http.StatusBadRequest)
+		return
+	}
+
+	w.WriteHeader(http.StatusOK)
+}
diff --git a/plugins/inputs/webhooks/papertrail/papertrail_webhooks_models.go b/plugins/inputs/webhooks/papertrail/papertrail_webhooks_models.go
new file mode 100644
index 000000000..dd4e8d8bd
--- /dev/null
+++ b/plugins/inputs/webhooks/papertrail/papertrail_webhooks_models.go
@@ -0,0 +1,41 @@
+package papertrail
+
+import (
+	"time"
+)
+
+type Event struct {
+	ID                int64     `json:"id"`
+	ReceivedAt        time.Time `json:"received_at"`
+	DisplayReceivedAt string    `json:"display_received_at"`
+	SourceIP          string    `json:"source_ip"`
+	SourceName        string    `json:"source_name"`
+	SourceID          int       `json:"source_id"`
+	Hostname          string    `json:"hostname"`
+	Program           string    `json:"program"`
+	Severity          string    `json:"severity"`
+	Facility          string    `json:"facility"`
+	Message           string    `json:"message"`
+}
+
+type Count struct {
+	SourceName string            `json:"source_name"`
+	SourceID   int64             `json:"source_id"`
+	TimeSeries *map[int64]uint64 `json:"timeseries"`
+}
+
+type SavedSearch struct {
+	ID        int64  `json:"id"`
+	Name      string `json:"name"`
+	Query     string `json:"query"`
+	EditURL   string `json:"html_edit_url"`
+	SearchURL string `json:"html_search_url"`
+}
+
+type Payload struct {
+	Events      []*Event     `json:"events"`
+	Counts      []*Count     `json:"counts"`
+	SavedSearch *SavedSearch `json:"saved_search"`
+	MaxID       string       `json:"max_id"`
+	MinID       string       `json:"min_id"`
+}
diff --git a/plugins/inputs/webhooks/webhooks.go b/plugins/inputs/webhooks/webhooks.go
index bc8519d7a..7ed1ccd51 100644
--- a/plugins/inputs/webhooks/webhooks.go
+++ b/plugins/inputs/webhooks/webhooks.go
@@ -13,6 +13,7 @@ import (
 	"github.com/influxdata/telegraf/plugins/inputs/webhooks/filestack"
 	"github.com/influxdata/telegraf/plugins/inputs/webhooks/github"
 	"github.com/influxdata/telegraf/plugins/inputs/webhooks/mandrill"
+	"github.com/influxdata/telegraf/plugins/inputs/webhooks/papertrail"
 	"github.com/influxdata/telegraf/plugins/inputs/webhooks/rollbar"
 )
 
@@ -27,10 +28,11 @@ func init() {
 type Webhooks struct {
 	ServiceAddress string
 
-	Github    *github.GithubWebhook
-	Filestack *filestack.FilestackWebhook
-	Mandrill  *mandrill.MandrillWebhook
-	Rollbar   *rollbar.RollbarWebhook
+	Github     *github.GithubWebhook
+	Filestack  *filestack.FilestackWebhook
+	Mandrill   *mandrill.MandrillWebhook
+	Rollbar    *rollbar.RollbarWebhook
+	Papertrail *papertrail.PapertrailWebhook
 }
 
 func NewWebhooks() *Webhooks {
@@ -54,6 +56,9 @@ func (wb *Webhooks) SampleConfig() string {
 
   [inputs.webhooks.rollbar]
     path = "/rollbar"
+
+  [inputs.webhooks.papertrail]
+    path = "/papertrail"
  `
 }
 
diff --git a/plugins/inputs/webhooks/webhooks_test.go b/plugins/inputs/webhooks/webhooks_test.go
index 85d359e1c..6d3448870 100644
--- a/plugins/inputs/webhooks/webhooks_test.go
+++ b/plugins/inputs/webhooks/webhooks_test.go
@@ -5,6 +5,7 @@ import (
 	"testing"
 
 	"github.com/influxdata/telegraf/plugins/inputs/webhooks/github"
+	"github.com/influxdata/telegraf/plugins/inputs/webhooks/papertrail"
 	"github.com/influxdata/telegraf/plugins/inputs/webhooks/rollbar"
 )
 
@@ -26,4 +27,10 @@ func TestAvailableWebhooks(t *testing.T) {
 	if !reflect.DeepEqual(wb.AvailableWebhooks(), expected) {
 		t.Errorf("expected to be %v.\nGot %v", expected, wb.AvailableWebhooks())
 	}
+
+	wb.Papertrail = &papertrail.PapertrailWebhook{Path: "/papertrail"}
+	expected = append(expected, wb.Papertrail)
+	if !reflect.DeepEqual(wb.AvailableWebhooks(), expected) {
+		t.Errorf("expected to be %v.\nGot %v", expected, wb.AvailableWebhooks())
+	}
 }

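To exercise the handler locally, a hedged sketch posting a count-based payload as the form-encoded "payload" parameter the plugin reads. The address assumes the default webhooks service port and the `/papertrail` path from the sample config:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	// Minimal count-based payload; field values are illustrative.
	payload := `{"counts":[{"source_name":"arthur","source_id":4,"timeseries":{"1453248895":5}}],` +
		`"saved_search":{"id":42,"name":"Important stuff"}}`
	form := url.Values{}
	form.Set("payload", payload)

	// The handler requires exactly this content type.
	resp, err := http.Post("http://localhost:1619/papertrail",
		"application/x-www-form-urlencoded", strings.NewReader(form.Encode()))
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect 200 OK when the payload parses
}
```
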
From eb7ef5392e14b964877a2239976267d86dbd83c6 Mon Sep 17 00:00:00 2001
From: Nikolay Denev 
Date: Tue, 18 Apr 2017 19:42:58 +0100
Subject: [PATCH 0206/1302] Simplify system.DiskUsage() (#2630)

---
 plugins/inputs/system/cpu.go       |   2 +-
 plugins/inputs/system/disk.go      |   5 +-
 plugins/inputs/system/disk_test.go | 108 +++++++++++++++++++++++++++++
 plugins/inputs/system/memory.go    |   5 +-
 plugins/inputs/system/mock_PS.go   |  44 ++++++++++++
 plugins/inputs/system/net.go       |   2 +-
 plugins/inputs/system/netstat.go   |   2 +-
 plugins/inputs/system/ps.go        |  72 +++++++++++++------
 8 files changed, 212 insertions(+), 28 deletions(-)

diff --git a/plugins/inputs/system/cpu.go b/plugins/inputs/system/cpu.go
index 3ed2606fa..e6aa9f22d 100644
--- a/plugins/inputs/system/cpu.go
+++ b/plugins/inputs/system/cpu.go
@@ -121,7 +121,7 @@ func init() {
 		return &CPUStats{
 			PerCPU:   true,
 			TotalCPU: true,
-			ps:       &systemPS{},
+			ps:       newSystemPS(),
 		}
 	})
 }
diff --git a/plugins/inputs/system/disk.go b/plugins/inputs/system/disk.go
index 004466f83..46f2219a7 100644
--- a/plugins/inputs/system/disk.go
+++ b/plugins/inputs/system/disk.go
@@ -219,11 +219,12 @@ func (s *DiskIOStats) diskTags(devName string) map[string]string {
 }
 
 func init() {
+	ps := newSystemPS()
 	inputs.Add("disk", func() telegraf.Input {
-		return &DiskStats{ps: &systemPS{}}
+		return &DiskStats{ps: ps}
 	})
 
 	inputs.Add("diskio", func() telegraf.Input {
-		return &DiskIOStats{ps: &systemPS{}, SkipSerialNumber: true}
+		return &DiskIOStats{ps: ps, SkipSerialNumber: true}
 	})
 }
diff --git a/plugins/inputs/system/disk_test.go b/plugins/inputs/system/disk_test.go
index fc0ff4d0d..5ba4d041f 100644
--- a/plugins/inputs/system/disk_test.go
+++ b/plugins/inputs/system/disk_test.go
@@ -1,14 +1,122 @@
 package system
 
 import (
+	"os"
 	"testing"
 
 	"github.com/influxdata/telegraf/testutil"
 	"github.com/shirou/gopsutil/disk"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
 )
 
+type MockFileInfo struct {
+	os.FileInfo
+}
+
+func TestDiskUsage(t *testing.T) {
+	mck := &mock.Mock{}
+	mps := MockPSDisk{&systemPS{&mockDiskUsage{mck}}, mck}
+	defer mps.AssertExpectations(t)
+
+	var acc testutil.Accumulator
+	var err error
+
+	psAll := []disk.PartitionStat{
+		{
+			Device:     "/dev/sda",
+			Mountpoint: "/",
+			Fstype:     "ext4",
+			Opts:       "",
+		},
+		{
+			Device:     "/dev/sdb",
+			Mountpoint: "/home",
+			Fstype:     "ext4",
+			Opts:       "",
+		},
+	}
+	duAll := []disk.UsageStat{
+		{
+			Path:        "/",
+			Fstype:      "ext4",
+			Total:       128,
+			Free:        23,
+			Used:        100,
+			InodesTotal: 1234,
+			InodesFree:  234,
+			InodesUsed:  1000,
+		},
+		{
+			Path:        "/home",
+			Fstype:      "ext4",
+			Total:       256,
+			Free:        46,
+			Used:        200,
+			InodesTotal: 2468,
+			InodesFree:  468,
+			InodesUsed:  2000,
+		},
+	}
+
+	mps.On("Partitions", true).Return(psAll, nil)
+	mps.On("OSGetenv", "HOST_MOUNT_PREFIX").Return("")
+	mps.On("OSStat", "/").Return(MockFileInfo{}, nil)
+	mps.On("OSStat", "/home").Return(MockFileInfo{}, nil)
+	mps.On("PSDiskUsage", "/").Return(&duAll[0], nil)
+	mps.On("PSDiskUsage", "/home").Return(&duAll[1], nil)
+
+	err = (&DiskStats{ps: mps}).Gather(&acc)
+	require.NoError(t, err)
+
+	numDiskMetrics := acc.NFields()
+	expectedAllDiskMetrics := 14
+	assert.Equal(t, expectedAllDiskMetrics, numDiskMetrics)
+
+	tags1 := map[string]string{
+		"path":   "/",
+		"fstype": "ext4",
+		"device": "sda",
+	}
+	tags2 := map[string]string{
+		"path":   "/home",
+		"fstype": "ext4",
+		"device": "sdb",
+	}
+
+	fields1 := map[string]interface{}{
+		"total":        uint64(128),
+		"used":         uint64(100),
+		"free":         uint64(23),
+		"inodes_total": uint64(1234),
+		"inodes_free":  uint64(234),
+		"inodes_used":  uint64(1000),
+		"used_percent": float64(81.30081300813008),
+	}
+	fields2 := map[string]interface{}{
+		"total":        uint64(256),
+		"used":         uint64(200),
+		"free":         uint64(46),
+		"inodes_total": uint64(2468),
+		"inodes_free":  uint64(468),
+		"inodes_used":  uint64(2000),
+		"used_percent": float64(81.30081300813008),
+	}
+	acc.AssertContainsTaggedFields(t, "disk", fields1, tags1)
+	acc.AssertContainsTaggedFields(t, "disk", fields2, tags2)
+
+	// We expect 7 more disk fields to show up: "/" matches explicitly,
+	// while the /dev entry in MountPoints matches no partition
+	err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/dev"}}).Gather(&acc)
+	assert.Equal(t, expectedAllDiskMetrics+7, acc.NFields())
+
+	// We should see all the disk fields again, as MountPoints includes
+	// both / and /home
+	err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/home"}}).Gather(&acc)
+	assert.Equal(t, 2*expectedAllDiskMetrics+7, acc.NFields())
+}
+
 func TestDiskStats(t *testing.T) {
 	var mps MockPS
 	defer mps.AssertExpectations(t)
diff --git a/plugins/inputs/system/memory.go b/plugins/inputs/system/memory.go
index 26dc550f8..3f679b36c 100644
--- a/plugins/inputs/system/memory.go
+++ b/plugins/inputs/system/memory.go
@@ -73,11 +73,12 @@ func (s *SwapStats) Gather(acc telegraf.Accumulator) error {
 }
 
 func init() {
+	ps := newSystemPS()
 	inputs.Add("mem", func() telegraf.Input {
-		return &MemStats{ps: &systemPS{}}
+		return &MemStats{ps: ps}
 	})
 
 	inputs.Add("swap", func() telegraf.Input {
-		return &SwapStats{ps: &systemPS{}}
+		return &SwapStats{ps: ps}
 	})
 }
diff --git a/plugins/inputs/system/mock_PS.go b/plugins/inputs/system/mock_PS.go
index a83a8b803..d5093f031 100644
--- a/plugins/inputs/system/mock_PS.go
+++ b/plugins/inputs/system/mock_PS.go
@@ -1,6 +1,8 @@
 package system
 
 import (
+	"os"
+
 	"github.com/stretchr/testify/mock"
 
 	"github.com/shirou/gopsutil/cpu"
@@ -13,6 +15,16 @@ import (
 
 type MockPS struct {
 	mock.Mock
+	PSDiskDeps
+}
+
+type MockPSDisk struct {
+	*systemPS
+	*mock.Mock
+}
+
+type mockDiskUsage struct {
+	*mock.Mock
 }
 
 func (m *MockPS) LoadAvg() (*load.AvgStat, error) {
@@ -96,3 +108,35 @@ func (m *MockPS) NetConnections() ([]net.ConnectionStat, error) {
 
 	return r0, r1
 }
+
+func (m *mockDiskUsage) Partitions(all bool) ([]disk.PartitionStat, error) {
+	ret := m.Called(all)
+
+	r0 := ret.Get(0).([]disk.PartitionStat)
+	r1 := ret.Error(1)
+
+	return r0, r1
+}
+
+func (m *mockDiskUsage) OSGetenv(key string) string {
+	ret := m.Called(key)
+	return ret.Get(0).(string)
+}
+
+func (m *mockDiskUsage) OSStat(name string) (os.FileInfo, error) {
+	ret := m.Called(name)
+
+	r0 := ret.Get(0).(os.FileInfo)
+	r1 := ret.Error(1)
+
+	return r0, r1
+}
+
+func (m *mockDiskUsage) PSDiskUsage(path string) (*disk.UsageStat, error) {
+	ret := m.Called(path)
+
+	r0 := ret.Get(0).(*disk.UsageStat)
+	r1 := ret.Error(1)
+
+	return r0, r1
+}
diff --git a/plugins/inputs/system/net.go b/plugins/inputs/system/net.go
index 3f89176fb..f47a2cc6c 100644
--- a/plugins/inputs/system/net.go
+++ b/plugins/inputs/system/net.go
@@ -105,6 +105,6 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error {
 
 func init() {
 	inputs.Add("net", func() telegraf.Input {
-		return &NetIOStats{ps: &systemPS{}}
+		return &NetIOStats{ps: newSystemPS()}
 	})
 }
diff --git a/plugins/inputs/system/netstat.go b/plugins/inputs/system/netstat.go
index 98b729bbe..1699e0808 100644
--- a/plugins/inputs/system/netstat.go
+++ b/plugins/inputs/system/netstat.go
@@ -66,6 +66,6 @@ func (s *NetStats) Gather(acc telegraf.Accumulator) error {
 
 func init() {
 	inputs.Add("netstat", func() telegraf.Input {
-		return &NetStats{ps: &systemPS{}}
+		return &NetStats{ps: newSystemPS()}
 	})
 }
diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go
index 20e01742a..979a3b164 100644
--- a/plugins/inputs/system/ps.go
+++ b/plugins/inputs/system/ps.go
@@ -23,6 +23,13 @@ type PS interface {
 	NetConnections() ([]net.ConnectionStat, error)
 }
 
+type PSDiskDeps interface {
+	Partitions(all bool) ([]disk.PartitionStat, error)
+	OSGetenv(key string) string
+	OSStat(name string) (os.FileInfo, error)
+	PSDiskUsage(path string) (*disk.UsageStat, error)
+}
+
 func add(acc telegraf.Accumulator,
 	name string, val float64, tags map[string]string) {
 	if val >= 0 {
@@ -30,7 +37,15 @@ func add(acc telegraf.Accumulator,
 	}
 }
 
-type systemPS struct{}
+func newSystemPS() *systemPS {
+	return &systemPS{&systemPSDisk{}}
+}
+
+type systemPS struct {
+	PSDiskDeps
+}
+
+type systemPSDisk struct{}
 
 func (s *systemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.TimesStat, error) {
 	var cpuTimes []cpu.TimesStat
@@ -55,7 +70,7 @@ func (s *systemPS) DiskUsage(
 	mountPointFilter []string,
 	fstypeExclude []string,
 ) ([]*disk.UsageStat, []*disk.PartitionStat, error) {
-	parts, err := disk.Partitions(true)
+	parts, err := s.Partitions(true)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -74,35 +89,34 @@ func (s *systemPS) DiskUsage(
 	var partitions []*disk.PartitionStat
 
 	for i := range parts {
-
 		p := parts[i]
 
 		if len(mountPointFilter) > 0 {
 			// If the mount point is not a member of the filter set,
 			// don't gather info on it.
-			_, ok := mountPointFilterSet[p.Mountpoint]
-			if !ok {
+			if _, ok := mountPointFilterSet[p.Mountpoint]; !ok {
 				continue
 			}
 		}
-		mountpoint := os.Getenv("HOST_MOUNT_PREFIX") + p.Mountpoint
-		if _, err := os.Stat(mountpoint); err == nil {
-			du, err := disk.Usage(mountpoint)
-			if err != nil {
-				return nil, nil, err
-			}
-			du.Path = p.Mountpoint
 
-			// If the mount point is a member of the exclude set,
-			// don't gather info on it.
-			_, ok := fstypeExcludeSet[p.Fstype]
-			if ok {
-				continue
-			}
-			du.Fstype = p.Fstype
-			usage = append(usage, du)
-			partitions = append(partitions, &p)
+		// If the mount point is a member of the exclude set,
+		// don't gather info on it.
+		if _, ok := fstypeExcludeSet[p.Fstype]; ok {
+			continue
 		}
+
+		mountpoint := s.OSGetenv("HOST_MOUNT_PREFIX") + p.Mountpoint
+		if _, err := s.OSStat(mountpoint); err != nil {
+			continue
+		}
+		du, err := s.PSDiskUsage(mountpoint)
+		if err != nil {
+			continue
+		}
+		du.Path = p.Mountpoint
+		du.Fstype = p.Fstype
+		usage = append(usage, du)
+		partitions = append(partitions, &p)
 	}
 
 	return usage, partitions, nil
@@ -136,3 +150,19 @@ func (s *systemPS) VMStat() (*mem.VirtualMemoryStat, error) {
 func (s *systemPS) SwapStat() (*mem.SwapMemoryStat, error) {
 	return mem.SwapMemory()
 }
+
+func (s *systemPSDisk) Partitions(all bool) ([]disk.PartitionStat, error) {
+	return disk.Partitions(all)
+}
+
+func (s *systemPSDisk) OSGetenv(key string) string {
+	return os.Getenv(key)
+}
+
+func (s *systemPSDisk) OSStat(name string) (os.FileInfo, error) {
+	return os.Stat(name)
+}
+
+func (s *systemPSDisk) PSDiskUsage(path string) (*disk.UsageStat, error) {
+	return disk.Usage(path)
+}

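The refactor above replaces direct `os` and `disk` calls inside `DiskUsage` with methods on the `PSDiskDeps` interface, which is what makes `TestDiskUsage` possible: the test injects `mockDiskUsage` instead of touching the real filesystem. A minimal sketch of that injection pattern follows, with simplified, illustrative types (only `OSGetenv` is modeled, and the helper names are not from the patch):

```go
package main

import (
	"fmt"
	"os"
)

// PSDiskDeps is trimmed down to a single method for illustration.
type PSDiskDeps interface {
	OSGetenv(key string) string
}

// systemPSDisk is the production implementation: a thin wrapper over os.
type systemPSDisk struct{}

func (systemPSDisk) OSGetenv(key string) string { return os.Getenv(key) }

// fakeDisk is what a test would inject instead.
type fakeDisk struct{ prefix string }

func (f fakeDisk) OSGetenv(key string) string { return f.prefix }

// systemPS embeds the deps, as in the patch, so callers can invoke
// s.OSGetenv(...) without knowing whether it is real or mocked.
type systemPS struct{ PSDiskDeps }

func hostMountpoint(ps *systemPS, mp string) string {
	// Mirrors the DiskUsage logic: prepend HOST_MOUNT_PREFIX before stat-ing.
	return ps.OSGetenv("HOST_MOUNT_PREFIX") + mp
}

func main() {
	real := &systemPS{systemPSDisk{}}
	fake := &systemPS{fakeDisk{prefix: "/hostfs"}}
	fmt.Println(hostMountpoint(real, "/home")) // "/home" (env var unset)
	fmt.Println(hostMountpoint(fake, "/home")) // "/hostfs/home"
}
```
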
From 2542ef6d6207941a0e2e8610d0add75db7d90ede Mon Sep 17 00:00:00 2001
From: Patrick Hemmer 
Date: Tue, 18 Apr 2017 16:00:41 -0400
Subject: [PATCH 0207/1302] change jolokia input to use bulk requests (#2253)

---
 CHANGELOG.md                           |   1 +
 plugins/inputs/jolokia/jolokia.go      | 175 ++++++++++++------------
 plugins/inputs/jolokia/jolokia_test.go | 178 +++++++++++++++++--------
 3 files changed, 215 insertions(+), 139 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0c7b7c2fd..7437e4ad5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -72,6 +72,7 @@ be deprecated eventually.
 - [#1820](https://github.com/influxdata/telegraf/issues/1820): easier plugin testing without outputs
 - [#2493](https://github.com/influxdata/telegraf/pull/2493): Check signature in the GitHub webhook plugin
 - [#2038](https://github.com/influxdata/telegraf/issues/2038): Add papertrail support to webhooks
+- [#2253](https://github.com/influxdata/telegraf/pull/2253): Change jolokia plugin to use bulk requests.
 
 ### Bugfixes
 
diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go
index 7f371c935..0a9122b87 100644
--- a/plugins/inputs/jolokia/jolokia.go
+++ b/plugins/inputs/jolokia/jolokia.go
@@ -3,7 +3,6 @@ package jolokia
 import (
 	"bytes"
 	"encoding/json"
-	"errors"
 	"fmt"
 	"io/ioutil"
 	"net/http"
@@ -130,7 +129,7 @@ func (j *Jolokia) Description() string {
 	return "Read JMX metrics through Jolokia"
 }
 
-func (j *Jolokia) doRequest(req *http.Request) (map[string]interface{}, error) {
+func (j *Jolokia) doRequest(req *http.Request) ([]map[string]interface{}, error) {
 	resp, err := j.jClient.MakeRequest(req)
 	if err != nil {
 		return nil, err
@@ -155,85 +154,81 @@ func (j *Jolokia) doRequest(req *http.Request) (map[string]interface{}, error) {
 	}
 
 	// Unmarshal json
-	var jsonOut map[string]interface{}
+	var jsonOut []map[string]interface{}
 	if err = json.Unmarshal([]byte(body), &jsonOut); err != nil {
-		return nil, errors.New("Error decoding JSON response")
-	}
-
-	if status, ok := jsonOut["status"]; ok {
-		if status != float64(200) {
-			return nil, fmt.Errorf("Not expected status value in response body: %3.f",
-				status)
-		}
-	} else {
-		return nil, fmt.Errorf("Missing status in response body")
+		return nil, fmt.Errorf("Error decoding JSON response: %s: %s", err, body)
 	}
 
 	return jsonOut, nil
 }
 
-func (j *Jolokia) prepareRequest(server Server, metric Metric) (*http.Request, error) {
+func (j *Jolokia) prepareRequest(server Server, metrics []Metric) (*http.Request, error) {
 	var jolokiaUrl *url.URL
 	context := j.Context // Usually "/jolokia/"
 
-	// Create bodyContent
-	bodyContent := map[string]interface{}{
-		"type":  "read",
-		"mbean": metric.Mbean,
+	var bulkBodyContent []map[string]interface{}
+	for _, metric := range metrics {
+		// Create bodyContent
+		bodyContent := map[string]interface{}{
+			"type":  "read",
+			"mbean": metric.Mbean,
+		}
+
+		if metric.Attribute != "" {
+			bodyContent["attribute"] = metric.Attribute
+			if metric.Path != "" {
+				bodyContent["path"] = metric.Path
+			}
+		}
+
+		// Add target, only in proxy mode
+		if j.Mode == "proxy" {
+			serviceUrl := fmt.Sprintf("service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmi",
+				server.Host, server.Port)
+
+			target := map[string]string{
+				"url": serviceUrl,
+			}
+
+			if server.Username != "" {
+				target["user"] = server.Username
+			}
+
+			if server.Password != "" {
+				target["password"] = server.Password
+			}
+
+			bodyContent["target"] = target
+
+			proxy := j.Proxy
+
+			// Prepare ProxyURL
+			proxyUrl, err := url.Parse("http://" + proxy.Host + ":" + proxy.Port + context)
+			if err != nil {
+				return nil, err
+			}
+			if proxy.Username != "" || proxy.Password != "" {
+				proxyUrl.User = url.UserPassword(proxy.Username, proxy.Password)
+			}
+
+			jolokiaUrl = proxyUrl
+
+		} else {
+			serverUrl, err := url.Parse("http://" + server.Host + ":" + server.Port + context)
+			if err != nil {
+				return nil, err
+			}
+			if server.Username != "" || server.Password != "" {
+				serverUrl.User = url.UserPassword(server.Username, server.Password)
+			}
+
+			jolokiaUrl = serverUrl
+		}
+
+		bulkBodyContent = append(bulkBodyContent, bodyContent)
 	}
 
-	if metric.Attribute != "" {
-		bodyContent["attribute"] = metric.Attribute
-		if metric.Path != "" {
-			bodyContent["path"] = metric.Path
-		}
-	}
-
-	// Add target, only in proxy mode
-	if j.Mode == "proxy" {
-		serviceUrl := fmt.Sprintf("service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmi",
-			server.Host, server.Port)
-
-		target := map[string]string{
-			"url": serviceUrl,
-		}
-
-		if server.Username != "" {
-			target["user"] = server.Username
-		}
-
-		if server.Password != "" {
-			target["password"] = server.Password
-		}
-
-		bodyContent["target"] = target
-
-		proxy := j.Proxy
-
-		// Prepare ProxyURL
-		proxyUrl, err := url.Parse("http://" + proxy.Host + ":" + proxy.Port + context)
-		if err != nil {
-			return nil, err
-		}
-		if proxy.Username != "" || proxy.Password != "" {
-			proxyUrl.User = url.UserPassword(proxy.Username, proxy.Password)
-		}
-
-		jolokiaUrl = proxyUrl
-
-	} else {
-		serverUrl, err := url.Parse("http://" + server.Host + ":" + server.Port + context)
-		if err != nil {
-			return nil, err
-		}
-		if server.Username != "" || server.Password != "" {
-			serverUrl.User = url.UserPassword(server.Username, server.Password)
-		}
-
-		jolokiaUrl = serverUrl
-	}
-
-	requestBody, err := json.Marshal(bodyContent)
+	requestBody, err := json.Marshal(bulkBodyContent)
 
 	req, err := http.NewRequest("POST", jolokiaUrl.String(), bytes.NewBuffer(requestBody))
 
@@ -276,25 +271,35 @@ func (j *Jolokia) Gather(acc telegraf.Accumulator) error {
 		tags["jolokia_host"] = server.Host
 		fields := make(map[string]interface{})
 
-		for _, metric := range metrics {
-			measurement := metric.Name
+		req, err := j.prepareRequest(server, metrics)
+		if err != nil {
+			acc.AddError(fmt.Errorf("unable to create request: %s", err))
+			continue
+		}
+		out, err := j.doRequest(req)
+		if err != nil {
+			acc.AddError(fmt.Errorf("error performing request: %s", err))
+			continue
+		}
 
-			req, err := j.prepareRequest(server, metric)
-			if err != nil {
-				return err
+		if len(out) != len(metrics) {
+			acc.AddError(fmt.Errorf("did not receive the correct number of metrics in response. expected %d, received %d", len(metrics), len(out)))
+			continue
+		}
+		for i, resp := range out {
+			if status, ok := resp["status"]; ok && status != float64(200) {
+				acc.AddError(fmt.Errorf("Not expected status value in response body (%s:%s mbean=\"%s\" attribute=\"%s\"): %3.f",
+					server.Host, server.Port, metrics[i].Mbean, metrics[i].Attribute, status))
+				continue
+			} else if !ok {
+				acc.AddError(fmt.Errorf("Missing status in response body"))
+				continue
 			}
 
-			out, err := j.doRequest(req)
-
-			if err != nil {
-				fmt.Printf("Error handling response: %s\n", err)
+			if values, ok := resp["value"]; ok {
+				j.extractValues(metrics[i].Name, values, fields)
 			} else {
-				if values, ok := out["value"]; ok {
-					j.extractValues(measurement, values, fields)
-				} else {
-					fmt.Printf("Missing key 'value' in output response\n")
-				}
-
+				acc.AddError(fmt.Errorf("Missing key 'value' in output response\n"))
 			}
 		}
 
diff --git a/plugins/inputs/jolokia/jolokia_test.go b/plugins/inputs/jolokia/jolokia_test.go
index 3c4fc2561..cf415f36f 100644
--- a/plugins/inputs/jolokia/jolokia_test.go
+++ b/plugins/inputs/jolokia/jolokia_test.go
@@ -13,65 +13,105 @@ import (
 )
 
 const validThreeLevelMultiValueJSON = `
-{
-  "request":{
-    "mbean":"java.lang:type=*",
-    "type":"read"
+[
+  {
+    "request":{
+      "mbean":"java.lang:type=*",
+      "type":"read"
+    },
+    "value":{
+      "java.lang:type=Memory":{
+        "ObjectPendingFinalizationCount":0,
+        "Verbose":false,
+        "HeapMemoryUsage":{
+          "init":134217728,
+          "committed":173015040,
+          "max":1908932608,
+          "used":16840016
+        },
+        "NonHeapMemoryUsage":{
+          "init":2555904,
+          "committed":51380224,
+          "max":-1,
+          "used":49944048
+        },
+        "ObjectName":{
+          "objectName":"java.lang:type=Memory"
+        }
+      }
+    },
+    "timestamp":1446129191,
+    "status":200
+  }
+]`
+
+const validBulkResponseJSON = `
+[
+  {
+    "request":{
+      "mbean":"java.lang:type=Memory",
+      "attribute":"HeapMemoryUsage",
+      "type":"read"
+    },
+    "value":{
+      "init":67108864,
+      "committed":456130560,
+      "max":477626368,
+      "used":203288528
+    },
+    "timestamp":1446129191,
+    "status":200
   },
-  "value":{
-		"java.lang:type=Memory":{
-			"ObjectPendingFinalizationCount":0,
-			"Verbose":false,
-			"HeapMemoryUsage":{
-				"init":134217728,
-				"committed":173015040,
-				"max":1908932608,
-				"used":16840016
-			},
-			"NonHeapMemoryUsage":{
-				"init":2555904,
-				"committed":51380224,
-				"max":-1,
-				"used":49944048
-			},
-			"ObjectName":{
-				"objectName":"java.lang:type=Memory"
-			}
-		}
-  },
-  "timestamp":1446129191,
-  "status":200
-}`
+  {
+    "request":{
+      "mbean":"java.lang:type=Memory",
+      "attribute":"NonHeapMemoryUsage",
+      "type":"read"
+    },
+    "value":{
+      "init":2555904,
+      "committed":51380224,
+      "max":-1,
+      "used":49944048
+    },
+    "timestamp":1446129191,
+    "status":200
+  }
+]`
 
 const validMultiValueJSON = `
-{
-  "request":{
-    "mbean":"java.lang:type=Memory",
-    "attribute":"HeapMemoryUsage",
-    "type":"read"
-  },
-  "value":{
-    "init":67108864,
-    "committed":456130560,
-    "max":477626368,
-    "used":203288528
-  },
-  "timestamp":1446129191,
-  "status":200
-}`
+[
+  {
+    "request":{
+      "mbean":"java.lang:type=Memory",
+      "attribute":"HeapMemoryUsage",
+      "type":"read"
+    },
+    "value":{
+      "init":67108864,
+      "committed":456130560,
+      "max":477626368,
+      "used":203288528
+    },
+    "timestamp":1446129191,
+    "status":200
+  }
+]`
 
 const validSingleValueJSON = `
-{
-  "request":{
-    "path":"used",
-    "mbean":"java.lang:type=Memory",
-    "attribute":"HeapMemoryUsage",
-    "type":"read"
-  },
-  "value":209274376,
-  "timestamp":1446129256,
-  "status":200
-}`
+[
+  {
+    "request":{
+      "path":"used",
+      "mbean":"java.lang:type=Memory",
+      "attribute":"HeapMemoryUsage",
+      "type":"read"
+    },
+    "value":209274376,
+    "timestamp":1446129256,
+    "status":200
+  }
+]`
 
 const invalidJSON = "I don't think this is JSON"
 
@@ -82,6 +122,8 @@ var HeapMetric = Metric{Name: "heap_memory_usage",
 	Mbean: "java.lang:type=Memory", Attribute: "HeapMemoryUsage"}
 var UsedHeapMetric = Metric{Name: "heap_memory_usage",
 	Mbean: "java.lang:type=Memory", Attribute: "HeapMemoryUsage"}
+var NonHeapMetric = Metric{Name: "non_heap_memory_usage",
+	Mbean: "java.lang:type=Memory", Attribute: "NonHeapMemoryUsage"}
 
 type jolokiaClientStub struct {
 	responseBody string
@@ -135,6 +177,34 @@ func TestHttpJsonMultiValue(t *testing.T) {
 	acc.AssertContainsTaggedFields(t, "jolokia", fields, tags)
 }
 
+// Test that bulk responses are handled
+func TestHttpJsonBulkResponse(t *testing.T) {
+	jolokia := genJolokiaClientStub(validBulkResponseJSON, 200, Servers, []Metric{HeapMetric, NonHeapMetric})
+
+	var acc testutil.Accumulator
+	err := jolokia.Gather(&acc)
+
+	assert.Nil(t, err)
+	assert.Equal(t, 1, len(acc.Metrics))
+
+	fields := map[string]interface{}{
+		"heap_memory_usage_init":          67108864.0,
+		"heap_memory_usage_committed":     456130560.0,
+		"heap_memory_usage_max":           477626368.0,
+		"heap_memory_usage_used":          203288528.0,
+		"non_heap_memory_usage_init":      2555904.0,
+		"non_heap_memory_usage_committed": 51380224.0,
+		"non_heap_memory_usage_max":       -1.0,
+		"non_heap_memory_usage_used":      49944048.0,
+	}
+	tags := map[string]string{
+		"jolokia_host": "127.0.0.1",
+		"jolokia_port": "8080",
+		"jolokia_name": "as1",
+	}
+	acc.AssertContainsTaggedFields(t, "jolokia", fields, tags)
+}
+
 // Test that the proper values are ignored or collected
 func TestHttpJsonThreeLevelMultiValue(t *testing.T) {
 	jolokia := genJolokiaClientStub(validThreeLevelMultiValueJSON, 200, Servers, []Metric{HeapMetric})

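For reference, the bulk protocol this patch adopts is standard Jolokia: instead of one POST per metric, the plugin marshals all read requests into a single JSON array and receives an array of the same length, one response object per request. Below is a minimal standalone sketch of that request/response shape; the endpoint URL and mbeans are illustrative and error handling is abbreviated:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// One entry per metric, matching the bulkBodyContent built in prepareRequest.
	bulk := []map[string]interface{}{
		{"type": "read", "mbean": "java.lang:type=Memory", "attribute": "HeapMemoryUsage"},
		{"type": "read", "mbean": "java.lang:type=Memory", "attribute": "NonHeapMemoryUsage"},
	}
	body, _ := json.Marshal(bulk)

	resp, err := http.Post("http://localhost:8080/jolokia/", "application/json",
		bytes.NewBuffer(body))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	// Jolokia answers with a JSON array of the same length; each element
	// carries its own "status", which is why Gather now checks per-response
	// status instead of a single top-level one.
	var out []map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("received %d responses for %d requests\n", len(out), len(bulk))
}
```
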
From 3690e1b9bf0e222f544f1b5b2e08dea7b65ccf84 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Wed, 19 Apr 2017 13:42:24 -0700
Subject: [PATCH 0208/1302] Add diskio for darwin to changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7437e4ad5..c32367cb4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -73,6 +73,7 @@ be deprecated eventually.
 - [#2493](https://github.com/influxdata/telegraf/pull/2493): Check signature in the GitHub webhook plugin
 - [#2038](https://github.com/influxdata/telegraf/issues/2038): Add papertrail support to webhooks
 - [#2253](https://github.com/influxdata/telegraf/pull/2253): Change jolokia plugin to use bulk requests.
+- [#2575](https://github.com/influxdata/telegraf/issues/2575): Add diskio input for Darwin
 
 ### Bugfixes
 

From bf30ef89ee013b59d0fd2638055180b58043b374 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Wed, 19 Apr 2017 17:02:44 -0700
Subject: [PATCH 0209/1302] Fix ipmi_sensor config is shared between all plugin
 instances (#2684)

---
 CHANGELOG.md                       | 1 +
 plugins/inputs/ipmi_sensor/ipmi.go | 1 +
 2 files changed, 2 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c32367cb4..6160d7558 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -107,6 +107,7 @@ be deprecated eventually.
 - [#2628](https://github.com/influxdata/telegraf/issues/2628): Set default measurement name for snmp input.
 - [#2649](https://github.com/influxdata/telegraf/pull/2649): Improve performance of diskio with many disks
 - [#2671](https://github.com/influxdata/telegraf/issues/2671): The internal input plugin uses the wrong units for `heap_objects`
+- [#2684](https://github.com/influxdata/telegraf/pull/2684): Fix ipmi_sensor config is shared between all plugin instances
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go
index 0114812d3..a3beeb297 100644
--- a/plugins/inputs/ipmi_sensor/ipmi.go
+++ b/plugins/inputs/ipmi_sensor/ipmi.go
@@ -152,6 +152,7 @@ func init() {
 		m.Path = path
 	}
 	inputs.Add("ipmi_sensor", func() telegraf.Input {
+		m := m
 		return &m
 	})
 }

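The one-line fix above works because `m := m` declares a new variable inside the factory closure, shadowing the package-level `m` with a fresh copy on every call; without it, every plugin instance returned `&m` and shared a single struct, so configuration set on one instance leaked into all others. A minimal sketch of the difference (the `Ipmi` type here is a stand-in, not the real plugin struct):

```go
package main

import "fmt"

type Ipmi struct{ Path string }

func main() {
	m := Ipmi{Path: "/usr/bin/ipmitool"}

	// Without the copy: every call returns a pointer to the same value.
	shared := func() *Ipmi { return &m }
	// With the copy: `m := m` shadows the outer m, so each call returns
	// a pointer to its own fresh copy.
	fresh := func() *Ipmi { m := m; return &m }

	a, b := shared(), shared()
	c, d := fresh(), fresh()

	a.Path = "/changed"
	fmt.Println(b.Path)         // "/changed": a and b alias one struct
	fmt.Println(c.Path, d.Path) // both still "/usr/bin/ipmitool"
	fmt.Println(c == d)         // false: distinct instances
}
```
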
From 748ca7d50377217378920bf36bbffced964a369f Mon Sep 17 00:00:00 2001
From: Martin 
Date: Thu, 20 Apr 2017 20:19:33 +0200
Subject: [PATCH 0210/1302] Fixed install/remove of telegraf on non-systemd
 Debian/Ubuntu systems (#2360)

---
 CHANGELOG.md            |  1 +
 scripts/post-install.sh | 51 +++++++++++++++++++++++---------------
 scripts/post-remove.sh  | 55 +++++++++++++++++++++++++++--------------
 scripts/pre-install.sh  | 16 ++++++------
 scripts/pre-remove.sh   |  9 +++----
 5 files changed, 81 insertions(+), 51 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6160d7558..b19c3785a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -85,6 +85,7 @@ be deprecated eventually.
 - [#2356](https://github.com/influxdata/telegraf/issues/2356): cpu input panic when /proc/stat is empty.
 - [#2341](https://github.com/influxdata/telegraf/issues/2341): telegraf swallowing panics in --test mode.
 - [#2358](https://github.com/influxdata/telegraf/pull/2358): Create pidfile with 644 permissions & defer file deletion.
+- [#2360](https://github.com/influxdata/telegraf/pull/2360): Fixed install/remove of telegraf on non-systemd Debian/Ubuntu systems.
 - [#2282](https://github.com/influxdata/telegraf/issues/2282): Reloading telegraf freezes prometheus output.
 - [#2390](https://github.com/influxdata/telegraf/issues/2390): Empty tag value causes error on InfluxDB output.
 - [#2380](https://github.com/influxdata/telegraf/issues/2380): buffer_size field value is negative number from "internal" plugin.
diff --git a/scripts/post-install.sh b/scripts/post-install.sh
index 45a19d26c..2baabe69a 100644
--- a/scripts/post-install.sh
+++ b/scripts/post-install.sh
@@ -24,10 +24,8 @@ function install_chkconfig {
     chkconfig --add telegraf
 }
 
-id telegraf &>/dev/null
-if [[ $? -ne 0 ]]; then
-    grep "^telegraf:" /etc/group &>/dev/null
-    if [[ $? -ne 0 ]]; then
+if ! id telegraf &>/dev/null; then
+    if ! grep "^telegraf:" /etc/group &>/dev/null; then
         useradd -r -K USERGROUPS_ENAB=yes -M telegraf -s /bin/false -d /etc/telegraf
     else
         useradd -r -K USERGROUPS_ENAB=yes -M telegraf -s /bin/false -d /etc/telegraf -g telegraf
@@ -60,31 +58,44 @@ fi
 # Distribution-specific logic
 if [[ -f /etc/redhat-release ]]; then
     # RHEL-variant logic
-    which systemctl &>/dev/null
-    if [[ $? -eq 0 ]]; then
-	    install_systemd
+    if [[ "$(readlink /proc/1/exe)" == */systemd ]]; then
+        install_systemd
     else
-	    # Assuming sysv
-	    install_init
-	    install_chkconfig
+        # Assuming SysVinit
+        install_init
+        # Run update-rc.d or fall back to chkconfig if not available
+        if which update-rc.d &>/dev/null; then
+            install_update_rcd
+        else
+            install_chkconfig
+        fi
     fi
 elif [[ -f /etc/debian_version ]]; then
     # Debian/Ubuntu logic
-    which systemctl &>/dev/null
-    if [[ $? -eq 0 ]]; then
-	    install_systemd
-	    systemctl restart telegraf || echo "WARNING: systemd not running."
+    if [[ "$(readlink /proc/1/exe)" == */systemd ]]; then
+        install_systemd
+        systemctl restart telegraf || echo "WARNING: systemd not running."
     else
-	    # Assuming sysv
-	    install_init
-	    install_update_rcd
-	    invoke-rc.d telegraf restart
+        # Assuming SysVinit
+        install_init
+        # Run update-rc.d or fall back to chkconfig if not available
+        if which update-rc.d &>/dev/null; then
+            install_update_rcd
+        else
+            install_chkconfig
+        fi
+        invoke-rc.d telegraf restart
     fi
 elif [[ -f /etc/os-release ]]; then
     source /etc/os-release
     if [[ $ID = "amzn" ]]; then
 	    # Amazon Linux logic
-	    install_init
-	    install_chkconfig
+        install_init
+        # Run update-rc.d or fall back to chkconfig if not available
+        if which update-rc.d &>/dev/null; then
+            install_update_rcd
+        else
+            install_chkconfig
+        fi
     fi
 fi
diff --git a/scripts/post-remove.sh b/scripts/post-remove.sh
index 0f262d225..b66a3aa9a 100644
--- a/scripts/post-remove.sh
+++ b/scripts/post-remove.sh
@@ -15,28 +15,45 @@ function disable_chkconfig {
     rm -f /etc/init.d/telegraf
 }
 
-if [[ "$1" == "0" ]]; then
-    # RHEL and any distribution that follow RHEL, Amazon Linux covered
-    # telegraf is no longer installed, remove from init system
-    rm -f /etc/default/telegraf
+if [[ -f /etc/redhat-release ]]; then
+    # RHEL-variant logic
+    if [[ "$1" = "0" ]]; then
+        # telegraf is no longer installed, remove from init system
+        rm -f /etc/default/telegraf
 
-    which systemctl &>/dev/null
-    if [[ $? -eq 0 ]]; then
-        disable_systemd
-    else
-        # Assuming sysv
-        disable_chkconfig
+        if [[ "$(readlink /proc/1/exe)" == */systemd ]]; then
+            disable_systemd
+        else
+            # Assuming sysv
+            disable_chkconfig
+        fi
     fi
-elif [ "$1" == "remove" -o "$1" == "purge" ]; then
+elif [[ -f /etc/debian_version ]]; then
     # Debian/Ubuntu logic
-    # Remove/purge
-    rm -f /etc/default/telegraf
+    if [ "$1" == "remove" -o "$1" == "purge" ]; then
+        # Remove/purge
+        rm -f /etc/default/telegraf
 
-    which systemctl &>/dev/null
-    if [[ $? -eq 0 ]]; then
-        disable_systemd
-    else
-        # Assuming sysv
-        disable_update_rcd
+        if [[ "$(readlink /proc/1/exe)" == */systemd ]]; then
+            disable_systemd
+        else
+            # Assuming sysv
+            # Run update-rc.d or fall back to chkconfig if not available
+            if which update-rc.d &>/dev/null; then
+                disable_update_rcd
+            else
+                disable_chkconfig
+            fi
+        fi
+    fi
+elif [[ -f /etc/os-release ]]; then
+    source /etc/os-release
+    if [[ $ID = "amzn" ]]; then
+        # Amazon Linux logic
+        if [[ "$1" = "0" ]]; then
+            # telegraf is no longer installed, remove from init system
+            rm -f /etc/default/telegraf
+            disable_chkconfig
+        fi
     fi
 fi
diff --git a/scripts/pre-install.sh b/scripts/pre-install.sh
index 443d6bc87..b371f462d 100644
--- a/scripts/pre-install.sh
+++ b/scripts/pre-install.sh
@@ -1,14 +1,16 @@
 #!/bin/bash
 
-if [[ -f /etc/opt/telegraf/telegraf.conf ]]; then
+if [[ -d /etc/opt/telegraf ]]; then
     # Legacy configuration found
     if [[ ! -d /etc/telegraf ]]; then
-	# New configuration does not exist, move legacy configuration to new location
-	echo -e "Please note, Telegraf's configuration is now located at '/etc/telegraf' (previously '/etc/opt/telegraf')."
-	mv /etc/opt/telegraf /etc/telegraf
+        # New configuration does not exist, move legacy configuration to new location
+        echo -e "Please note, Telegraf's configuration is now located at '/etc/telegraf' (previously '/etc/opt/telegraf')."
+        mv -vn /etc/opt/telegraf /etc/telegraf
 
-	backup_name="telegraf.conf.$(date +%s).backup"
-	echo "A backup of your current configuration can be found at: /etc/telegraf/$backup_name"
-	cp -a /etc/telegraf/telegraf.conf /etc/telegraf/$backup_name
+        if [[ -f /etc/telegraf/telegraf.conf ]]; then
+            backup_name="telegraf.conf.$(date +%s).backup"
+            echo "A backup of your current configuration can be found at: /etc/telegraf/${backup_name}"
+            cp -a "/etc/telegraf/telegraf.conf" "/etc/telegraf/${backup_name}"
+        fi
     fi
 fi
diff --git a/scripts/pre-remove.sh b/scripts/pre-remove.sh
index a57184630..2887fc9b6 100644
--- a/scripts/pre-remove.sh
+++ b/scripts/pre-remove.sh
@@ -5,11 +5,10 @@ BIN_DIR=/usr/bin
 # Distribution-specific logic
 if [[ -f /etc/debian_version ]]; then
     # Debian/Ubuntu logic
-    which systemctl &>/dev/null
-    if [[ $? -eq 0 ]]; then
-	deb-systemd-invoke stop telegraf.service
+    if [[ "$(readlink /proc/1/exe)" == */systemd ]]; then
+        deb-systemd-invoke stop telegraf.service
     else
-	# Assuming sysv
-	invoke-rc.d telegraf stop
+        # Assuming sysv
+        invoke-rc.d telegraf stop
     fi
 fi

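The scripts above switch from `which systemctl` to inspecting `/proc/1/exe`, because `systemctl` can be present on systems where systemd is not actually PID 1 (chroots, containers, or SysVinit machines with the binary installed). As a hedged sketch, the same check expressed in Go rather than shell (note that reading `/proc/1/exe` typically requires root, which these package scripts run as):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// systemdIsPID1 mirrors the shell test:
// [[ "$(readlink /proc/1/exe)" == */systemd ]]
func systemdIsPID1() bool {
	exe, err := os.Readlink("/proc/1/exe")
	return err == nil && strings.HasSuffix(exe, "/systemd")
}

func main() {
	if systemdIsPID1() {
		fmt.Println("systemd is PID 1: install systemd units")
	} else {
		fmt.Println("assuming SysVinit: install init script")
	}
}
```
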
From b03d78d00f4401950f2863506a2c12afcade4473 Mon Sep 17 00:00:00 2001
From: Oleg Grytsynevych 
Date: Thu, 20 Apr 2017 20:22:44 +0200
Subject: [PATCH 0211/1302] win_perf_counters: Format errors reported by
 pdh.dll in human-readable format (#2338)

---
 plugins/inputs/win_perf_counters/pdh.go             | 13 +++++++++++++
 .../inputs/win_perf_counters/win_perf_counters.go   |  9 ++++-----
 2 files changed, 17 insertions(+), 5 deletions(-)

diff --git a/plugins/inputs/win_perf_counters/pdh.go b/plugins/inputs/win_perf_counters/pdh.go
index fa00e0603..2caa21445 100644
--- a/plugins/inputs/win_perf_counters/pdh.go
+++ b/plugins/inputs/win_perf_counters/pdh.go
@@ -33,8 +33,11 @@
 package win_perf_counters
 
 import (
+	"fmt"
 	"syscall"
 	"unsafe"
+
+	"golang.org/x/sys/windows"
 )
 
 // Error codes
@@ -417,3 +420,13 @@ func UTF16PtrToString(s *uint16) string {
 	}
 	return syscall.UTF16ToString((*[1 << 29]uint16)(unsafe.Pointer(s))[0:])
 }
+
+func PdhFormatError(msgId uint32) string {
+	var flags uint32 = windows.FORMAT_MESSAGE_FROM_HMODULE | windows.FORMAT_MESSAGE_ARGUMENT_ARRAY | windows.FORMAT_MESSAGE_IGNORE_INSERTS
+	buf := make([]uint16, 300)
+	_, err := windows.FormatMessage(flags, uintptr(libpdhDll.Handle), msgId, 0, buf, nil)
+	if err == nil {
+		return fmt.Sprintf("%s", UTF16PtrToString(&buf[0]))
+	}
+	return fmt.Sprintf("(pdhErr=%d) %s", msgId, err.Error())
+}
diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go
index 5365dc68b..3cc946669 100644
--- a/plugins/inputs/win_perf_counters/win_perf_counters.go
+++ b/plugins/inputs/win_perf_counters/win_perf_counters.go
@@ -12,7 +12,7 @@ import (
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
 
-var sampleConfig string = `
+var sampleConfig = `
   ## By default this plugin returns basic CPU and Disk statistics.
   ## See the README file for more examples.
   ## Uncomment examples below or write your own as you see fit. If the system
@@ -124,8 +124,8 @@ func (m *Win_PerfCounters) AddItem(metrics *itemList, query string, objectName s
 	// Call PdhCollectQueryData one time to check existance of the counter
 	ret = PdhCollectQueryData(handle)
 	if ret != ERROR_SUCCESS {
-		ret = PdhCloseQuery(handle)
-		return errors.New("Invalid query for Performance Counters")
+		PdhCloseQuery(handle)
+		return errors.New(PdhFormatError(ret))
 	}
 
 	temp := &item{query, objectName, counter, instance, measurement,
@@ -174,7 +174,7 @@ func (m *Win_PerfCounters) ParseConfig(metrics *itemList) error {
 						}
 					} else {
 						if PerfObject.FailOnMissing || PerfObject.WarnOnMissing {
-							fmt.Printf("Invalid query: %s\n", query)
+							fmt.Printf("Invalid query: '%s'. Error: %s", query, err.Error())
 						}
 						if PerfObject.FailOnMissing {
 							return err
@@ -298,7 +298,6 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error {
 				bufCount = 0
 				bufSize = 0
 			}
-
 		}
 	}
 

From a2373019324322f768ec382435fe425ed4d8bdcb Mon Sep 17 00:00:00 2001
From: Alexander Blagoev 
Date: Thu, 20 Apr 2017 21:25:22 +0300
Subject: [PATCH 0212/1302] Memcached input documentation (#2685)

Closes #2615
---
 plugins/inputs/memcached/README.md | 69 ++++++++++++++++++++++++++++++
 1 file changed, 69 insertions(+)
 create mode 100644 plugins/inputs/memcached/README.md

diff --git a/plugins/inputs/memcached/README.md b/plugins/inputs/memcached/README.md
new file mode 100644
index 000000000..ed4ebe7ff
--- /dev/null
+++ b/plugins/inputs/memcached/README.md
@@ -0,0 +1,69 @@
+# Memcached Input Plugin
+
+This plugin gathers statistics from a Memcached server.
+
+### Configuration:
+
+```toml
+# Read metrics from one or many memcached servers.
+[[inputs.memcached]]
+  # An array of addresses to gather stats about. Specify an ip or hostname
+  # with an optional port, e.g. localhost, 10.0.0.1:11211, etc.
+  servers = ["localhost:11211"]
+  # An array of unix memcached sockets to gather stats about.
+  # unix_sockets = ["/var/run/memcached.sock"]
+```
+
+### Measurements & Fields:
+
+The fields from this plugin are gathered in the *memcached* measurement.
+
+Fields:
+
+* get_hits - Number of keys that have been requested and found present
+* get_misses - Number of items that have been requested and not found
+* evictions - Number of valid items removed from cache to free memory for new items
+* limit_maxbytes - Number of bytes this server is allowed to use for storage
+* bytes - Current number of bytes used to store items
+* uptime - Number of secs since the server started
+* curr_items - Current number of items stored
+* total_items - Total number of items stored since the server started
+* curr_connections - Number of open connections
+* total_connections - Total number of connections opened since the server started running
+* connection_structures - Number of connection structures allocated by the server
+* cmd_get - Cumulative number of retrieval reqs
+* cmd_set - Cumulative number of storage reqs
+* delete_hits - Number of deletion reqs resulting in an item being removed
+* delete_misses - Number of deletion reqs for missing keys
+* incr_hits - Number of successful incr reqs
+* incr_misses - Number of incr reqs against missing keys
+* decr_hits - Number of successful decr reqs
+* decr_misses - Number of decr reqs against missing keys
+* cas_hits - Number of successful CAS reqs
+* cas_misses - Number of CAS reqs against missing keys
+* bytes_read - Total number of bytes read by this server from network
+* bytes_written - Total number of bytes sent by this server to network
+* threads - Number of worker threads requested
+* conn_yields - Number of times any connection yielded to another due to hitting the -R limit
+
+Description of gathered fields taken from [here](https://github.com/memcached/memcached/blob/master/doc/protocol.txt).
+
+### Tags:
+
+* Memcached measurements have the following tags:
+    - server (the host name from which metrics are gathered)
+
+### Sample Queries:
+
+You can use the following query to get the average get hit and miss ratios, the average size and number of cached items, and the average connection count, grouped by server.
+
+```
+SELECT mean(get_hits) / mean(cmd_get) as get_ratio, mean(get_misses) / mean(cmd_get) as get_misses_ratio, mean(bytes), mean(curr_items), mean(curr_connections) FROM memcached WHERE time > now() - 1h GROUP BY server
+```
+
+### Example Output:
+
+```
+$ ./telegraf -config telegraf.conf -input-filter memcached -test
+memcached,server=localhost:11211 get_hits=1,get_misses=2,evictions=0,limit_maxbytes=0,bytes=10,uptime=3600,curr_items=2,total_items=2,curr_connections=1,total_connections=2,connection_structures=1,cmd_get=2,cmd_set=1,delete_hits=0,delete_misses=0,incr_hits=0,incr_misses=0,decr_hits=0,decr_misses=0,cas_hits=0,cas_misses=0,bytes_read=10,bytes_written=10,threads=1,conn_yields=0 1453831884664956455
+```

From 799c8bed299d3e897209c65b7d5e0015db051689 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Thu, 20 Apr 2017 15:33:54 -0700
Subject: [PATCH 0213/1302] Add fix for network aliases to changelog

Change was made in gopsutil
---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b19c3785a..626a9ef93 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -109,6 +109,7 @@ be deprecated eventually.
 - [#2649](https://github.com/influxdata/telegraf/pull/2649): Improve performance of diskio with many disks
 - [#2671](https://github.com/influxdata/telegraf/issues/2671): The internal input plugin uses the wrong units for `heap_objects`
 - [#2684](https://github.com/influxdata/telegraf/pull/2684): Fix ipmi_sensor config is shared between all plugin instances
+- [#2450](https://github.com/influxdata/telegraf/issues/2450): Network statistics not collected when system has alias interfaces
 
 ## v1.2.1 [2017-02-01]
 

From 38e1c1de779dfdd34e6952c1f8e9ca0a234927e6 Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Thu, 20 Apr 2017 16:29:39 -0700
Subject: [PATCH 0214/1302] Update commit hash of tail fork

---
 Godeps | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Godeps b/Godeps
index 510f5b1ed..9ffd7e1b8 100644
--- a/Godeps
+++ b/Godeps
@@ -22,7 +22,7 @@ github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
 github.com/gorilla/mux 392c28fe23e1c45ddba891b0320b3b5df220beea
 github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
 github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
-github.com/influxdata/tail e9ef7e826dafcb3093b40b989fefa90eeb9a8ca1
+github.com/influxdata/tail a395bf99fe07c233f41fba0735fa2b13b58588ea
 github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
 github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
 github.com/jackc/pgx b84338d7d62598f75859b2b146d830b22f1b9ec8

From da0773151b970a342e1b2a84dab52a1ca20b669a Mon Sep 17 00:00:00 2001
From: Daniel Nelson 
Date: Fri, 21 Apr 2017 10:55:54 -0700
Subject: [PATCH 0215/1302] Use C locale when running sadf (#2690)

fixes #1911
---
 CHANGELOG.md                                  |  1 +
 plugins/inputs/sysstat/sysstat.go             | 26 +++++++++++++++++++
 .../inputs/sysstat/sysstat_interval_test.go   |  3 +++
 3 files changed, 30 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 626a9ef93..c2ddc9d27 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -110,6 +110,7 @@ be deprecated eventually.
 - [#2671](https://github.com/influxdata/telegraf/issues/2671): The internal input plugin uses the wrong units for `heap_objects`
 - [#2684](https://github.com/influxdata/telegraf/pull/2684): Fix ipmi_sensor config is shared between all plugin instances
 - [#2450](https://github.com/influxdata/telegraf/issues/2450): Network statistics not collected when system has alias interfaces
+- [#1911](https://github.com/influxdata/telegraf/issues/1911): Sysstat plugin needs LANG=C or similar locale
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/plugins/inputs/sysstat/sysstat.go b/plugins/inputs/sysstat/sysstat.go
index 9c9ef6b05..27e181002 100644
--- a/plugins/inputs/sysstat/sysstat.go
+++ b/plugins/inputs/sysstat/sysstat.go
@@ -210,11 +210,37 @@ func (s *Sysstat) collect() error {
 	return nil
 }
 
+func filterEnviron(env []string, prefix string) []string {
+	newenv := env[:0]
+	for _, envvar := range env {
+		if !strings.HasPrefix(envvar, prefix) {
+			newenv = append(newenv, envvar)
+		}
+	}
+	return newenv
+}
+
+// Return the Cmd with its environment configured to use the C locale
+func withCLocale(cmd *exec.Cmd) *exec.Cmd {
+	var env []string
+	if cmd.Env != nil {
+		env = cmd.Env
+	} else {
+		env = os.Environ()
+	}
+	env = filterEnviron(env, "LANG")
+	env = filterEnviron(env, "LC_")
+	env = append(env, "LANG=C")
+	cmd.Env = env
+	return cmd
+}
+
 // parse runs Sadf on the previously saved tmpFile:
 //    Sadf -p -- -p